author     Andrej Shadura <andrewsh@debian.org>   2018-12-18 16:54:37 +0100
committer  Andrej Shadura <andrewsh@debian.org>   2018-12-18 16:54:37 +0100
commit     009f8e9becfd911e5ebfc5f76f91c32f30e841fb (patch)
tree       57cba64a92a4f44dbb793bcac82c183914b579bc
parent     8e06f4e756a71e9ed81e46b1cc091f0ea78bbcb6 (diff)
parent     d74515335df48009cfa750ea7955c7195171978b (diff)

Merge tag 'debian/0.33.9-2' into debian/stretch-backports (debian/0.33.9-2_bpo9+1)
matrix-synapse release 0.33.9-2 for unstable (sid) (maintainer view tag generated by dgit --quilt=gbp)
-rw-r--r--  .circleci/config.yml | 166
-rwxr-xr-x  .circleci/merge_base_branch.sh | 34
-rw-r--r--  .gitignore | 3
-rw-r--r--  .travis.yml | 55
-rw-r--r--  CHANGES.md | 333
-rw-r--r--  CONTRIBUTING.rst | 38
-rw-r--r--  MANIFEST.in | 10
-rw-r--r--  MAP.rst | 35
-rw-r--r--  README.rst | 147
-rw-r--r--  UPGRADE.rst | 19
-rw-r--r--  contrib/docker/docker-compose.yml | 8
-rw-r--r--  contrib/grafana/synapse.json | 592
-rw-r--r--  contrib/purge_api/README.md | 16
-rw-r--r--  contrib/purge_api/purge_history.sh | 141
-rw-r--r--  contrib/purge_api/purge_remote_media.sh | 54
-rw-r--r--  debian/changelog | 21
-rw-r--r--  debian/control | 23
-rw-r--r--  debian/patches/0001-tox.patch | 19
-rw-r--r--  debian/patches/0002-change_instructions.patch | 4
-rw-r--r--  debian/patches/0004-webclient-instructions.patch | 2
-rw-r--r--  debian/patches/0005-Honour-config.web_client.patch | 10
-rw-r--r--  debian/patches/0006-Avoid-pip-install.patch | 6
-rw-r--r--  debian/patches/series | 1
-rw-r--r--  docker/Dockerfile | 60
-rw-r--r--  docker/Dockerfile-pgtests | 12
-rw-r--r--  docker/README.md | 1
-rw-r--r--  docker/conf/homeserver.yaml | 10
-rwxr-xr-x  docker/run_pg_tests.sh | 20
-rwxr-xr-x  docker/start.py | 3
-rw-r--r--  docs/consent_tracking.md | 39
-rw-r--r--  docs/privacy_policy_templates/en/1.0.html | 15
-rwxr-xr-x  jenkins-dendron-haproxy-postgres.sh | 23
-rwxr-xr-x  jenkins-dendron-postgres.sh | 20
-rwxr-xr-x  jenkins-flake8.sh | 22
-rwxr-xr-x  jenkins-postgres.sh | 18
-rwxr-xr-x  jenkins-sqlite.sh | 16
-rwxr-xr-x  jenkins-unittests.sh | 30
-rwxr-xr-x  jenkins/clone.sh | 44
-rwxr-xr-x  jenkins/prepare_synapse.sh | 19
-rw-r--r--  scripts-dev/check_auth.py | 36
-rw-r--r--  scripts-dev/check_event_hash.py | 32
-rw-r--r--  scripts-dev/check_signature.py | 36
-rw-r--r--  scripts-dev/convert_server_keys.py | 40
-rwxr-xr-x  scripts-dev/copyrighter-sql.pl | 33
-rwxr-xr-x  scripts-dev/copyrighter.pl | 33
-rwxr-xr-x  scripts-dev/definitions.py | 54
-rwxr-xr-x  scripts-dev/dump_macaroon.py | 13
-rwxr-xr-x  scripts-dev/federation_client.py | 106
-rw-r--r--  scripts-dev/hash_history.py | 62
-rwxr-xr-x  scripts-dev/list_url_patterns.py | 16
-rwxr-xr-x  scripts-dev/make_identicons.pl | 39
-rwxr-xr-x  scripts-dev/next_github_number.sh | 9
-rwxr-xr-x  scripts-dev/nuke-room-from-db.sh | 57
-rw-r--r--  scripts-dev/tail-synapse.py | 26
-rwxr-xr-x  scripts/hash_password | 44
-rwxr-xr-x  scripts/move_remote_media_to_new_store.py | 36
-rwxr-xr-x  scripts/register_new_matrix_user | 184
-rwxr-xr-x  scripts/synapse_port_db | 275
-rw-r--r--  setup.cfg | 16
-rwxr-xr-x  setup.py | 6
-rw-r--r--  synapse/__init__.py | 12
-rw-r--r--  synapse/_scripts/__init__.py | 0
-rw-r--r--  synapse/_scripts/register_new_matrix_user.py | 215
-rw-r--r--  synapse/api/constants.py | 9
-rw-r--r--  synapse/api/errors.py | 15
-rw-r--r--  synapse/api/filtering.py | 8
-rw-r--r--  synapse/api/urls.py | 3
-rw-r--r--  synapse/app/__init__.py | 2
-rw-r--r--  synapse/app/_base.py | 30
-rw-r--r--  synapse/app/appservice.py | 3
-rw-r--r--  synapse/app/client_reader.py | 3
-rw-r--r--  synapse/app/event_creator.py | 6
-rw-r--r--  synapse/app/federation_reader.py | 3
-rw-r--r--  synapse/app/federation_sender.py | 3
-rw-r--r--  synapse/app/frontend_proxy.py | 7
-rwxr-xr-x  synapse/app/homeserver.py | 68
-rw-r--r--  synapse/app/media_repository.py | 3
-rw-r--r--  synapse/app/pusher.py | 24
-rw-r--r--  synapse/app/synchrotron.py | 29
-rwxr-xr-x  synapse/app/synctl.py | 284
-rw-r--r--  synapse/app/user_dir.py | 3
-rw-r--r--  synapse/appservice/api.py | 13
-rw-r--r--  synapse/config/__main__.py | 4
-rw-r--r--  synapse/config/_base.py | 121
-rw-r--r--  synapse/config/consent_config.py | 18
-rw-r--r--  synapse/config/emailconfig.py | 34
-rw-r--r--  synapse/config/homeserver.py | 5
-rw-r--r--  synapse/config/jwt_config.py (renamed from synapse/config/jwt.py) | 0
-rw-r--r--  synapse/config/logger.py | 18
-rw-r--r--  synapse/config/registration.py | 15
-rw-r--r--  synapse/config/repository.py | 2
-rw-r--r--  synapse/config/room_directory.py | 102
-rw-r--r--  synapse/crypto/context_factory.py | 2
-rw-r--r--  synapse/crypto/keyclient.py | 18
-rw-r--r--  synapse/crypto/keyring.py | 135
-rw-r--r--  synapse/event_auth.py | 17
-rw-r--r--  synapse/events/__init__.py | 36
-rw-r--r--  synapse/federation/federation_base.py | 34
-rw-r--r--  synapse/federation/federation_client.py | 46
-rw-r--r--  synapse/federation/federation_server.py | 99
-rw-r--r--  synapse/federation/transaction_queue.py | 79
-rw-r--r--  synapse/federation/transport/client.py | 24
-rw-r--r--  synapse/federation/transport/server.py | 35
-rw-r--r--  synapse/federation/units.py | 3
-rw-r--r--  synapse/handlers/appservice.py | 17
-rw-r--r--  synapse/handlers/auth.py | 47
-rw-r--r--  synapse/handlers/deactivate_account.py | 4
-rw-r--r--  synapse/handlers/directory.py | 135
-rw-r--r--  synapse/handlers/e2e_keys.py | 7
-rw-r--r--  synapse/handlers/e2e_room_keys.py | 297
-rw-r--r--  synapse/handlers/federation.py | 576
-rw-r--r--  synapse/handlers/groups_local.py | 18
-rw-r--r--  synapse/handlers/initial_sync.py | 4
-rw-r--r--  synapse/handlers/message.py | 50
-rw-r--r--  synapse/handlers/pagination.py | 24
-rw-r--r--  synapse/handlers/profile.py | 10
-rw-r--r--  synapse/handlers/receipts.py | 2
-rw-r--r--  synapse/handlers/register.py | 36
-rw-r--r--  synapse/handlers/room.py | 428
-rw-r--r--  synapse/handlers/room_list.py | 13
-rw-r--r--  synapse/handlers/room_member.py | 5
-rw-r--r--  synapse/handlers/search.py | 22
-rw-r--r--  synapse/handlers/sync.py | 290
-rw-r--r--  synapse/handlers/typing.py | 37
-rw-r--r--  synapse/handlers/user_directory.py | 14
-rw-r--r--  synapse/http/__init__.py | 4
-rw-r--r--  synapse/http/client.py | 96
-rw-r--r--  synapse/http/endpoint.py | 13
-rw-r--r--  synapse/http/matrixfederationclient.py | 608
-rw-r--r--  synapse/http/request_metrics.py | 53
-rw-r--r--  synapse/http/server.py | 57
-rw-r--r--  synapse/http/servlet.py | 9
-rw-r--r--  synapse/http/site.py | 40
-rw-r--r--  synapse/metrics/__init__.py | 108
-rw-r--r--  synapse/metrics/background_process_metrics.py | 14
-rw-r--r--  synapse/notifier.py | 35
-rw-r--r--  synapse/push/emailpusher.py | 72
-rw-r--r--  synapse/push/httppusher.py | 77
-rw-r--r--  synapse/push/mailer.py | 22
-rw-r--r--  synapse/push/push_rule_evaluator.py | 4
-rw-r--r--  synapse/push/pusherpool.py | 135
-rw-r--r--  synapse/python_dependencies.py | 51
-rw-r--r--  synapse/replication/slave/storage/_base.py | 9
-rw-r--r--  synapse/replication/slave/storage/deviceinbox.py | 12
-rw-r--r--  synapse/replication/slave/storage/devices.py | 16
-rw-r--r--  synapse/replication/slave/storage/groups.py | 8
-rw-r--r--  synapse/replication/slave/storage/keys.py | 14
-rw-r--r--  synapse/replication/slave/storage/presence.py | 6
-rw-r--r--  synapse/replication/tcp/client.py | 2
-rw-r--r--  synapse/replication/tcp/protocol.py | 24
-rw-r--r--  synapse/replication/tcp/streams.py | 2
-rw-r--r--  synapse/res/templates/mail-Vector.css (renamed from res/templates/mail-Vector.css) | 0
-rw-r--r--  synapse/res/templates/mail.css (renamed from res/templates/mail.css) | 0
-rw-r--r--  synapse/res/templates/notif.html (renamed from res/templates/notif.html) | 0
-rw-r--r--  synapse/res/templates/notif.txt (renamed from res/templates/notif.txt) | 0
-rw-r--r--  synapse/res/templates/notif_mail.html (renamed from res/templates/notif_mail.html) | 0
-rw-r--r--  synapse/res/templates/notif_mail.txt (renamed from res/templates/notif_mail.txt) | 0
-rw-r--r--  synapse/res/templates/room.html (renamed from res/templates/room.html) | 0
-rw-r--r--  synapse/res/templates/room.txt (renamed from res/templates/room.txt) | 0
-rw-r--r--  synapse/rest/__init__.py | 4
-rw-r--r--  synapse/rest/client/v1/admin.py | 9
-rw-r--r--  synapse/rest/client/v1/directory.py | 37
-rw-r--r--  synapse/rest/client/v1/events.py | 12
-rw-r--r--  synapse/rest/client/v1/initial_sync.py | 2
-rw-r--r--  synapse/rest/client/v1/login.py | 44
-rw-r--r--  synapse/rest/client/v1/push_rule.py | 24
-rw-r--r--  synapse/rest/client/v1/pusher.py | 4
-rw-r--r--  synapse/rest/client/v1/room.py | 17
-rw-r--r--  synapse/rest/client/v1/voip.py | 6
-rw-r--r--  synapse/rest/client/v2_alpha/account.py | 16
-rw-r--r--  synapse/rest/client/v2_alpha/auth.py | 83
-rw-r--r--  synapse/rest/client/v2_alpha/register.py | 25
-rw-r--r--  synapse/rest/client/v2_alpha/room_keys.py | 387
-rw-r--r--  synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py | 89
-rw-r--r--  synapse/rest/client/v2_alpha/sync.py | 53
-rw-r--r--  synapse/rest/client/v2_alpha/thirdparty.py | 4
-rw-r--r--  synapse/rest/consent/consent_resource.py | 39
-rw-r--r--  synapse/rest/key/v1/__init__.py | 14
-rw-r--r--  synapse/rest/key/v1/server_key_resource.py | 92
-rw-r--r--  synapse/rest/key/v2/__init__.py | 4
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py | 6
-rw-r--r--  synapse/rest/media/v0/content_repository.py | 4
-rw-r--r--  synapse/rest/media/v1/_base.py | 30
-rw-r--r--  synapse/rest/media/v1/download_resource.py | 13
-rw-r--r--  synapse/rest/media/v1/identicon_resource.py | 68
-rw-r--r--  synapse/rest/media/v1/media_repository.py | 53
-rw-r--r--  synapse/rest/media/v1/media_storage.py | 8
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 42
-rw-r--r--  synapse/rest/media/v1/storage_provider.py | 6
-rw-r--r--  synapse/server.py | 11
-rw-r--r--  synapse/server.pyi | 6
-rw-r--r--  synapse/state/__init__.py | 108
-rw-r--r--  synapse/state/v1.py | 16
-rw-r--r--  synapse/state/v2.py | 548
-rw-r--r--  synapse/storage/__init__.py | 2
-rw-r--r--  synapse/storage/_base.py | 4
-rw-r--r--  synapse/storage/client_ips.py | 34
-rw-r--r--  synapse/storage/devices.py | 100
-rw-r--r--  synapse/storage/directory.py | 3
-rw-r--r--  synapse/storage/e2e_room_keys.py | 335
-rw-r--r--  synapse/storage/end_to_end_keys.py | 5
-rw-r--r--  synapse/storage/event_federation.py | 42
-rw-r--r--  synapse/storage/events.py | 360
-rw-r--r--  synapse/storage/keys.py | 3
-rw-r--r--  synapse/storage/monthly_active_users.py | 99
-rw-r--r--  synapse/storage/prepare_database.py | 2
-rw-r--r--  synapse/storage/pusher.py | 2
-rw-r--r--  synapse/storage/registration.py | 37
-rw-r--r--  synapse/storage/room.py | 2
-rw-r--r--  synapse/storage/roommember.py | 65
-rw-r--r--  synapse/storage/schema/delta/40/device_list_streams.sql | 9
-rw-r--r--  synapse/storage/schema/delta/51/e2e_room_keys.sql | 39
-rw-r--r--  synapse/storage/schema/delta/52/add_event_to_state_group_index.sql | 19
-rw-r--r--  synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql | 36
-rw-r--r--  synapse/storage/schema/delta/52/e2e_room_keys.sql | 53
-rw-r--r--  synapse/storage/signatures.py | 2
-rw-r--r--  synapse/storage/state.py | 876
-rw-r--r--  synapse/storage/stream.py | 16
-rw-r--r--  synapse/storage/transactions.py | 33
-rw-r--r--  synapse/util/__init__.py | 56
-rw-r--r--  synapse/util/async_helpers.py | 64
-rw-r--r--  synapse/util/caches/__init__.py | 27
-rw-r--r--  synapse/util/caches/expiringcache.py | 40
-rw-r--r--  synapse/util/caches/stream_change_cache.py | 4
-rw-r--r--  synapse/util/logcontext.py | 161
-rw-r--r--  synapse/util/manhole.py | 50
-rw-r--r--  synapse/util/metrics.py | 22
-rw-r--r--  synapse/util/retryutils.py | 2
-rw-r--r--  synapse/visibility.py | 30
-rwxr-xr-x [l---------]  synctl | 297
-rwxr-xr-x  test_postgresql.sh | 12
-rw-r--r--  tests/api/test_auth.py | 1
-rw-r--r--  tests/api/test_filtering.py | 12
-rw-r--r--  tests/app/test_frontend_proxy.py | 4
-rw-r--r--  tests/config/test_generate.py | 2
-rw-r--r--  tests/config/test_room_directory.py | 67
-rw-r--r--  tests/events/test_utils.py | 4
-rw-r--r--  tests/handlers/test_directory.py | 48
-rw-r--r--  tests/handlers/test_e2e_room_keys.py | 392
-rw-r--r--  tests/handlers/test_register.py | 75
-rw-r--r--  tests/handlers/test_roomlist.py | 39
-rw-r--r--  tests/handlers/test_typing.py | 4
-rw-r--r--  tests/http/test_fedclient.py | 190
-rw-r--r--  tests/push/__init__.py | 0
-rw-r--r--  tests/push/test_email.py | 148
-rw-r--r--  tests/push/test_http.py | 159
-rw-r--r--  tests/replication/slave/storage/_base.py | 35
-rw-r--r--  tests/replication/slave/storage/test_events.py | 6
-rw-r--r--  tests/rest/client/test_consent.py | 118
-rw-r--r--  tests/rest/client/v1/test_admin.py | 116
-rw-r--r--  tests/rest/client/v1/test_register.py | 10
-rw-r--r--  tests/rest/client/v1/test_rooms.py | 665
-rw-r--r--  tests/rest/client/v1/utils.py | 22
-rw-r--r--  tests/rest/client/v2_alpha/test_filter.py | 95
-rw-r--r--  tests/rest/client/v2_alpha/test_register.py | 52
-rw-r--r--  tests/rest/client/v2_alpha/test_sync.py | 131
-rw-r--r--  tests/rest/media/v1/test_url_preview.py | 164
-rw-r--r--  tests/scripts/__init__.py | 0
-rw-r--r--  tests/scripts/test_new_matrix_user.py | 160
-rw-r--r--  tests/server.py | 183
-rw-r--r--  tests/server_notices/test_consent.py | 100
-rw-r--r--  tests/server_notices/test_resource_limits_server_notices.py | 40
-rw-r--r--  tests/state/__init__.py | 0
-rw-r--r--  tests/state/test_v2.py | 759
-rw-r--r--  tests/storage/test_client_ips.py | 160
-rw-r--r--  tests/storage/test_end_to_end_keys.py | 15
-rw-r--r--  tests/storage/test_monthly_active_users.py | 87
-rw-r--r--  tests/storage/test_state.py | 276
-rw-r--r--  tests/storage/test_transactions.py | 45
-rw-r--r--  tests/test_federation.py | 114
-rw-r--r--  tests/test_mau.py | 61
-rw-r--r--  tests/test_metrics.py | 81
-rw-r--r--  tests/test_server.py | 86
-rw-r--r--  tests/test_state.py | 2
-rw-r--r--  tests/test_terms_auth.py | 123
-rw-r--r--  tests/unittest.py | 111
-rw-r--r--  tests/util/test_expiring_cache.py | 1
-rw-r--r--  tests/util/test_logcontext.py | 5
-rw-r--r--  tests/utils.py | 173
-rw-r--r--  tox.ini | 99
280 files changed, 13112 insertions, 5314 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index e03f01b8..53950284 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,48 +1,172 @@
version: 2
jobs:
- sytestpy2:
+ dockerhubuploadrelease:
+ machine: true
+ steps:
+ - checkout
+ - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG} .
+ - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
+ - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+ - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
+ - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
+ dockerhubuploadlatest:
machine: true
steps:
- checkout
- - run: docker pull matrixdotorg/sytest-synapsepy2
- - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
+ - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1} .
+ - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1}-py3 --build-arg PYTHON_VERSION=3.6 .
+ - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+ - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1} matrixdotorg/synapse:latest
+ - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1}-py3 matrixdotorg/synapse:latest-py3
+ - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}
+ - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}-py3
+ - run: docker push matrixdotorg/synapse:latest
+ - run: docker push matrixdotorg/synapse:latest-py3
+ sytestpy2:
+ docker:
+ - image: matrixdotorg/sytest-synapsepy2
+ working_directory: /src
+ steps:
+ - checkout
+ - run: /synapse_sytest.sh
- store_artifacts:
- path: ~/project/logs
+ path: /logs
destination: logs
+ - store_test_results:
+ path: /logs
sytestpy2postgres:
- machine: true
+ docker:
+ - image: matrixdotorg/sytest-synapsepy2
+ working_directory: /src
steps:
- checkout
- - run: docker pull matrixdotorg/sytest-synapsepy2
- - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
+ - run: POSTGRES=1 /synapse_sytest.sh
- store_artifacts:
- path: ~/project/logs
+ path: /logs
destination: logs
+ - store_test_results:
+ path: /logs
+ sytestpy2merged:
+ docker:
+ - image: matrixdotorg/sytest-synapsepy2
+ working_directory: /src
+ steps:
+ - checkout
+ - run: bash .circleci/merge_base_branch.sh
+ - run: /synapse_sytest.sh
+ - store_artifacts:
+ path: /logs
+ destination: logs
+ - store_test_results:
+ path: /logs
+ sytestpy2postgresmerged:
+ docker:
+ - image: matrixdotorg/sytest-synapsepy2
+ working_directory: /src
+ steps:
+ - checkout
+ - run: bash .circleci/merge_base_branch.sh
+ - run: POSTGRES=1 /synapse_sytest.sh
+ - store_artifacts:
+ path: /logs
+ destination: logs
+ - store_test_results:
+ path: /logs
+
sytestpy3:
- machine: true
+ docker:
+ - image: matrixdotorg/sytest-synapsepy3
+ working_directory: /src
steps:
- checkout
- - run: docker pull matrixdotorg/sytest-synapsepy3
- - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3
+ - run: /synapse_sytest.sh
- store_artifacts:
- path: ~/project/logs
+ path: /logs
destination: logs
+ - store_test_results:
+ path: /logs
sytestpy3postgres:
- machine: true
+ docker:
+ - image: matrixdotorg/sytest-synapsepy3
+ working_directory: /src
+ steps:
+ - checkout
+ - run: POSTGRES=1 /synapse_sytest.sh
+ - store_artifacts:
+ path: /logs
+ destination: logs
+ - store_test_results:
+ path: /logs
+ sytestpy3merged:
+ docker:
+ - image: matrixdotorg/sytest-synapsepy3
+ working_directory: /src
+ steps:
+ - checkout
+ - run: bash .circleci/merge_base_branch.sh
+ - run: /synapse_sytest.sh
+ - store_artifacts:
+ path: /logs
+ destination: logs
+ - store_test_results:
+ path: /logs
+ sytestpy3postgresmerged:
+ docker:
+ - image: matrixdotorg/sytest-synapsepy3
+ working_directory: /src
steps:
- checkout
- - run: docker pull matrixdotorg/sytest-synapsepy3
- - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
+ - run: bash .circleci/merge_base_branch.sh
+ - run: POSTGRES=1 /synapse_sytest.sh
- store_artifacts:
- path: ~/project/logs
+ path: /logs
destination: logs
+ - store_test_results:
+ path: /logs
workflows:
version: 2
build:
jobs:
- - sytestpy2
- - sytestpy2postgres
-# Currently broken while the Python 3 port is incomplete
-# - sytestpy3
-# - sytestpy3postgres
+ - sytestpy2:
+ filters:
+ branches:
+ only: /develop|master|release-.*/
+ - sytestpy2postgres:
+ filters:
+ branches:
+ only: /develop|master|release-.*/
+ - sytestpy3:
+ filters:
+ branches:
+ only: /develop|master|release-.*/
+ - sytestpy3postgres:
+ filters:
+ branches:
+ only: /develop|master|release-.*/
+ - sytestpy2merged:
+ filters:
+ branches:
+ ignore: /develop|master|release-.*/
+ - sytestpy2postgresmerged:
+ filters:
+ branches:
+ ignore: /develop|master|release-.*/
+ - sytestpy3merged:
+ filters:
+ branches:
+ ignore: /develop|master|release-.*/
+ - sytestpy3postgresmerged:
+ filters:
+ branches:
+ ignore: /develop|master|release-.*/
+ - dockerhubuploadrelease:
+ filters:
+ tags:
+ only: /v[0-9].[0-9]+.[0-9]+.*/
+ branches:
+ ignore: /.*/
+ - dockerhubuploadlatest:
+ filters:
+ branches:
+ only: master
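The two dockerhubupload jobs above only run on CircleCI, but the image builds they
perform can be reproduced locally with plain Docker from a checkout. A minimal sketch,
assuming Docker is installed and using ``v0.33.9`` as a stand-in for ``${CIRCLE_TAG}``::

    # build the default (Python 2) image, as dockerhubuploadrelease does
    docker build -f docker/Dockerfile -t matrixdotorg/synapse:v0.33.9 .
    # build the Python 3 variant by overriding the PYTHON_VERSION build argument
    docker build -f docker/Dockerfile -t matrixdotorg/synapse:v0.33.9-py3 \
        --build-arg PYTHON_VERSION=3.6 .

The login and push steps are omitted here, since they need the Docker Hub credentials
($DOCKER_HUB_USERNAME / $DOCKER_HUB_PASSWORD) that are only available in CI.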
diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh
new file mode 100755
index 00000000..b2c8c40f
--- /dev/null
+++ b/.circleci/merge_base_branch.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+set -e
+
+# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
+# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
+echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
+source $BASH_ENV
+
+if [[ -z "${CIRCLE_PR_NUMBER}" ]]
+then
+ echo "Can't figure out what the PR number is! Assuming merge target is develop."
+
+ # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
+ # can probably assume it's based on it and will be merged into it.
+ GITBASE="develop"
+else
+ # Get the reference, using the GitHub API
+ GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
+fi
+
+# Show what we are before
+git show -s
+
+# Set up username so it can do a merge
+git config --global user.email bot@matrix.org
+git config --global user.name "A robot"
+
+# Fetch and merge. If it doesn't work, it will raise due to set -e.
+git fetch -u origin $GITBASE
+git merge --no-edit origin/$GITBASE
+
+# Show what we are after.
+git show -s
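The ``${CIRCLE_PULL_REQUEST##*/}`` expansion used above strips everything up to and
including the last ``/`` from the pull request URL, leaving only the PR number. A quick
illustration with a made-up URL::

    $ CIRCLE_PULL_REQUEST="https://github.com/matrix-org/synapse/pull/4100"
    $ echo "${CIRCLE_PULL_REQUEST##*/}"
    4100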
diff --git a/.gitignore b/.gitignore
index 9f42a756..3b2252ad 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,11 @@
*.pyc
.*.swp
*~
+*.lock
.DS_Store
_trial_temp/
+_trial_temp*/
logs/
dbs/
*.egg
@@ -44,6 +46,7 @@ media_store/
build/
venv/
venv*/
+*venv/
localhost-800*/
static/client/register/register_config.js
diff --git a/.travis.yml b/.travis.yml
index 11c76db2..655fab9d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,39 +1,70 @@
sudo: false
language: python
-# tell travis to cache ~/.cache/pip
-cache: pip
+cache:
+ directories:
+ # we only bother to cache the wheels; parts of the http cache get
+ # invalidated every build (because they get served with a max-age of 600
+ # seconds), which means that we end up re-uploading the whole cache for
+ # every build, which is time-consuming In any case, it's not obvious that
+ # downloading the cache from S3 would be much faster than downloading the
+ # originals from pypi.
+ #
+ - $HOME/.cache/pip/wheels
-before_script:
- - git remote set-branches --add origin develop
- - git fetch origin develop
+# don't clone the whole repo history, one commit will do
+git:
+ depth: 1
-services:
- - postgresql
+# only build branches we care about (PRs are built seperately)
+branches:
+ only:
+ - master
+ - develop
+ - /^release-v/
+# When running the tox environments that call Twisted Trial, we can pass the -j
+# flag to run the tests concurrently. We set this to 2 for CPU bound tests
+# (SQLite) and 4 for I/O bound tests (PostgreSQL).
matrix:
fast_finish: true
include:
- python: 2.7
env: TOX_ENV=packaging
+ - python: 3.6
+ env: TOX_ENV="pep8,check_isort"
+
- python: 2.7
- env: TOX_ENV=pep8
+ env: TOX_ENV=py27 TRIAL_FLAGS="-j 2"
- python: 2.7
- env: TOX_ENV=py27
+ env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
- python: 2.7
env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
+ services:
+ - postgresql
- - python: 3.6
- env: TOX_ENV=py36
+ - python: 3.5
+ env: TOX_ENV=py35 TRIAL_FLAGS="-j 2"
- python: 3.6
- env: TOX_ENV=check_isort
+ env: TOX_ENV=py36 TRIAL_FLAGS="-j 2"
- python: 3.6
+ env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
+ services:
+ - postgresql
+
+ - # we only need to check for the newsfragment if it's a PR build
+ if: type = pull_request
+ python: 3.6
env: TOX_ENV=check-newsfragment
+ script:
+ - git remote set-branches --add origin develop
+ - git fetch origin develop
+ - tox -e $TOX_ENV
install:
- pip install tox
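The TRIAL_FLAGS values in the matrix above are what make the Trial runs concurrent.
Assuming the tox environments forward that variable to trial (as the comment added in
this config implies), the same speed-up can be had locally, for example::

    # run the SQLite-backed unit tests on two concurrent workers
    TRIAL_FLAGS="-j 2" tox -e py27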
diff --git a/CHANGES.md b/CHANGES.md
index ee864c3c..1c3d575c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,336 @@
+Synapse 0.33.9 (2018-11-19)
+===========================
+
+No significant changes.
+
+
+Synapse 0.33.9rc1 (2018-11-14)
+==============================
+
+Features
+--------
+
+- Include flags to optionally add `m.login.terms` to the registration flow when consent tracking is enabled. ([\#4004](https://github.com/matrix-org/synapse/issues/4004), [\#4133](https://github.com/matrix-org/synapse/issues/4133), [\#4142](https://github.com/matrix-org/synapse/issues/4142), [\#4184](https://github.com/matrix-org/synapse/issues/4184))
+- Support for replacing rooms with new ones ([\#4091](https://github.com/matrix-org/synapse/issues/4091), [\#4099](https://github.com/matrix-org/synapse/issues/4099), [\#4100](https://github.com/matrix-org/synapse/issues/4100), [\#4101](https://github.com/matrix-org/synapse/issues/4101))
+
+
+Bugfixes
+--------
+
+- Fix exceptions when using the email mailer on Python 3. ([\#4095](https://github.com/matrix-org/synapse/issues/4095))
+- Fix e2e key backup with more than 9 backup versions ([\#4113](https://github.com/matrix-org/synapse/issues/4113))
+- Searches that request profile info now no longer fail with a 500. ([\#4122](https://github.com/matrix-org/synapse/issues/4122))
+- fix return code of empty key backups ([\#4123](https://github.com/matrix-org/synapse/issues/4123))
+- If the typing stream ID goes backwards (as on a worker when the master restarts), the worker's typing handler will no longer erroneously report rooms containing new typing events. ([\#4127](https://github.com/matrix-org/synapse/issues/4127))
+- Fix table lock of device_lists_remote_cache which could freeze the application ([\#4132](https://github.com/matrix-org/synapse/issues/4132))
+- Fix exception when using state res v2 algorithm ([\#4135](https://github.com/matrix-org/synapse/issues/4135))
+- Generating the user consent URI no longer fails on Python 3. ([\#4140](https://github.com/matrix-org/synapse/issues/4140), [\#4163](https://github.com/matrix-org/synapse/issues/4163))
+- Loading URL previews from the DB cache on Postgres will no longer cause Unicode type errors when responding to the request, and URL previews will no longer fail if the remote server returns a Content-Type header with the chartype in quotes. ([\#4157](https://github.com/matrix-org/synapse/issues/4157))
+- The hash_password script now works on Python 3. ([\#4161](https://github.com/matrix-org/synapse/issues/4161))
+- Fix noop checks when updating device keys, reducing spurious device list update notifications. ([\#4164](https://github.com/matrix-org/synapse/issues/4164))
+
+
+Deprecations and Removals
+-------------------------
+
+- The disused and un-specced identicon generator has been removed. ([\#4106](https://github.com/matrix-org/synapse/issues/4106))
+- The obsolete and non-functional /pull federation endpoint has been removed. ([\#4118](https://github.com/matrix-org/synapse/issues/4118))
+- The deprecated v1 key exchange endpoints have been removed. ([\#4119](https://github.com/matrix-org/synapse/issues/4119))
+- Synapse will no longer fetch keys using the fallback deprecated v1 key exchange method and will now always use v2. ([\#4120](https://github.com/matrix-org/synapse/issues/4120))
+
+
+Internal Changes
+----------------
+
+- Fix build of Docker image with docker-compose ([\#3778](https://github.com/matrix-org/synapse/issues/3778))
+- Delete unreferenced state groups during history purge ([\#4006](https://github.com/matrix-org/synapse/issues/4006))
+- The "Received rdata" log messages on workers is now logged at DEBUG, not INFO. ([\#4108](https://github.com/matrix-org/synapse/issues/4108))
+- Reduce replication traffic for device lists ([\#4109](https://github.com/matrix-org/synapse/issues/4109))
+- Fix `synapse_replication_tcp_protocol_*_commands` metric label to be full command name, rather than just the first character ([\#4110](https://github.com/matrix-org/synapse/issues/4110))
+- Log some bits about room creation ([\#4121](https://github.com/matrix-org/synapse/issues/4121))
+- Fix `tox` failure on old systems ([\#4124](https://github.com/matrix-org/synapse/issues/4124))
+- Add STATE_V2_TEST room version ([\#4128](https://github.com/matrix-org/synapse/issues/4128))
+- Clean up event accesses and tests ([\#4137](https://github.com/matrix-org/synapse/issues/4137))
+- The default logging config will now set an explicit log file encoding of UTF-8. ([\#4138](https://github.com/matrix-org/synapse/issues/4138))
+- Add helpers functions for getting prev and auth events of an event ([\#4139](https://github.com/matrix-org/synapse/issues/4139))
+- Add some tests for the HTTP pusher. ([\#4149](https://github.com/matrix-org/synapse/issues/4149))
+- add purge_history.sh and purge_remote_media.sh scripts to contrib/ ([\#4155](https://github.com/matrix-org/synapse/issues/4155))
+- HTTP tests have been refactored to contain less boilerplate. ([\#4156](https://github.com/matrix-org/synapse/issues/4156))
+- Drop incoming events from federation for unknown rooms ([\#4165](https://github.com/matrix-org/synapse/issues/4165))
+
+
+Synapse 0.33.8 (2018-11-01)
+===========================
+
+No significant changes.
+
+
+Synapse 0.33.8rc2 (2018-10-31)
+==============================
+
+Bugfixes
+--------
+
+- Searches that request profile info now no longer fail with a 500. Fixes
+ a regression in 0.33.8rc1. ([\#4122](https://github.com/matrix-org/synapse/issues/4122))
+
+
+Synapse 0.33.8rc1 (2018-10-29)
+==============================
+
+Features
+--------
+
+- Servers with auto-join rooms will now automatically create those rooms when the first user registers ([\#3975](https://github.com/matrix-org/synapse/issues/3975))
+- Add config option to control alias creation ([\#4051](https://github.com/matrix-org/synapse/issues/4051))
+- The register_new_matrix_user script is now ported to Python 3. ([\#4085](https://github.com/matrix-org/synapse/issues/4085))
+- Configure Docker image to listen on both ipv4 and ipv6. ([\#4089](https://github.com/matrix-org/synapse/issues/4089))
+
+
+Bugfixes
+--------
+
+- Fix HTTP error response codes for federated group requests. ([\#3969](https://github.com/matrix-org/synapse/issues/3969))
+- Fix issue where Python 3 users couldn't paginate /publicRooms ([\#4046](https://github.com/matrix-org/synapse/issues/4046))
+- Fix URL previewing to work in Python 3.7 ([\#4050](https://github.com/matrix-org/synapse/issues/4050))
+- synctl will use the right python executable to run worker processes ([\#4057](https://github.com/matrix-org/synapse/issues/4057))
+- Manhole now works again on Python 3, instead of failing with a "couldn't match all kex parts" when connecting. ([\#4060](https://github.com/matrix-org/synapse/issues/4060), [\#4067](https://github.com/matrix-org/synapse/issues/4067))
+- Fix some metrics being racy and causing exceptions when polled by Prometheus. ([\#4061](https://github.com/matrix-org/synapse/issues/4061))
+- Fix bug which prevented email notifications from being sent unless an absolute path was given for `email_templates`. ([\#4068](https://github.com/matrix-org/synapse/issues/4068))
+- Correctly account for cpu usage by background threads ([\#4074](https://github.com/matrix-org/synapse/issues/4074))
+- Fix race condition where config defined reserved users were not being added to
+ the monthly active user list prior to the homeserver reactor firing up ([\#4081](https://github.com/matrix-org/synapse/issues/4081))
+- Fix bug which prevented backslashes being used in event field filters ([\#4083](https://github.com/matrix-org/synapse/issues/4083))
+
+
+Internal Changes
+----------------
+
+- Add information about the [matrix-docker-ansible-deploy](https://github.com/spantaleev/matrix-docker-ansible-deploy) playbook ([\#3698](https://github.com/matrix-org/synapse/issues/3698))
+- Add initial implementation of new state resolution algorithm ([\#3786](https://github.com/matrix-org/synapse/issues/3786))
+- Reduce database load when fetching state groups ([\#4011](https://github.com/matrix-org/synapse/issues/4011))
+- Various cleanups in the federation client code ([\#4031](https://github.com/matrix-org/synapse/issues/4031))
+- Run the CircleCI builds in docker containers ([\#4041](https://github.com/matrix-org/synapse/issues/4041))
+- Only colourise synctl output when attached to tty ([\#4049](https://github.com/matrix-org/synapse/issues/4049))
+- Refactor room alias creation code ([\#4063](https://github.com/matrix-org/synapse/issues/4063))
+- Make the Python scripts in the top-level scripts folders meet pep8 and pass flake8. ([\#4068](https://github.com/matrix-org/synapse/issues/4068))
+- The README now contains example for the Caddy web server. Contributed by steamp0rt. ([\#4072](https://github.com/matrix-org/synapse/issues/4072))
+- Add psutil as an explicit dependency ([\#4073](https://github.com/matrix-org/synapse/issues/4073))
+- Clean up threading and logcontexts in pushers ([\#4075](https://github.com/matrix-org/synapse/issues/4075))
+- Correctly manage logcontexts during startup to fix some "Unexpected logging context" warnings ([\#4076](https://github.com/matrix-org/synapse/issues/4076))
+- Give some more things logcontexts ([\#4077](https://github.com/matrix-org/synapse/issues/4077))
+- Clean up some bits of code which were flagged by the linter ([\#4082](https://github.com/matrix-org/synapse/issues/4082))
+
+
+Synapse 0.33.7 (2018-10-18)
+===========================
+
+**Warning**: This release removes the example email notification templates from
+`res/templates` (they are now internal to the python package). This should only
+affect you if you (a) deploy your Synapse instance from a git checkout or a
+github snapshot URL, and (b) have email notifications enabled.
+
+If you have email notifications enabled, you should ensure that
+`email.template_dir` is either configured to point at a directory where you
+have installed customised templates, or leave it unset to use the default
+templates.
+
+Synapse 0.33.7rc2 (2018-10-17)
+==============================
+
+Features
+--------
+
+- Ship the example email templates as part of the package ([\#4052](https://github.com/matrix-org/synapse/issues/4052))
+
+Bugfixes
+--------
+
+- Fix bug which made get_missing_events return too few events ([\#4045](https://github.com/matrix-org/synapse/issues/4045))
+
+
+Synapse 0.33.7rc1 (2018-10-15)
+==============================
+
+Features
+--------
+
+- Add support for end-to-end key backup (MSC1687) ([\#4019](https://github.com/matrix-org/synapse/issues/4019))
+
+
+Bugfixes
+--------
+
+- Fix bug in event persistence logic which caused 'NoneType is not iterable' ([\#3995](https://github.com/matrix-org/synapse/issues/3995))
+- Fix exception in background metrics collection ([\#3996](https://github.com/matrix-org/synapse/issues/3996))
+- Fix exception handling in fetching remote profiles ([\#3997](https://github.com/matrix-org/synapse/issues/3997))
+- Fix handling of rejected threepid invites ([\#3999](https://github.com/matrix-org/synapse/issues/3999))
+- Workers now start on Python 3. ([\#4027](https://github.com/matrix-org/synapse/issues/4027))
+- Synapse now starts on Python 3.7. ([\#4033](https://github.com/matrix-org/synapse/issues/4033))
+
+
+Internal Changes
+----------------
+
+- Log exceptions in looping calls ([\#4008](https://github.com/matrix-org/synapse/issues/4008))
+- Optimisation for serving federation requests ([\#4017](https://github.com/matrix-org/synapse/issues/4017))
+- Add metric to count number of non-empty sync responses ([\#4022](https://github.com/matrix-org/synapse/issues/4022))
+
+
+Synapse 0.33.6 (2018-10-04)
+===========================
+
+Internal Changes
+----------------
+
+- Pin to prometheus_client<0.4 to avoid renaming all of our metrics ([\#4002](https://github.com/matrix-org/synapse/issues/4002))
+
+
+Synapse 0.33.6rc1 (2018-10-03)
+==============================
+
+Features
+--------
+
+- Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. ([\#3883](https://github.com/matrix-org/synapse/issues/3883))
+- Report "python_version" in the phone home stats ([\#3894](https://github.com/matrix-org/synapse/issues/3894))
+- Always LL ourselves if we're in a room ([\#3916](https://github.com/matrix-org/synapse/issues/3916))
+- Include eventid in log lines when processing incoming federation transactions ([\#3959](https://github.com/matrix-org/synapse/issues/3959))
+- Remove spurious check which made 'localhost' servers not work ([\#3964](https://github.com/matrix-org/synapse/issues/3964))
+
+
+Bugfixes
+--------
+
+- Fix problem when playing media from Chrome using direct URL (thanks @remjey!) ([\#3578](https://github.com/matrix-org/synapse/issues/3578))
+- support registering regular users non-interactively with register_new_matrix_user script ([\#3836](https://github.com/matrix-org/synapse/issues/3836))
+- Fix broken invite email links for self hosted riots ([\#3868](https://github.com/matrix-org/synapse/issues/3868))
+- Don't ratelimit autojoins ([\#3879](https://github.com/matrix-org/synapse/issues/3879))
+- Fix 500 error when deleting unknown room alias ([\#3889](https://github.com/matrix-org/synapse/issues/3889))
+- Fix some b'abcd' noise in logs and metrics ([\#3892](https://github.com/matrix-org/synapse/issues/3892), [\#3895](https://github.com/matrix-org/synapse/issues/3895))
+- When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers. ([\#3899](https://github.com/matrix-org/synapse/issues/3899))
+- Fix incorrect server-name indication for outgoing federation requests ([\#3907](https://github.com/matrix-org/synapse/issues/3907))
+- Fix adding client IPs to the database failing on Python 3. ([\#3908](https://github.com/matrix-org/synapse/issues/3908))
+- Fix bug where things occaisonally were not being timed out correctly. ([\#3910](https://github.com/matrix-org/synapse/issues/3910))
+- Fix bug where outbound federation would stop talking to some servers when using workers ([\#3914](https://github.com/matrix-org/synapse/issues/3914))
+- Fix some instances of ExpiringCache not expiring cache items ([\#3932](https://github.com/matrix-org/synapse/issues/3932), [\#3980](https://github.com/matrix-org/synapse/issues/3980))
+- Fix out-of-bounds error when LLing yourself ([\#3936](https://github.com/matrix-org/synapse/issues/3936))
+- Sending server notices regarding user consent now works on Python 3. ([\#3938](https://github.com/matrix-org/synapse/issues/3938))
+- Fix exceptions from metrics handler ([\#3956](https://github.com/matrix-org/synapse/issues/3956))
+- Fix error message for events with m.room.create missing from auth_events ([\#3960](https://github.com/matrix-org/synapse/issues/3960))
+- Fix errors due to concurrent monthly_active_user upserts ([\#3961](https://github.com/matrix-org/synapse/issues/3961))
+- Fix exceptions when processing incoming events over federation ([\#3968](https://github.com/matrix-org/synapse/issues/3968))
+- Replaced all occurences of e.message with str(e). Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970))
+- Fix lazy loaded sync in the presence of rejected state events ([\#3986](https://github.com/matrix-org/synapse/issues/3986))
+- Fix error when logging incomplete HTTP requests ([\#3990](https://github.com/matrix-org/synapse/issues/3990))
+
+
+Internal Changes
+----------------
+
+- Unit tests can now be run under PostgreSQL in Docker using ``test_postgresql.sh``. ([\#3699](https://github.com/matrix-org/synapse/issues/3699))
+- Speed up calculation of typing updates for replication ([\#3794](https://github.com/matrix-org/synapse/issues/3794))
+- Remove documentation regarding installation on Cygwin, the use of WSL is recommended instead. ([\#3873](https://github.com/matrix-org/synapse/issues/3873))
+- Fix typo in README, synaspse -> synapse ([\#3897](https://github.com/matrix-org/synapse/issues/3897))
+- Increase the timeout when filling missing events in federation requests ([\#3903](https://github.com/matrix-org/synapse/issues/3903))
+- Improve the logging when handling a federation transaction ([\#3904](https://github.com/matrix-org/synapse/issues/3904), [\#3966](https://github.com/matrix-org/synapse/issues/3966))
+- Improve logging of outbound federation requests ([\#3906](https://github.com/matrix-org/synapse/issues/3906), [\#3909](https://github.com/matrix-org/synapse/issues/3909))
+- Fix the docker image building on python 3 ([\#3911](https://github.com/matrix-org/synapse/issues/3911))
+- Add a regression test for logging failed HTTP requests on Python 3. ([\#3912](https://github.com/matrix-org/synapse/issues/3912))
+- Comments and interface cleanup for on_receive_pdu ([\#3924](https://github.com/matrix-org/synapse/issues/3924))
+- Fix spurious exceptions when remote http client closes conncetion ([\#3925](https://github.com/matrix-org/synapse/issues/3925))
+- Log exceptions thrown by background tasks ([\#3927](https://github.com/matrix-org/synapse/issues/3927))
+- Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933), [\#3991](https://github.com/matrix-org/synapse/issues/3991))
+- Automate pushes to docker hub ([\#3946](https://github.com/matrix-org/synapse/issues/3946))
+- Require attrs 16.0.0 or later ([\#3947](https://github.com/matrix-org/synapse/issues/3947))
+- Fix incompatibility with python3 on alpine ([\#3948](https://github.com/matrix-org/synapse/issues/3948))
+- Run the test suite on the oldest supported versions of our dependencies in CI. ([\#3952](https://github.com/matrix-org/synapse/issues/3952))
+- CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches. ([\#3957](https://github.com/matrix-org/synapse/issues/3957))
+- Fix docstrings and add tests for state store methods ([\#3958](https://github.com/matrix-org/synapse/issues/3958))
+- fix docstring for FederationClient.get_state_for_room ([\#3963](https://github.com/matrix-org/synapse/issues/3963))
+- Run notify_app_services as a bg process ([\#3965](https://github.com/matrix-org/synapse/issues/3965))
+- Clarifications in FederationHandler ([\#3967](https://github.com/matrix-org/synapse/issues/3967))
+- Further reduce the docker image size ([\#3972](https://github.com/matrix-org/synapse/issues/3972))
+- Build py3 docker images for docker hub too ([\#3976](https://github.com/matrix-org/synapse/issues/3976))
+- Updated the installation instructions to point to the matrix-synapse package on PyPI. ([\#3985](https://github.com/matrix-org/synapse/issues/3985))
+- Disable USE_FROZEN_DICTS for unittests by default. ([\#3987](https://github.com/matrix-org/synapse/issues/3987))
+- Remove unused Jenkins and development related files from the repo. ([\#3988](https://github.com/matrix-org/synapse/issues/3988))
+- Improve stacktraces in certain exceptions in the logs ([\#3989](https://github.com/matrix-org/synapse/issues/3989))
+
+
+Synapse 0.33.5.1 (2018-09-25)
+=============================
+
+Internal Changes
+----------------
+
+- Fix incompatibility with older Twisted version in tests. Thanks @OlegGirko! ([\#3940](https://github.com/matrix-org/synapse/issues/3940))
+
+
+Synapse 0.33.5 (2018-09-24)
+===========================
+
+No significant changes.
+
+
+Synapse 0.33.5rc1 (2018-09-17)
+==============================
+
+Features
+--------
+
+- Python 3.5 and 3.6 support is now in beta. ([\#3576](https://github.com/matrix-org/synapse/issues/3576))
+- Implement `event_format` filter param in `/sync` ([\#3790](https://github.com/matrix-org/synapse/issues/3790))
+- Add synapse_admin_mau:registered_reserved_users metric to expose number of real reaserved users ([\#3846](https://github.com/matrix-org/synapse/issues/3846))
+
+
+Bugfixes
+--------
+
+- Remove connection ID for replication prometheus metrics, as it creates a large number of new series. ([\#3788](https://github.com/matrix-org/synapse/issues/3788))
+- guest users should not be part of mau total ([\#3800](https://github.com/matrix-org/synapse/issues/3800))
+- Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. ([\#3804](https://github.com/matrix-org/synapse/issues/3804))
+- Fix existing room tags not coming down sync when joining a room ([\#3810](https://github.com/matrix-org/synapse/issues/3810))
+- Fix jwt import check ([\#3824](https://github.com/matrix-org/synapse/issues/3824))
+- fix VOIP crashes under Python 3 (#3821) ([\#3835](https://github.com/matrix-org/synapse/issues/3835))
+- Fix manhole so that it works with latest openssh clients ([\#3841](https://github.com/matrix-org/synapse/issues/3841))
+- Fix outbound requests occasionally wedging, which can result in federation breaking between servers. ([\#3845](https://github.com/matrix-org/synapse/issues/3845))
+- Show heroes if room name/canonical alias has been deleted ([\#3851](https://github.com/matrix-org/synapse/issues/3851))
+- Fix handling of redacted events from federation ([\#3859](https://github.com/matrix-org/synapse/issues/3859))
+- ([\#3874](https://github.com/matrix-org/synapse/issues/3874))
+- Mitigate outbound federation randomly becoming wedged ([\#3875](https://github.com/matrix-org/synapse/issues/3875))
+
+
+Internal Changes
+----------------
+
+- CircleCI tests now run on the potential merge of a PR. ([\#3704](https://github.com/matrix-org/synapse/issues/3704))
+- http/ is now ported to Python 3. ([\#3771](https://github.com/matrix-org/synapse/issues/3771))
+- Improve human readable error messages for threepid registration/account update ([\#3789](https://github.com/matrix-org/synapse/issues/3789))
+- Make /sync slightly faster by avoiding needless copies ([\#3795](https://github.com/matrix-org/synapse/issues/3795))
+- handlers/ is now ported to Python 3. ([\#3803](https://github.com/matrix-org/synapse/issues/3803))
+- Limit the number of PDUs/EDUs per federation transaction ([\#3805](https://github.com/matrix-org/synapse/issues/3805))
+- Only start postgres instance for postgres tests on Travis CI ([\#3806](https://github.com/matrix-org/synapse/issues/3806))
+- tests/ is now ported to Python 3. ([\#3808](https://github.com/matrix-org/synapse/issues/3808))
+- crypto/ is now ported to Python 3. ([\#3822](https://github.com/matrix-org/synapse/issues/3822))
+- rest/ is now ported to Python 3. ([\#3823](https://github.com/matrix-org/synapse/issues/3823))
+- add some logging for the keyring queue ([\#3826](https://github.com/matrix-org/synapse/issues/3826))
+- speed up lazy loading by 2-3x ([\#3827](https://github.com/matrix-org/synapse/issues/3827))
+- Improved Dockerfile to remove build requirements after building reducing the image size. ([\#3834](https://github.com/matrix-org/synapse/issues/3834))
+- Disable lazy loading for incremental syncs for now ([\#3840](https://github.com/matrix-org/synapse/issues/3840))
+- federation/ is now ported to Python 3. ([\#3847](https://github.com/matrix-org/synapse/issues/3847))
+- Log when we retry outbound requests ([\#3853](https://github.com/matrix-org/synapse/issues/3853))
+- Removed some excess logging messages. ([\#3855](https://github.com/matrix-org/synapse/issues/3855))
+- Speed up purge history for rooms that have been previously purged ([\#3856](https://github.com/matrix-org/synapse/issues/3856))
+- Refactor some HTTP timeout code. ([\#3857](https://github.com/matrix-org/synapse/issues/3857))
+- Fix running merged builds on CircleCI ([\#3858](https://github.com/matrix-org/synapse/issues/3858))
+- Fix typo in replication stream exception. ([\#3860](https://github.com/matrix-org/synapse/issues/3860))
+- Add in flight real time metrics for Measure blocks ([\#3871](https://github.com/matrix-org/synapse/issues/3871))
+- Disable buffering and automatic retrying in treq requests to prevent timeouts. ([\#3872](https://github.com/matrix-org/synapse/issues/3872))
+- mention jemalloc in the README ([\#3877](https://github.com/matrix-org/synapse/issues/3877))
+- Remove unmaintained "nuke-room-from-db.sh" script ([\#3888](https://github.com/matrix-org/synapse/issues/3888))
+
+
Synapse 0.33.4 (2018-09-07)
===========================
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index f9de78a4..6ef7d48d 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -30,12 +30,28 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
-We use `Jenkins <http://matrix.org/jenkins>`_ and
-`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
-integration. All pull requests to synapse get automatically tested by Travis;
-the Jenkins builds require an adminstrator to start them. If your change
-breaks the build, this will be shown in github, so please keep an eye on the
-pull request for feedback.
+We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
+<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
+pull requests to synapse get automatically tested by Travis and CircleCI.
+If your change breaks the build, this will be shown in GitHub, so please
+keep an eye on the pull request for feedback.
+
+To run unit tests in a local development environment, you can use:
+
+- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
+ SQLite-backed Synapse on Python 2.7.
+- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
+- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
+- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
+ (requires a running local PostgreSQL with access to create databases).
+- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
+ (requires Docker). Entirely self-contained, recommended if you don't want to
+ set up PostgreSQL yourself.
+
+Docker images are available for running the integration tests (SyTest) locally,
+see the `documentation in the SyTest repo
+<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
+information.
Code style
~~~~~~~~~~
@@ -77,7 +93,8 @@ AUTHORS.rst file for the project in question. Please feel free to include a
change to AUTHORS.rst in your pull request to list yourself and a short
description of the area(s) you've worked on. Also, we sometimes have swag to
give away to contributors - if you feel that Matrix-branded apparel is missing
-from your life, please mail us your shipping address to matrix at matrix.org and we'll try to fix it :)
+from your life, please mail us your shipping address to matrix at matrix.org and
+we'll try to fix it :)
Sign off
~~~~~~~~
@@ -144,4 +161,9 @@ flag to ``git commit``, which uses the name and email set in your
Conclusion
~~~~~~~~~~
-That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!
+That's it! Matrix is a very open and collaborative project as you might expect
+given our obsession with open communication. If we're going to successfully
+matrix together all the fragmented communication technologies out there we are
+reliant on contributions and collaboration from the community to do so. So
+please get involved - and we hope you have as much fun hacking on Matrix as we
+do!
diff --git a/MANIFEST.in b/MANIFEST.in
index e0826ba5..25cdf0a6 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,23 +12,20 @@ recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py
recursive-include docs *
-recursive-include res *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
+recursive-include synapse/res *
recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js
-exclude jenkins.sh
-exclude jenkins*.sh
-exclude jenkins*
exclude Dockerfile
exclude .dockerignore
-recursive-exclude jenkins *.sh
+exclude test_postgresql.sh
include pyproject.toml
recursive-include changelog.d *
@@ -37,3 +34,6 @@ prune .github
prune demo/etc
prune docker
prune .circleci
+
+exclude jenkins*
+recursive-exclude jenkins *.sh
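With the email templates moved under ``synapse/res`` and the MANIFEST updated to include
them, one way to sanity-check a source distribution is to build it and list its contents.
A sketch, assuming a checkout of this tree (the tarball name depends on the version being
built)::

    python setup.py sdist
    tar -tzf dist/matrix-synapse-*.tar.gz | grep 'res/templates'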
diff --git a/MAP.rst b/MAP.rst
deleted file mode 100644
index 0f8e9818..00000000
--- a/MAP.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-Directory Structure
-===================
-
-Warning: this may be a bit stale...
-
-::
-
- .
- ├── cmdclient Basic CLI python Matrix client
- ├── demo Scripts for running standalone Matrix demos
- ├── docs All doc, including the draft Matrix API spec
- │   ├── client-server The client-server Matrix API spec
- │   ├── model Domain-specific elements of the Matrix API spec
- │   ├── server-server The server-server model of the Matrix API spec
- │   └── sphinx The internal API doc of the Synapse homeserver
- ├── experiments Early experiments of using Synapse's internal APIs
- ├── graph Visualisation of Matrix's distributed message store
- ├── synapse The reference Matrix homeserver implementation
- │   ├── api Common building blocks for the APIs
- │   │   ├── events Definition of state representation Events
- │   │   └── streams Definition of streamable Event objects
- │   ├── app The __main__ entry point for the homeserver
- │   ├── crypto The PKI client/server used for secure federation
- │   │   └── resource PKI helper objects (e.g. keys)
- │   ├── federation Server-server state replication logic
- │   ├── handlers The main business logic of the homeserver
- │   ├── http Wrappers around Twisted's HTTP server & client
- │   ├── rest Servlet-style RESTful API
- │   ├── storage Persistence subsystem (currently only sqlite3)
- │   │   └── schema sqlite persistence schema
- │   └── util Synapse-specific utilities
- ├── tests Unit tests for the Synapse homeserver
- └── webclient Basic AngularJS Matrix web client
-
-
diff --git a/README.rst b/README.rst
index d6f34ba9..9165db83 100644
--- a/README.rst
+++ b/README.rst
@@ -81,7 +81,7 @@ Thanks for using Matrix!
Synapse Installation
====================
-Synapse is the reference python/twisted Matrix homeserver implementation.
+Synapse is the reference Python/Twisted Matrix homeserver implementation.
System requirements:
@@ -91,12 +91,13 @@ System requirements:
Installing from source
----------------------
+
(Prebuilt packages are available for some platforms - see `Platform-Specific
Instructions`_.)
-Synapse is written in python but some of the libraries it uses are written in
-C. So before we can install synapse itself we need a working C compiler and the
-header files for python C extensions.
+Synapse is written in Python but some of the libraries it uses are written in
+C. So before we can install Synapse itself we need a working C compiler and the
+header files for Python C extensions.
Installing prerequisites on Ubuntu or Debian::
@@ -143,21 +144,27 @@ Installing prerequisites on OpenBSD::
doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
libxslt
-To install the synapse homeserver run::
+To install the Synapse homeserver run::
virtualenv -p python2.7 ~/.synapse
source ~/.synapse/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
- pip install https://github.com/matrix-org/synapse/tarball/master
+ pip install matrix-synapse
-This installs synapse, along with the libraries it uses, into a virtual
+This installs Synapse, along with the libraries it uses, into a virtual
environment under ``~/.synapse``. Feel free to pick a different directory
if you prefer.
+This Synapse installation can then be later upgraded by using pip again with the
+update flag::
+
+ source ~/.synapse/bin/activate
+ pip install -U matrix-synapse
+
In case of problems, please see the _`Troubleshooting` section below.
-There is an offical synapse image available at
+There is an offical synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
the docker-compose file available at `contrib/docker <contrib/docker>`_. Further information on
this including configuration options is available in the README on
@@ -167,7 +174,13 @@ Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
https://hub.docker.com/r/avhost/docker-matrix/tags/
-Configuring synapse
+Slavi Pantaleev has created an Ansible playbook,
+which installs the offical Docker image of Matrix Synapse
+along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
+For more details, see
+https://github.com/spantaleev/matrix-docker-ansible-deploy
+
+Configuring Synapse
-------------------
Before you can start Synapse, you will need to generate a configuration
@@ -249,26 +262,6 @@ Setting up a TURN server
For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See `<docs/turn-howto.rst>`_ for details.
-IPv6
-----
-
-As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph
-for providing PR #1696.
-
-However, for federation to work on hosts with IPv6 DNS servers you **must**
-be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002
-for details. We can't make Synapse depend on Twisted 17.1 by default
-yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909)
-so if you are using operating system dependencies you'll have to install your
-own Twisted 17.1 package via pip or backports etc.
-
-If you're running in a virtualenv then pip should have installed the newest
-Twisted automatically, but if your virtualenv is old you will need to manually
-upgrade to a newer Twisted dependency via:
-
- pip install Twisted>=17.1.0
-
-
Running Synapse
===============
@@ -444,8 +437,7 @@ settings require a slightly more difficult installation process.
using the ``.`` command, rather than ``bash``'s ``source``.
5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
webpages for their titles.
-6) Use ``pip`` to install this repository: ``pip install
- https://github.com/matrix-org/synapse/tarball/master``
+6) Use ``pip`` to install this repository: ``pip install matrix-synapse``
7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
chance of a compromised Synapse server being used to take over your box.
@@ -459,37 +451,13 @@ https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-
Windows Install
---------------
-Synapse can be installed on Cygwin. It requires the following Cygwin packages:
-
-- gcc
-- git
-- libffi-devel
-- openssl (and openssl-devel, python-openssl)
-- python
-- python-setuptools
-
-The content repository requires additional packages and will be unable to process
-uploads without them:
-
-- libjpeg8
-- libjpeg8-devel
-- zlib
-
-If you choose to install Synapse without these packages, you will need to reinstall
-``pillow`` for changes to be applied, e.g. ``pip uninstall pillow`` ``pip install
-pillow --user``
-
-Troubleshooting:
-
-- You may need to upgrade ``setuptools`` to get this to work correctly:
- ``pip install setuptools --upgrade``.
-- You may encounter errors indicating that ``ffi.h`` is missing, even with
- ``libffi-devel`` installed. If you do, copy the ``.h`` files:
- ``cp /usr/lib/libffi-3.0.13/include/*.h /usr/include``
-- You may need to install libsodium from source in order to install PyNacl. If
- you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
- it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``
+If you wish to run or develop Synapse on Windows, the Windows Subsystem For
+Linux provides a Linux environment on Windows 10 which is capable of using the
+Debian, Fedora, or source installation methods. More information about WSL can
+be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
+Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
+for Windows Server.
Troubleshooting
===============
@@ -497,7 +465,7 @@ Troubleshooting
Troubleshooting Installation
----------------------------
-Synapse requires pip 1.7 or later, so if your OS provides too old a version you
+Synapse requires pip 8 or later, so if your OS provides too old a version you
may need to manually upgrade it::
sudo pip install --upgrade pip
@@ -532,28 +500,6 @@ failing, e.g.::
pip install twisted
-On OS X, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
-will need to export CFLAGS=-Qunused-arguments.
-
-Troubleshooting Running
------------------------
-
-If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
-to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
-encryption and digital signatures.
-Unfortunately PyNACL currently has a few issues
-(https://github.com/pyca/pynacl/issues/53) and
-(https://github.com/pyca/pynacl/issues/79) that mean it may not install
-correctly, causing all tests to fail with errors about missing "sodium.h". To
-fix try re-installing from PyPI or directly from
-(https://github.com/pyca/pynacl)::
-
- # Install from PyPI
- pip install --user --upgrade --force pynacl
-
- # Install from github
- pip install --user https://github.com/pyca/pynacl/tarball/master
-
Running out of File Handles
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -711,7 +657,8 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
-`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
+`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
+`Caddy <https://caddyserver.com/docs/proxy>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
@@ -742,6 +689,26 @@ so an example nginx configuration might look like::
}
}
+An example Caddy configuration might look like::
+
+ matrix.example.com {
+ proxy /_matrix http://localhost:8008 {
+ transparent
+ }
+ }
+
+and an example Apache configuration might look like::
+
+ <VirtualHost *:443>
+ SSLEngine on
+    ServerName matrix.example.com
+
+ <Location /_matrix>
+ ProxyPass http://127.0.0.1:8008/_matrix nocanon
+ ProxyPassReverse http://127.0.0.1:8008/_matrix
+ </Location>
+ </VirtualHost>
+
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.
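
Once the proxy is in place, a quick way to confirm that requests are being
passed through (a sketch, assuming the ``matrix.example.com`` name used in the
examples above) is to fetch the client versions endpoint via the proxy; it
should return a small JSON list of supported client-server API versions::

    curl https://matrix.example.com/_matrix/client/versions
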
@@ -896,7 +863,7 @@ to install using pip and a virtualenv::
virtualenv -p python2.7 env
source env/bin/activate
- python synapse/python_dependencies.py | xargs pip install
+ python -m synapse.python_dependencies | xargs pip install
pip install lxml mock
This will run a process of downloading and installing all the needed
@@ -951,5 +918,13 @@ variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory-constrained environments, or increased if performance starts to
degrade.
+Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
+improvement in overall memory use, and especially in terms of giving back RAM
+to the OS. To use it, the library must simply be put in the LD_PRELOAD
+environment variable when launching Synapse. On Debian, this can be done
+by installing the ``libjemalloc1`` package and adding this line to
+``/etc/default/matrix-synapse``::
+
+ LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
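
On other distributions, or when starting Synapse by hand rather than through the
Debian service scripts, the same effect can be had by setting the variable for
the launch command. A minimal sketch (the library path shown is the Debian one
from above and will differ on other platforms)::

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1 synctl start
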
.. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys
diff --git a/UPGRADE.rst b/UPGRADE.rst
index f6bb1070..55c77eed 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -18,7 +18,7 @@ instructions that may be required are listed later in this document.
.. code:: bash
- pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
+ pip install --upgrade --process-dependency-links matrix-synapse
# restart synapse
synctl restart
@@ -48,11 +48,24 @@ returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
-Upgrading to $NEXT_VERSION
+Upgrading to v0.33.7
+====================
+
+This release removes the example email notification templates from
+``res/templates`` (they are now internal to the python package). This should
+only affect you if you (a) deploy your Synapse instance from a git checkout or
+a github snapshot URL, and (b) have email notifications enabled.
+
+If you have email notifications enabled, you should ensure that
+``email.template_dir`` is either configured to point at a directory where you
+have installed customised templates, or leave it unset to use the default
+templates.
+
+Upgrading to v0.27.3
====================
This release expands the anonymous usage stats sent if the opt-in
-``report_stats`` configuration is set to ``true``. We now capture RSS memory
+``report_stats`` configuration is set to ``true``. We now capture RSS memory
and cpu use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 3a8dfbae..2c1f0671 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -6,9 +6,11 @@ version: '3'
services:
synapse:
- build: ../..
+ build:
+ context: ../..
+ dockerfile: docker/Dockerfile
image: docker.io/matrixdotorg/synapse:latest
- # Since snyapse does not retry to connect to the database, restart upon
+ # Since synapse does not retry to connect to the database, restart upon
# failure
restart: unless-stopped
# See the readme for a full documentation of the environment settings
@@ -47,4 +49,4 @@ services:
# You may store the database tables in a local folder..
- ./schemas:/var/lib/postgresql/data
# .. or store them on some high performance storage for better results
- # - /path/to/ssd/storage:/var/lib/postfesql/data
+ # - /path/to/ssd/storage:/var/lib/postgresql/data
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index c5861259..dc3f4a1d 100644
--- a/contrib/grafana/synapse.json
+++ b/contrib/grafana/synapse.json
@@ -14,7 +14,7 @@
"type": "grafana",
"id": "grafana",
"name": "Grafana",
- "version": "5.2.0"
+ "version": "5.2.4"
},
{
"type": "panel",
@@ -54,7 +54,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
- "iteration": 1533598785368,
+ "iteration": 1537878047048,
"links": [
{
"asDropdown": true,
@@ -86,7 +86,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
@@ -118,7 +118,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "process_cpu_seconds:rate2m{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} ",
@@ -179,7 +179,7 @@
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"gridPos": {
"h": 9,
"w": 12,
@@ -525,7 +525,7 @@
"x": 0,
"y": 25
},
- "id": 48,
+ "id": 50,
"legend": {
"avg": false,
"current": false,
@@ -549,8 +549,9 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])",
+ "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])",
"format": "time_series",
+ "interval": "",
"intervalFactor": 2,
"legendFormat": "{{job}}-{{index}}",
"refId": "A",
@@ -560,7 +561,7 @@
"thresholds": [],
"timeFrom": null,
"timeShift": null,
- "title": "Avg time waiting for db conn",
+ "title": "Avg reactor tick time",
"tooltip": {
"shared": true,
"sort": 0,
@@ -576,12 +577,11 @@
},
"yaxes": [
{
- "decimals": null,
"format": "s",
- "label": "",
+ "label": null,
"logBase": 1,
"max": null,
- "min": "0",
+ "min": null,
"show": true
},
{
@@ -604,6 +604,7 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan",
"fill": 1,
"gridPos": {
"h": 7,
@@ -611,7 +612,7 @@
"x": 12,
"y": 25
},
- "id": 49,
+ "id": 105,
"legend": {
"avg": false,
"current": false,
@@ -629,33 +630,47 @@
"pointradius": 5,
"points": false,
"renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "/^up/",
- "legend": false,
- "yaxis": 2
- }
- ],
+ "seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
- "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
+ "legendFormat": "{{job}}-{{index}} 99%",
"refId": "A",
"step": 20
+ },
+ {
+ "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} 95%",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} 90%",
+ "refId": "C"
+ },
+ {
+ "expr": "",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "D"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
- "title": "Prometheus scrape time",
+ "title": "Reactor tick quantiles",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -673,16 +688,15 @@
"label": null,
"logBase": 1,
"max": null,
- "min": "0",
+ "min": null,
"show": true
},
{
- "decimals": 0,
- "format": "none",
- "label": "",
+ "format": "short",
+ "label": null,
"logBase": 1,
- "max": "0",
- "min": "-1",
+ "max": null,
+ "min": null,
"show": false
}
],
@@ -697,14 +711,14 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fill": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 32
},
- "id": 50,
+ "id": 53,
"legend": {
"avg": false,
"current": false,
@@ -728,19 +742,17 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])",
+ "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
- "interval": "",
"intervalFactor": 2,
"legendFormat": "{{job}}-{{index}}",
- "refId": "A",
- "step": 20
+ "refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
- "title": "Avg reactor tick time",
+ "title": "Up",
"tooltip": {
"shared": true,
"sort": 0,
@@ -756,7 +768,7 @@
},
"yaxes": [
{
- "format": "s",
+ "format": "short",
"label": null,
"logBase": 1,
"max": null,
@@ -769,7 +781,7 @@
"logBase": 1,
"max": null,
"min": null,
- "show": false
+ "show": true
}
],
"yaxis": {
@@ -783,26 +795,19 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
- "editable": true,
- "error": false,
"fill": 1,
- "grid": {},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 32
},
- "id": 5,
+ "id": 49,
"legend": {
- "alignAsTable": false,
"avg": false,
"current": false,
- "hideEmpty": false,
- "hideZero": false,
"max": false,
"min": false,
- "rightSide": false,
"show": true,
"total": false,
"values": false
@@ -817,10 +822,9 @@
"renderer": "flot",
"seriesOverrides": [
{
- "alias": "/user/"
- },
- {
- "alias": "/system/"
+ "alias": "/^up/",
+ "legend": false,
+ "yaxis": 2
}
],
"spaceLength": 10,
@@ -828,44 +832,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} system ",
- "metric": "",
- "refId": "B",
- "step": 20
- },
- {
- "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
- "hide": false,
"interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} user",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}}",
"refId": "A",
"step": 20
}
],
- "thresholds": [
- {
- "colorMode": "custom",
- "line": true,
- "lineColor": "rgba(216, 200, 27, 0.27)",
- "op": "gt",
- "value": 0.5
- },
- {
- "colorMode": "custom",
- "line": true,
- "lineColor": "rgba(234, 112, 112, 0.22)",
- "op": "gt",
- "value": 0.8
- }
- ],
+ "thresholds": [],
"timeFrom": null,
"timeShift": null,
- "title": "CPU",
+ "title": "Prometheus scrape time",
"tooltip": {
"shared": true,
"sort": 0,
@@ -881,20 +860,21 @@
},
"yaxes": [
{
- "decimals": null,
- "format": "percentunit",
- "label": "",
+ "format": "s",
+ "label": null,
"logBase": 1,
- "max": "1.2",
- "min": 0,
+ "max": null,
+ "min": "0",
"show": true
},
{
- "format": "short",
+ "decimals": 0,
+ "format": "none",
+ "label": "",
"logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "max": "0",
+ "min": "-1",
+ "show": false
}
],
"yaxis": {
@@ -907,20 +887,27 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
- "fill": 0,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 39
},
- "id": 53,
+ "id": 5,
"legend": {
+ "alignAsTable": false,
"avg": false,
"current": false,
+ "hideEmpty": false,
+ "hideZero": false,
"max": false,
"min": false,
+ "rightSide": false,
"show": true,
"total": false,
"values": false
@@ -933,23 +920,57 @@
"pointradius": 5,
"points": false,
"renderer": "flot",
- "seriesOverrides": [],
+ "seriesOverrides": [
+ {
+ "alias": "/user/"
+ },
+ {
+ "alias": "/system/"
+ }
+ ],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
- "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
- "refId": "A"
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} system ",
+ "metric": "",
+ "refId": "B",
+ "step": 20
+ },
+ {
+ "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} user",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "line": true,
+ "lineColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 0.5
+ },
+ {
+ "colorMode": "custom",
+ "line": true,
+ "lineColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 0.8
}
],
- "thresholds": [],
"timeFrom": null,
"timeShift": null,
- "title": "Up",
+ "title": "CPU",
"tooltip": {
"shared": true,
"sort": 0,
@@ -965,16 +986,16 @@
},
"yaxes": [
{
- "format": "short",
- "label": null,
+ "decimals": null,
+ "format": "percentunit",
+ "label": "",
"logBase": 1,
- "max": null,
- "min": null,
+ "max": "1.2",
+ "min": 0,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
"max": null,
"min": null,
@@ -1013,7 +1034,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 49
+ "y": 47
},
"id": 40,
"legend": {
@@ -1098,7 +1119,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 49
+ "y": 47
},
"id": 46,
"legend": {
@@ -1187,7 +1208,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 56
+ "y": 54
},
"id": 44,
"legend": {
@@ -1276,7 +1297,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 56
+ "y": 54
},
"id": 45,
"legend": {
@@ -1383,7 +1404,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 48
+ "y": 62
},
"id": 4,
"legend": {
@@ -1490,7 +1511,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 48
+ "y": 62
},
"id": 32,
"legend": {
@@ -1578,7 +1599,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 56
+ "y": 70
},
"id": 23,
"legend": {
@@ -1688,7 +1709,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 56
+ "y": 70
},
"id": 52,
"legend": {
@@ -1795,7 +1816,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 64
+ "y": 78
},
"id": 7,
"legend": {
@@ -1886,7 +1907,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 64
+ "y": 78
},
"id": 47,
"legend": {
@@ -1969,13 +1990,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 72
+ "y": 86
},
"id": 103,
"legend": {
@@ -2069,13 +2090,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 23
+ "y": 49
},
"id": 99,
"legend": {
@@ -2154,13 +2175,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
- "y": 23
+ "y": 49
},
"id": 101,
"legend": {
@@ -2186,17 +2207,24 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
+ "hide": false,
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{name}}",
"refId": "A"
+ },
+ {
+ "expr": "",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
- "title": "DB usage by background jobs",
+ "title": "DB usage by background jobs (including scheduling time)",
"tooltip": {
"shared": true,
"sort": 0,
@@ -2252,13 +2280,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 25
+ "y": 64
},
"id": 79,
"legend": {
@@ -2336,13 +2364,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
- "y": 25
+ "y": 64
},
"id": 83,
"legend": {
@@ -2447,7 +2475,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 23
+ "y": 65
},
"id": 51,
"legend": {
@@ -2551,6 +2579,194 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 24
+ },
+ "id": 48,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Avg time waiting for db conn",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "s",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 24
+ },
+ "id": 104,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{job}} {{index}} 99%",
+ "refId": "A",
+ "step": 20
+ },
+ {
+ "expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}} {{index}} 95%",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}} {{index}} 90%",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Db scheduling time quantiles",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "s",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2559,7 +2775,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 25
+ "y": 31
},
"id": 10,
"legend": {
@@ -2648,7 +2864,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 25
+ "y": 31
},
"id": 11,
"legend": {
@@ -2672,11 +2888,11 @@
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
- "stack": false,
+ "stack": true,
"steppedLine": true,
"targets": [
{
- "expr": "topk(5, rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"instant": false,
"interval": "",
@@ -2753,7 +2969,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 17
+ "y": 60
},
"id": 12,
"legend": {
@@ -2841,7 +3057,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 17
+ "y": 60
},
"id": 26,
"legend": {
@@ -2929,7 +3145,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 30
+ "y": 73
},
"id": 13,
"legend": {
@@ -3017,7 +3233,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 30
+ "y": 73
},
"id": 27,
"legend": {
@@ -3105,7 +3321,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 43
+ "y": 86
},
"id": 28,
"legend": {
@@ -3192,7 +3408,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 43
+ "y": 86
},
"id": 25,
"legend": {
@@ -3295,7 +3511,7 @@
"h": 10,
"w": 12,
"x": 0,
- "y": 55
+ "y": 68
},
"id": 1,
"legend": {
@@ -3387,7 +3603,7 @@
"h": 10,
"w": 12,
"x": 12,
- "y": 55
+ "y": 68
},
"id": 8,
"legend": {
@@ -3477,7 +3693,7 @@
"h": 10,
"w": 12,
"x": 0,
- "y": 65
+ "y": 78
},
"id": 38,
"legend": {
@@ -3563,7 +3779,7 @@
"h": 10,
"w": 12,
"x": 12,
- "y": 65
+ "y": 78
},
"id": 39,
"legend": {
@@ -3643,13 +3859,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 75
+ "y": 88
},
"id": 65,
"legend": {
@@ -3745,13 +3961,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 90
+ "y": 27
},
"id": 91,
"legend": {
@@ -3841,7 +4057,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 90
+ "y": 27
},
"id": 21,
"legend": {
@@ -3920,13 +4136,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 99
+ "y": 36
},
"id": 89,
"legend": {
@@ -4006,13 +4222,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
- "y": 99
+ "y": 36
},
"id": 93,
"legend": {
@@ -4027,7 +4243,7 @@
"lines": true,
"linewidth": 1,
"links": [],
- "nullPointMode": "null",
+ "nullPointMode": "connected",
"percentage": false,
"pointradius": 5,
"points": false,
@@ -4090,13 +4306,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 108
+ "y": 45
},
"id": 95,
"legend": {
@@ -4189,7 +4405,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 108
+ "y": 45
},
"heatmap": {},
"highlightCards": true,
@@ -4251,13 +4467,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 19
+ "y": 97
},
"id": 2,
"legend": {
@@ -4357,20 +4573,24 @@
"min": null,
"show": true
}
- ]
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 19
+ "y": 97
},
"id": 41,
"legend": {
@@ -4439,20 +4659,24 @@
"min": null,
"show": true
}
- ]
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 26
+ "y": 104
},
"id": 42,
"legend": {
@@ -4520,20 +4744,24 @@
"min": null,
"show": true
}
- ]
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 26
+ "y": 104
},
"id": 43,
"legend": {
@@ -4601,7 +4829,11 @@
"min": null,
"show": true
}
- ]
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
}
],
"repeat": null,
@@ -4623,7 +4855,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
@@ -4644,7 +4876,7 @@
"lines": true,
"linewidth": 1,
"links": [],
- "nullPointMode": "null",
+ "nullPointMode": "connected",
"percentage": false,
"pointradius": 5,
"points": false,
@@ -4708,7 +4940,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": "$datasource",
"fill": 1,
"gridPos": {
"h": 9,
@@ -4729,7 +4961,7 @@
"lines": true,
"linewidth": 1,
"links": [],
- "nullPointMode": "null",
+ "nullPointMode": "connected",
"percentage": false,
"pointradius": 5,
"points": false,
@@ -4856,9 +5088,19 @@
"selected": false,
"text": "5m",
"value": "5m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "15m",
+ "value": "15m"
}
],
- "query": "30s,1m,2m,5m",
+ "query": "30s,1m,2m,5m,10m,15m",
"refresh": 2,
"type": "interval"
},
@@ -4872,7 +5114,7 @@
"multi": false,
"name": "instance",
"options": [],
- "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, instance)",
+ "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)",
"refresh": 2,
"regex": "",
"sort": 0,
@@ -4895,7 +5137,7 @@
"multiFormat": "regex values",
"name": "job",
"options": [],
- "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, job)",
+ "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)",
"refresh": 2,
"refresh_on_load": false,
"regex": "",
@@ -4919,7 +5161,7 @@
"multiFormat": "regex values",
"name": "index",
"options": [],
- "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, index)",
+ "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)",
"refresh": 2,
"refresh_on_load": false,
"regex": "",
@@ -4965,5 +5207,5 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
- "version": 127
+ "version": 3
} \ No newline at end of file
diff --git a/contrib/purge_api/README.md b/contrib/purge_api/README.md
new file mode 100644
index 00000000..000bf35c
--- /dev/null
+++ b/contrib/purge_api/README.md
@@ -0,0 +1,16 @@
+Purge history API examples
+==========================
+
+# `purge_history.sh`
+
+A bash script that uses the [purge history API](/docs/admin_api/README.rst) to
+purge all messages in a list of rooms up to a certain event. You can select a
+timeframe or a number of messages that you want to keep in the room.
+
+Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
+the script.
+
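Once those variables are set, a typical run (an illustrative sketch; like the
script itself, it assumes it is executed on the homeserver host with local
access to the synapse database) is simply:

```
bash purge_history.sh
```
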
+# `purge_remote_media.sh`
+
+A bash script that uses the [purge remote media API](/docs/admin_api/README.rst) to
+purge all old cached remote media.
diff --git a/contrib/purge_api/purge_history.sh b/contrib/purge_api/purge_history.sh
new file mode 100644
index 00000000..e7dd5d64
--- /dev/null
+++ b/contrib/purge_api/purge_history.sh
@@ -0,0 +1,141 @@
+#!/bin/bash
+
+# this script will use the api:
+# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
+#
+# It will purge all messages in a list of rooms up to a certain event
+
+###################################################################################################
+# define your domain and admin user
+###################################################################################################
+# set this to your server's domain:
+DOMAIN=yourserver.tld
+# add this user as admin in your home server:
+ADMIN="@you_admin_username:$DOMAIN"
+
+API_URL="$DOMAIN:8008/_matrix/client/r0"
+
+###################################################################################################
+# choose the rooms to prune old messages from (a free-form comment can be appended to each entry after a '#')
+###################################################################################################
+# you can get the room IDs e.g. from your Riot client's "View Source" button on each message
+ROOMS_ARRAY=(
+'!DgvjtOljKujDBrxyHk:matrix.org#riot:matrix.org'
+'!QtykxKocfZaZOUrTwp:matrix.org#Matrix HQ'
+)
+
+# ALTERNATIVELY:
+# you can select all the rooms that are not encrypted and loop over the result:
+# SELECT room_id FROM rooms WHERE room_id NOT IN (SELECT DISTINCT room_id FROM events WHERE type ='m.room.encrypted')
+# or
+# select all rooms with at least 100 members:
+# SELECT q.room_id FROM (select count(*) as numberofusers, room_id FROM current_state_events WHERE type ='m.room.member'
+# GROUP BY room_id) AS q LEFT JOIN room_aliases a ON q.room_id=a.room_id WHERE q.numberofusers > 100 ORDER BY numberofusers desc
+
+###################################################################################################
+# determine the EVENT_ID before which messages should be pruned
+###################################################################################################
+# choose a time before which the messages should be pruned:
+TIME='12 months ago'
+# ALTERNATIVELY:
+# a certain time:
+# TIME='2016-08-31 23:59:59'
+
+# creates a timestamp from the given time string:
+UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")
+
+# ALTERNATIVELY:
+# prune all messages that are older than 1000 messages ago:
+# LAST_MESSAGES=1000
+# SQL_GET_EVENT="SELECT event_id from events WHERE type='m.room.message' AND room_id ='$ROOM' ORDER BY received_ts DESC LIMIT 1 offset $(($LAST_MESSAGES - 1))"
+
+# ALTERNATIVELY:
+# select the EVENT_ID manually:
+#EVENT_ID='$1471814088343495zpPNI:matrix.org' # an example event from 21st of Aug 2016 by Matthew
+
+###################################################################################################
+# make the admin user a server admin in the database with
+###################################################################################################
+# psql -A -t --dbname=synapse -c "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"
+
+###################################################################################################
+# database function
+###################################################################################################
+sql (){
+ # for sqlite3:
+ #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
+ # for postgres:
+ psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
+}
+
+###################################################################################################
+# get an access token
+###################################################################################################
+# for example externally by watching Riot in your browser's network inspector
+# or internally on the server locally, use this:
+TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
+AUTH="Authorization: Bearer $TOKEN"
+
+###################################################################################################
+# check if your TOKEN works. For example, this works:
+###################################################################################################
+# $ curl --header "$AUTH" "$API_URL/rooms/$ROOM/state/m.room.power_levels"
+
+###################################################################################################
+# finally start pruning the room:
+###################################################################################################
+POSTDATA='{"delete_local_events":"true"}' # this will really delete local events, so the messages in the room really disappear unless they are restored by remote federation
+
+for ROOM in "${ROOMS_ARRAY[@]}"; do
+ echo "########################################### $(date) ################# "
+ echo "pruning room: $ROOM ..."
+ ROOM=${ROOM%#*}
+ #set -x
+ echo "check for alias in db..."
+ # for postgres:
+ sql "SELECT * FROM room_aliases WHERE room_id='$ROOM'"
+ echo "get event..."
+ # for postgres:
+ EVENT_ID=$(sql "SELECT event_id FROM events WHERE type='m.room.message' AND received_ts<'$UNIX_TIMESTAMP' AND room_id='$ROOM' ORDER BY received_ts DESC LIMIT 1;")
+ if [ "$EVENT_ID" == "" ]; then
+ echo "no event $TIME"
+ else
+ echo "event: $EVENT_ID"
+ SLEEP=2
+ set -x
+ # call purge
+    OUT=$(curl --header "$AUTH" -s -d "$POSTDATA" -X POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
+ PURGE_ID=$(echo "$OUT" |grep purge_id|cut -d'"' -f4 )
+ if [ "$PURGE_ID" == "" ]; then
+ # probably the history purge is already in progress for $ROOM
+ : "continuing with next room"
+ else
+ while : ; do
+ # get status of purge and sleep longer each time if still active
+ sleep $SLEEP
+        STATUS=$(curl --header "$AUTH" -s "$API_URL/admin/purge_history_status/$PURGE_ID" |grep status|cut -d'"' -f4)
+ : "$ROOM --> Status: $STATUS"
+ [[ "$STATUS" == "active" ]] || break
+ SLEEP=$((SLEEP + 1))
+ done
+ fi
+ set +x
+ sleep 1
+ fi
+done
+
+
+###################################################################################################
+# additionally
+###################################################################################################
+# To reclaim disk space after pruning large amounts of data, you need to call VACUUM to free the unused space.
+# This can take a very long time (hours) and the server has to be stopped while you do so:
+# $ synctl stop
+# $ sqlite3 -line homeserver.db "vacuum;"
+# $ synctl start
+
+# This pragma can be set so that you don't need to run VACUUM manually every time after deleting rows:
+# $ sqlite3 homeserver.db "PRAGMA auto_vacuum = FULL;"
+# be cautious, it could make the database somewhat slow if there are a lot of deletions
+
+exit
diff --git a/contrib/purge_api/purge_remote_media.sh b/contrib/purge_api/purge_remote_media.sh
new file mode 100644
index 00000000..99c07c66
--- /dev/null
+++ b/contrib/purge_api/purge_remote_media.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+DOMAIN=yourserver.tld
+# add this user as admin in your home server:
+ADMIN="@you_admin_username:$DOMAIN"
+
+API_URL="$DOMAIN:8008/_matrix/client/r0"
+
+# choose a time before which cached remote media should be pruned:
+# TIME='2016-08-31 23:59:59'
+TIME='12 months ago'
+
+# creates a timestamp from the given time string:
+UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")
+
+
+###################################################################################################
+# database function
+###################################################################################################
+sql (){
+ # for sqlite3:
+ #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
+ # for postgres:
+ psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
+}
+
+###############################################################################
+# make the admin user a server admin in the database with
+###############################################################################
+# sql "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"
+
+###############################################################################
+# get an access token
+###############################################################################
+# for example externally by watching Riot in your browser's network inspector
+# or internally on the server locally, use this:
+TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
+
+###############################################################################
+# check, if your TOKEN works. For example this works:
+###############################################################################
+# curl --header "Authorization: Bearer $TOKEN" "$API_URL/rooms/$ROOM/state/m.room.power_levels"
+
+###############################################################################
+# optional check size before
+###############################################################################
+# echo calculate used storage before ...
+# du -shc ../.synapse/media_store/*
+
+###############################################################################
+# finally start pruning media:
+###############################################################################
+set -x # for debugging the generated string
+curl --header "Authorization: Bearer $TOKEN" -v -X POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
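
###############################################################################
# optionally check the size afterwards
###############################################################################
# illustrative only: mirrors the "check size before" step above; adjust the
# media_store path to your installation
# echo calculate used storage after ...
# du -shc ../.synapse/media_store/*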
diff --git a/debian/changelog b/debian/changelog
index 619baa0a..047d3246 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,24 @@
+matrix-synapse (0.33.9-2~bpo9+1) stretch-backports; urgency=medium
+
+ * Rebuild for stretch-backports.
+
+ -- Andrej Shadura <andrewsh@debian.org> Tue, 18 Dec 2018 16:54:29 +0100
+
+matrix-synapse (0.33.9-2) unstable; urgency=medium
+
+ * New upstream release.
+
+ -- Andrej Shadura <andrewsh@debian.org> Fri, 23 Nov 2018 18:39:57 +0100
+
+matrix-synapse (0.33.8-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Refresh patches, update dependencies.
+ * Normalise whitespace.
+ * Wrap and sort debian/control.
+
+ -- Andrej Shadura <andrewsh@debian.org> Fri, 02 Nov 2018 12:32:34 +0100
+
matrix-synapse (0.33.4-1~bpo9+1) stretch-backports; urgency=medium
* Rebuild for stretch-backports.
diff --git a/debian/control b/debian/control
index 702878e3..a09ea6b1 100644
--- a/debian/control
+++ b/debian/control
@@ -10,30 +10,31 @@ Build-Depends:
libjs-jquery,
po-debconf,
python (>= 2.6.6-3),
- python-attr,
+ python-attr (>= 16.0.0),
python-bcrypt,
python-blist,
- python-canonicaljson (>=1.1.3),
+ python-canonicaljson (>= 1.1.3),
python-daemonize,
python-frozendict (>= 0.4),
python-jsonschema (>= 2.5.1),
python-mock,
- python-msgpack (>=0.3.0),
- python-netaddr (>=0.7.18),
+ python-msgpack (>= 0.3.0),
python-nacl (>= 0.3.0),
+ python-netaddr (>= 0.7.18),
python-openssl (>= 0.14),
- python-pil,
python-phonenumbers,
+ python-pil,
python-prometheus-client,
+ python-psutil,
python-pyasn1,
- python-pydenticon,
python-pymacaroons,
python-pysaml2,
python-service-identity (>= 1.0.0),
python-setuptools (>= 0.6b3),
- python-sortedcontainers,
- python-six,
python-signedjson (>= 1.0.0),
+ python-six,
+ python-sortedcontainers,
+ python-treq (>= 15.1.0),
python-twisted (>= 17.1.0),
python-unpaddedbase64 (>= 1.0.1),
python-yaml
@@ -50,20 +51,20 @@ Depends:
debconf,
libjs-jquery,
lsb-base (>= 3.0-6),
- python-canonicaljson (>=1.1.3),
- python-twisted (>= 17.1.0),
+ python-canonicaljson (>= 1.1.3),
python-nacl (>= 1.2.1),
python-pymacaroons,
python-pysaml2 (>= 4.0.0),
python-systemd,
+ python-twisted (>= 17.1.0),
${misc:Depends},
${python:Depends}
Suggests:
python-bleach (>= 1.4.2),
python-jinja2 (>= 2.8)
Recommends:
- python-psycopg2,
python-lxml,
+ python-psycopg2,
# python-priority,
# python-h2 (>= 3.0.0)
Description: Matrix reference homeserver
diff --git a/debian/patches/0001-tox.patch b/debian/patches/0001-tox.patch
deleted file mode 100644
index f153879f..00000000
--- a/debian/patches/0001-tox.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-From: Erik Johnston <erikj@matrix.org>
-Date: Fri, 10 Jun 2016 10:57:07 +0100
-Subject: tox
-
----
- tox.ini | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/tox.ini b/tox.ini
-index f408def..4a40b2a 100644
---- a/tox.ini
-+++ b/tox.ini
-@@ -1,5 +1,6 @@
- [tox]
- envlist = packaging, py27, py36, pep8, check_isort
-+sitepackages = True
-
- [base]
- deps =
diff --git a/debian/patches/0002-change_instructions.patch b/debian/patches/0002-change_instructions.patch
index a51640ce..98af631f 100644
--- a/debian/patches/0002-change_instructions.patch
+++ b/debian/patches/0002-change_instructions.patch
@@ -10,7 +10,7 @@ diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 1ab5593..14052a9 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
-@@ -31,6 +31,11 @@ class ConfigError(Exception):
+@@ -32,6 +32,11 @@
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\
Please opt in or out of reporting anonymized homeserver usage statistics, by
setting the `report_stats` key in your config file to either True or False.
@@ -22,7 +22,7 @@ index 1ab5593..14052a9 100644
"""
MISSING_REPORT_STATS_SPIEL = """\
-@@ -45,6 +50,11 @@
+@@ -46,6 +51,11 @@
MISSING_SERVER_NAME = """\
Missing mandatory `server_name` config option.
diff --git a/debian/patches/0004-webclient-instructions.patch b/debian/patches/0004-webclient-instructions.patch
index c14c047d..6ab650ff 100644
--- a/debian/patches/0004-webclient-instructions.patch
+++ b/debian/patches/0004-webclient-instructions.patch
@@ -8,7 +8,7 @@ Subject: webclient-instructions
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
-@@ -86,12 +86,11 @@
+@@ -92,12 +92,11 @@
"Please either install the matrix-angular-sdk or configure\n"
"the location of the source to serve via the configuration\n"
"option `web_client_location`\n\n"
diff --git a/debian/patches/0005-Honour-config.web_client.patch b/debian/patches/0005-Honour-config.web_client.patch
index 3bb2ac3c..71b23ade 100644
--- a/debian/patches/0005-Honour-config.web_client.patch
+++ b/debian/patches/0005-Honour-config.web_client.patch
@@ -10,7 +10,7 @@ diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 5a0329b..492b05f 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
-@@ -109,7 +109,7 @@ class SynapseHomeServer(HomeServer):
+@@ -126,7 +126,7 @@
for res in listener_config["resources"]:
for name in res["names"]:
resources.update(self._configure_named_resource(
@@ -19,7 +19,7 @@ index 5a0329b..492b05f 100755
))
additional_resources = listener_config.get("additional_resources", {})
-@@ -154,7 +154,7 @@ class SynapseHomeServer(HomeServer):
+@@ -173,7 +173,7 @@
)
logger.info("Synapse now listening on port %d", port)
@@ -28,9 +28,9 @@ index 5a0329b..492b05f 100755
"""Build a resource map for a named resource
Args:
-@@ -212,7 +212,7 @@ class SynapseHomeServer(HomeServer):
- SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
- })
+@@ -240,7 +240,7 @@
+ if name in ["keys", "federation"]:
+ resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
- if name == "webclient":
+ if name == "webclient" and config.web_client:
diff --git a/debian/patches/0006-Avoid-pip-install.patch b/debian/patches/0006-Avoid-pip-install.patch
index ccae9d21..87961904 100644
--- a/debian/patches/0006-Avoid-pip-install.patch
+++ b/debian/patches/0006-Avoid-pip-install.patch
@@ -10,7 +10,7 @@ Bug: https://github.com/matrix-org/synapse/issues/3743
@@ -25,8 +25,8 @@
except python_dependencies.MissingRequirementError as e:
message = "\n".join([
- "Missing Requirement: %s" % (e.message,),
+ "Missing Requirement: %s" % (str(e),),
- "To install run:",
- " pip install --upgrade --force \"%s\"" % (e.dependency,),
+ "To install, try:",
@@ -18,8 +18,8 @@ Bug: https://github.com/matrix-org/synapse/issues/3743
"",
])
sys.stderr.writelines(message)
---- a/synapse/config/jwt.py
-+++ b/synapse/config/jwt.py
+--- a/synapse/config/jwt_config.py
++++ b/synapse/config/jwt_config.py
@@ -19,7 +19,7 @@
"""Missing jwt library. This is required for jwt login.
diff --git a/debian/patches/series b/debian/patches/series
index c72ebf99..aa0b4090 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,4 +1,3 @@
-0001-tox.patch
0002-change_instructions.patch
0004-webclient-instructions.patch
0005-Honour-config.web_client.patch
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 77797621..db44c02a 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,6 +1,13 @@
-FROM docker.io/python:2-alpine3.8
+ARG PYTHON_VERSION=2
-RUN apk add --no-cache --virtual .nacl_deps \
+###
+### Stage 0: builder
+###
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder
+
+# install the OS build deps
+
+RUN apk add \
build-base \
libffi-dev \
libjpeg-turbo-dev \
@@ -8,25 +15,46 @@ RUN apk add --no-cache --virtual .nacl_deps \
libxslt-dev \
linux-headers \
postgresql-dev \
- su-exec \
zlib-dev
-COPY . /synapse
+# build things which have slow build steps, before we copy synapse, so that
+# the layer can be cached.
+#
+# (we really just care about caching a wheel here, as the "pip install" below
+# will install them again.)
+
+RUN pip install --prefix="/install" --no-warn-script-location \
+ cryptography \
+ msgpack-python \
+ pillow \
+ pynacl
+
+# now install synapse and all of the python deps to /install.
-# A wheel cache may be provided in ./cache for faster build
-RUN cd /synapse \
- && pip install --upgrade \
+COPY . /synapse
+RUN pip install --prefix="/install" --no-warn-script-location \
lxml \
- pip \
psycopg2 \
- setuptools \
- && mkdir -p /synapse/cache \
- && pip install -f /synapse/cache --upgrade --process-dependency-links . \
- && mv /synapse/docker/start.py /synapse/docker/conf / \
- && rm -rf \
- setup.cfg \
- setup.py \
- synapse
+ /synapse
+
+###
+### Stage 1: runtime
+###
+
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
+
+RUN apk add --no-cache --virtual .runtime_deps \
+ libffi \
+ libjpeg-turbo \
+ libressl \
+ libxslt \
+ libpq \
+ zlib \
+ su-exec
+
+COPY --from=builder /install /usr/local
+COPY ./docker/start.py /start.py
+COPY ./docker/conf /conf
VOLUME ["/data"]
diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests
new file mode 100644
index 00000000..7da8eeb9
--- /dev/null
+++ b/docker/Dockerfile-pgtests
@@ -0,0 +1,12 @@
+# Use the Sytest image that comes with a lot of the build dependencies
+# pre-installed
+FROM matrixdotorg/sytest:latest
+
+# The Sytest image doesn't come with python, so install that
+RUN apt-get -qq install -y python python-dev python-pip
+
+# We need tox to run the tests in run_pg_tests.sh
+RUN pip install tox
+
+ADD run_pg_tests.sh /pg_tests.sh
+ENTRYPOINT /pg_tests.sh
diff --git a/docker/README.md b/docker/README.md
index 038c78f7..3c00d1e9 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -88,6 +88,7 @@ variables are available for configuration:
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
uris to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
+* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`].
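
  For example, to raise the limit when starting the container by hand (an
  illustrative sketch; it assumes the required ``SYNAPSE_SERVER_NAME`` and
  ``SYNAPSE_REPORT_STATS`` settings described in this document, and omits any
  further options your deployment may need):

  ```
  docker run -d -v /my/synapse/data:/data \
      -e SYNAPSE_SERVER_NAME=matrix.example.com \
      -e SYNAPSE_REPORT_STATS=yes \
      -e SYNAPSE_MAX_UPLOAD_SIZE=50M \
      docker.io/matrixdotorg/synapse:latest
  ```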
Shared secrets, that will be initialized to random values if not set:
diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index 6bc25bb4..1b0f655d 100644
--- a/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
@@ -21,7 +21,7 @@ listeners:
{% if not SYNAPSE_NO_TLS %}
-
port: 8448
- bind_addresses: ['0.0.0.0']
+ bind_addresses: ['::']
type: http
tls: true
x_forwarded: false
@@ -34,7 +34,7 @@ listeners:
- port: 8008
tls: false
- bind_addresses: ['0.0.0.0']
+ bind_addresses: ['::']
type: http
x_forwarded: false
@@ -85,7 +85,7 @@ federation_rc_concurrent: 3
media_store_path: "/data/media"
uploads_path: "/data/uploads"
-max_upload_size: "10M"
+max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"
max_image_pixels: "32M"
dynamic_thumbnails: false
@@ -211,7 +211,9 @@ email:
require_transport_security: False
notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
app_name: Matrix
- template_dir: res/templates
+ # if template_dir is unset, uses the example templates that are part of
+ # the Synapse distribution.
+ #template_dir: res/templates
notif_template_html: notif_mail.html
notif_template_text: notif_mail.txt
notif_for_new_users: True
diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh
new file mode 100755
index 00000000..e77424c4
--- /dev/null
+++ b/docker/run_pg_tests.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# This script runs the PostgreSQL tests inside a Docker container. It expects
+# the relevant source files to be mounted into /src (done automatically by the
+# caller script). It will set up the database, run it, and then use the tox
+# configuration to run the tests.
+
+set -e
+
+# Set PGUSER so Synapse's tests know what user to connect to the database with
+export PGUSER=postgres
+
+# Initialise & start the database
+su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
+su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
+
+# Run the tests
+cd /src
+export TRIAL_FLAGS="-j 4"
+tox --workdir=/tmp -e py27-postgres
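
To run this by hand rather than through the usual wrapper (an illustrative
sketch; the image tag is just a placeholder), build the image from
``docker/Dockerfile-pgtests`` above and mount the source tree into /src:

    docker build -t synapse-pg-tests -f docker/Dockerfile-pgtests docker/
    docker run --rm -v "$(pwd):/src" synapse-pg-tests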
diff --git a/docker/start.py b/docker/start.py
index 90e8b9c5..346df8c8 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -5,6 +5,7 @@ import os
import sys
import subprocess
import glob
+import codecs
# Utility functions
convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ))
@@ -23,7 +24,7 @@ def generate_secrets(environ, secrets):
with open(filename) as handle: value = handle.read()
else:
print("Generating a random secret for {}".format(name))
- value = os.urandom(32).encode("hex")
+ value = codecs.encode(os.urandom(32), "hex").decode()
with open(filename, "w") as handle: handle.write(value)
environ[secret] = value
diff --git a/docs/consent_tracking.md b/docs/consent_tracking.md
index 064eae82..c586b5f0 100644
--- a/docs/consent_tracking.md
+++ b/docs/consent_tracking.md
@@ -31,7 +31,7 @@ Note that the templates must be stored under a name giving the language of the
template - currently this must always be `en` (for "English");
internationalisation support is intended for the future.
-The template for the policy itself should be versioned and named according to
+The template for the policy itself should be versioned and named according to
the version: for example `1.0.html`. The version of the policy which the user
has agreed to is stored in the database.
@@ -85,6 +85,37 @@ Once this is complete, and the server has been restarted, try visiting
an error "Missing string query parameter 'u'". It is now possible to manually
construct URIs where users can give their consent.
+### Enabling consent tracking at registration
+
+1. Add the following to your configuration:
+
+ ```yaml
+ user_consent:
+ require_at_registration: true
+ policy_name: "Privacy Policy" # or whatever you'd like to call the policy
+ ```
+
+2. In your consent templates, make use of the `public_version` variable to
+ see if an unauthenticated user is viewing the page. This is typically
+ wrapped around the form that would be used to actually agree to the document:
+
+ ```
+ {% if not public_version %}
+ <!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
+ <form method="post" action="consent">
+ <input type="hidden" name="v" value="{{version}}"/>
+ <input type="hidden" name="u" value="{{user}}"/>
+ <input type="hidden" name="h" value="{{userhmac}}"/>
+ <input type="submit" value="Sure thing!"/>
+ </form>
+ {% endif %}
+ ```
+
+3. Restart Synapse to apply the changes.
+
+Visiting `https://<server>/_matrix/consent` should now give you a view of the privacy
+document. This is what users will be able to see when registering for accounts.
+
### Constructing the consent URI
It may be useful to manually construct the "consent URI" for a given user - for
@@ -106,6 +137,12 @@ query parameters:
`https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.
+Note that not providing a `u` parameter will be interpreted as wanting to view
+the document from an unauthenticated perspective, such as prior to registration.
+Therefore, the `h` parameter is not required in this scenario. To enable this
+behaviour, set `require_at_registration` to `true` in your `user_consent` config.
+
+
Sending users a server notice asking them to agree to the policy
----------------------------------------------------------------
diff --git a/docs/privacy_policy_templates/en/1.0.html b/docs/privacy_policy_templates/en/1.0.html
index 55c5e4b6..321c7e46 100644
--- a/docs/privacy_policy_templates/en/1.0.html
+++ b/docs/privacy_policy_templates/en/1.0.html
@@ -12,12 +12,15 @@
<p>
All your base are belong to us.
</p>
- <form method="post" action="consent">
- <input type="hidden" name="v" value="{{version}}"/>
- <input type="hidden" name="u" value="{{user}}"/>
- <input type="hidden" name="h" value="{{userhmac}}"/>
- <input type="submit" value="Sure thing!"/>
- </form>
+ {% if not public_version %}
+ <!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
+ <form method="post" action="consent">
+ <input type="hidden" name="v" value="{{version}}"/>
+ <input type="hidden" name="u" value="{{user}}"/>
+ <input type="hidden" name="h" value="{{userhmac}}"/>
+ <input type="submit" value="Sure thing!"/>
+ </form>
+ {% endif %}
{% endif %}
</body>
</html>
diff --git a/jenkins-dendron-haproxy-postgres.sh b/jenkins-dendron-haproxy-postgres.sh
deleted file mode 100755
index 07979bf8..00000000
--- a/jenkins-dendron-haproxy-postgres.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
-./dendron/jenkins/build_dendron.sh
-./sytest/jenkins/prep_sytest_for_postgres.sh
-
-./sytest/jenkins/install_and_run.sh \
- --python $WORKSPACE/.tox/py27/bin/python \
- --synapse-directory $WORKSPACE \
- --dendron $WORKSPACE/dendron/bin/dendron \
- --haproxy \
diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh
deleted file mode 100755
index 3b932fe3..00000000
--- a/jenkins-dendron-postgres.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
-./dendron/jenkins/build_dendron.sh
-./sytest/jenkins/prep_sytest_for_postgres.sh
-
-./sytest/jenkins/install_and_run.sh \
- --python $WORKSPACE/.tox/py27/bin/python \
- --synapse-directory $WORKSPACE \
- --dendron $WORKSPACE/dendron/bin/dendron \
diff --git a/jenkins-flake8.sh b/jenkins-flake8.sh
deleted file mode 100755
index 11f1cab6..00000000
--- a/jenkins-flake8.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
-
-# Output flake8 violations to violations.flake8.log
-export PEP8SUFFIX="--output-file=violations.flake8.log"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox -e packaging -e pep8
diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh
deleted file mode 100755
index 1afb7363..00000000
--- a/jenkins-postgres.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-
-./sytest/jenkins/prep_sytest_for_postgres.sh
-
-./sytest/jenkins/install_and_run.sh \
- --python $WORKSPACE/.tox/py27/bin/python \
- --synapse-directory $WORKSPACE \
diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh
deleted file mode 100755
index baf4713a..00000000
--- a/jenkins-sqlite.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-
-./sytest/jenkins/install_and_run.sh \
- --python $WORKSPACE/.tox/py27/bin/python \
- --synapse-directory $WORKSPACE \
diff --git a/jenkins-unittests.sh b/jenkins-unittests.sh
deleted file mode 100755
index 4c2f103e..00000000
--- a/jenkins-unittests.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
-
-# Output flake8 violations to violations.flake8.log
-# Don't exit with non-0 status code on Jenkins,
-# so that the build steps continue and a later step can decided whether to
-# UNSTABLE or FAILURE this build.
-export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox --notest -e py27
-TOX_BIN=$WORKSPACE/.tox/py27/bin
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install lxml
-
-tox -e py27
diff --git a/jenkins/clone.sh b/jenkins/clone.sh
deleted file mode 100755
index ab30ac77..00000000
--- a/jenkins/clone.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#! /bin/bash
-
-# This clones a project from github into a named subdirectory
-# If the project has a branch with the same name as this branch
-# then it will checkout that branch after cloning.
-# Otherwise it will checkout "origin/develop."
-# The first argument is the name of the directory to checkout
-# the branch into.
-# The second argument is the URL of the remote repository to checkout.
-# Usually something like https://github.com/matrix-org/sytest.git
-
-set -eux
-
-NAME=$1
-PROJECT=$2
-BASE=".$NAME-base"
-
-# Update our mirror.
-if [ ! -d ".$NAME-base" ]; then
- # Create a local mirror of the source repository.
- # This saves us from having to download the entire repository
- # when this script is next run.
- git clone "$PROJECT" "$BASE" --mirror
-else
- # Fetch any updates from the source repository.
- (cd "$BASE"; git fetch -p)
-fi
-
-# Remove the existing repository so that we have a clean copy
-rm -rf "$NAME"
-# Cloning with --shared means that we will share portions of the
-# .git directory with our local mirror.
-git clone "$BASE" "$NAME" --shared
-
-# Jenkins may have supplied us with the name of the branch in the
-# environment. Otherwise we will have to guess based on the current
-# commit.
-: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
-cd "$NAME"
-# check out the relevant branch
-git checkout "${GIT_BRANCH}" || (
- echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
- git checkout "origin/develop"
-)
diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
index d95ca846..016afb8b 100755
--- a/jenkins/prepare_synapse.sh
+++ b/jenkins/prepare_synapse.sh
@@ -14,22 +14,3 @@ fi
# set up the virtualenv
tox -e py27 --notest -v
-
-TOX_BIN=$TOX_DIR/py27/bin
-
-# cryptography 2.2 requires setuptools >= 18.5.
-#
-# older versions of virtualenv (?) give us a virtualenv with the same version
-# of setuptools as is installed on the system python (and tox runs virtualenv
-# under python3, so we get the version of setuptools that is installed on that).
-#
-# anyway, make sure that we have a recent enough setuptools.
-$TOX_BIN/pip install 'setuptools>=18.5'
-
-# we also need a semi-recent version of pip, because old ones fail to install
-# the "enum34" dependency of cryptography.
-$TOX_BIN/pip install 'pip>=10'
-
-{ python synapse/python_dependencies.py
- echo lxml
-} | xargs $TOX_BIN/pip install
diff --git a/scripts-dev/check_auth.py b/scripts-dev/check_auth.py
index 4fa8792a..b3d11f49 100644
--- a/scripts-dev/check_auth.py
+++ b/scripts-dev/check_auth.py
@@ -1,21 +1,20 @@
-from synapse.events import FrozenEvent
-from synapse.api.auth import Auth
-
-from mock import Mock
+from __future__ import print_function
import argparse
import itertools
import json
import sys
+from mock import Mock
+
+from synapse.api.auth import Auth
+from synapse.events import FrozenEvent
+
def check_auth(auth, auth_chain, events):
auth_chain.sort(key=lambda e: e.depth)
- auth_map = {
- e.event_id: e
- for e in auth_chain
- }
+ auth_map = {e.event_id: e for e in auth_chain}
create_events = {}
for e in auth_chain:
@@ -25,31 +24,26 @@ def check_auth(auth, auth_chain, events):
for e in itertools.chain(auth_chain, events):
auth_events_list = [auth_map[i] for i, _ in e.auth_events]
- auth_events = {
- (e.type, e.state_key): e
- for e in auth_events_list
- }
+ auth_events = {(e.type, e.state_key): e for e in auth_events_list}
auth_events[("m.room.create", "")] = create_events[e.room_id]
try:
auth.check(e, auth_events=auth_events)
except Exception as ex:
- print "Failed:", e.event_id, e.type, e.state_key
- print "Auth_events:", auth_events
- print ex
- print json.dumps(e.get_dict(), sort_keys=True, indent=4)
+ print("Failed:", e.event_id, e.type, e.state_key)
+ print("Auth_events:", auth_events)
+ print(ex)
+ print(json.dumps(e.get_dict(), sort_keys=True, indent=4))
# raise
- print "Success:", e.event_id, e.type, e.state_key
+ print("Success:", e.event_id, e.type, e.state_key)
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
- 'json',
- nargs='?',
- type=argparse.FileType('r'),
- default=sys.stdin,
+ 'json', nargs='?', type=argparse.FileType('r'), default=sys.stdin
)
args = parser.parse_args()
diff --git a/scripts-dev/check_event_hash.py b/scripts-dev/check_event_hash.py
index 7ccae34d..8535f996 100644
--- a/scripts-dev/check_event_hash.py
+++ b/scripts-dev/check_event_hash.py
@@ -1,10 +1,15 @@
-from synapse.crypto.event_signing import *
-from unpaddedbase64 import encode_base64
-
import argparse
import hashlib
-import sys
import json
+import logging
+import sys
+
+from unpaddedbase64 import encode_base64
+
+from synapse.crypto.event_signing import (
+ check_event_content_hash,
+ compute_event_reference_hash,
+)
class dictobj(dict):
@@ -24,27 +29,26 @@ class dictobj(dict):
def main():
parser = argparse.ArgumentParser()
- parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'),
- default=sys.stdin)
+ parser.add_argument(
+ "input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin
+ )
args = parser.parse_args()
logging.basicConfig()
event_json = dictobj(json.load(args.input_json))
- algorithms = {
- "sha256": hashlib.sha256,
- }
+ algorithms = {"sha256": hashlib.sha256}
for alg_name in event_json.hashes:
if check_event_content_hash(event_json, algorithms[alg_name]):
- print "PASS content hash %s" % (alg_name,)
+ print("PASS content hash %s" % (alg_name,))
else:
- print "FAIL content hash %s" % (alg_name,)
+ print("FAIL content hash %s" % (alg_name,))
for algorithm in algorithms.values():
name, h_bytes = compute_event_reference_hash(event_json, algorithm)
- print "Reference hash %s: %s" % (name, encode_base64(h_bytes))
+ print("Reference hash %s: %s" % (name, encode_base64(h_bytes)))
-if __name__=="__main__":
- main()
+if __name__ == "__main__":
+ main()
diff --git a/scripts-dev/check_signature.py b/scripts-dev/check_signature.py
index 07957790..612f17ca 100644
--- a/scripts-dev/check_signature.py
+++ b/scripts-dev/check_signature.py
@@ -1,15 +1,15 @@
-from signedjson.sign import verify_signed_json
-from signedjson.key import decode_verify_key_bytes, write_signing_keys
-from unpaddedbase64 import decode_base64
-
-import urllib2
+import argparse
import json
+import logging
import sys
+import urllib2
+
import dns.resolver
-import pprint
-import argparse
-import logging
+from signedjson.key import decode_verify_key_bytes, write_signing_keys
+from signedjson.sign import verify_signed_json
+from unpaddedbase64 import decode_base64
+
def get_targets(server_name):
if ":" in server_name:
@@ -23,6 +23,7 @@ def get_targets(server_name):
except dns.resolver.NXDOMAIN:
yield (server_name, 8448)
+
def get_server_keys(server_name, target, port):
url = "https://%s:%i/_matrix/key/v1" % (target, port)
keys = json.load(urllib2.urlopen(url))
@@ -33,12 +34,14 @@ def get_server_keys(server_name, target, port):
verify_keys[key_id] = verify_key
return verify_keys
+
def main():
parser = argparse.ArgumentParser()
parser.add_argument("signature_name")
- parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'),
- default=sys.stdin)
+ parser.add_argument(
+ "input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin
+ )
args = parser.parse_args()
logging.basicConfig()
@@ -48,24 +51,23 @@ def main():
for target, port in get_targets(server_name):
try:
keys = get_server_keys(server_name, target, port)
- print "Using keys from https://%s:%s/_matrix/key/v1" % (target, port)
+ print("Using keys from https://%s:%s/_matrix/key/v1" % (target, port))
write_signing_keys(sys.stdout, keys.values())
break
- except:
+ except Exception:
logging.exception("Error talking to %s:%s", target, port)
json_to_check = json.load(args.input_json)
- print "Checking JSON:"
+ print("Checking JSON:")
for key_id in json_to_check["signatures"][args.signature_name]:
try:
key = keys[key_id]
verify_signed_json(json_to_check, args.signature_name, key)
- print "PASS %s" % (key_id,)
- except:
+ print("PASS %s" % (key_id,))
+ except Exception:
logging.exception("Check for key %s failed" % (key_id,))
- print "FAIL %s" % (key_id,)
+ print("FAIL %s" % (key_id,))
if __name__ == '__main__':
main()
-
diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py
index 151551f2..dde85966 100644
--- a/scripts-dev/convert_server_keys.py
+++ b/scripts-dev/convert_server_keys.py
@@ -1,13 +1,21 @@
-import psycopg2
-import yaml
-import sys
+import hashlib
import json
+import sys
import time
-import hashlib
-from unpaddedbase64 import encode_base64
+
+import six
+
+import psycopg2
+import yaml
+from canonicaljson import encode_canonical_json
from signedjson.key import read_signing_keys
from signedjson.sign import sign_json
-from canonicaljson import encode_canonical_json
+from unpaddedbase64 import encode_base64
+
+if six.PY2:
+ db_type = six.moves.builtins.buffer
+else:
+ db_type = memoryview
def select_v1_keys(connection):
@@ -39,7 +47,9 @@ def select_v2_json(connection):
cursor.close()
results = {}
for server_name, key_id, key_json in rows:
- results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
+ results.setdefault(server_name, {})[key_id] = json.loads(
+ str(key_json).decode("utf-8")
+ )
return results
@@ -47,10 +57,7 @@ def convert_v1_to_v2(server_name, valid_until, keys, certificate):
return {
"old_verify_keys": {},
"server_name": server_name,
- "verify_keys": {
- key_id: {"key": key}
- for key_id, key in keys.items()
- },
+ "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()},
"valid_until_ts": valid_until,
"tls_fingerprints": [fingerprint(certificate)],
}
@@ -65,7 +72,7 @@ def rows_v2(server, json):
valid_until = json["valid_until_ts"]
key_json = encode_canonical_json(json)
for key_id in json["verify_keys"]:
- yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))
+ yield (server, key_id, "-", valid_until, valid_until, db_type(key_json))
def main():
@@ -87,7 +94,7 @@ def main():
result = {}
for server in keys:
- if not server in json:
+ if server not in json:
v2_json = convert_v1_to_v2(
server, valid_until, keys[server], certificates[server]
)
@@ -96,10 +103,7 @@ def main():
yaml.safe_dump(result, sys.stdout, default_flow_style=False)
- rows = list(
- row for server, json in result.items()
- for row in rows_v2(server, json)
- )
+ rows = list(row for server, json in result.items() for row in rows_v2(server, json))
cursor = connection.cursor()
cursor.executemany(
@@ -107,7 +111,7 @@ def main():
" server_name, key_id, from_server,"
" ts_added_ms, ts_valid_until_ms, key_json"
") VALUES (%s, %s, %s, %s, %s, %s)",
- rows
+ rows,
)
connection.commit()
diff --git a/scripts-dev/copyrighter-sql.pl b/scripts-dev/copyrighter-sql.pl
deleted file mode 100755
index 13e630fc..00000000
--- a/scripts-dev/copyrighter-sql.pl
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/perl -pi
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-$copyright = <<EOT;
-/* Copyright 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-EOT
-
-s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
diff --git a/scripts-dev/copyrighter.pl b/scripts-dev/copyrighter.pl
deleted file mode 100755
index 03656f69..00000000
--- a/scripts-dev/copyrighter.pl
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/perl -pi
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-$copyright = <<EOT;
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-EOT
-
-s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py
index 47dac777..1deb0fe2 100755
--- a/scripts-dev/definitions.py
+++ b/scripts-dev/definitions.py
@@ -1,8 +1,16 @@
#! /usr/bin/python
+from __future__ import print_function
+
+import argparse
import ast
+import os
+import re
+import sys
+
import yaml
+
class DefinitionVisitor(ast.NodeVisitor):
def __init__(self):
super(DefinitionVisitor, self).__init__()
@@ -42,15 +50,18 @@ def non_empty(defs):
functions = {name: non_empty(f) for name, f in defs['def'].items()}
classes = {name: non_empty(f) for name, f in defs['class'].items()}
result = {}
- if functions: result['def'] = functions
- if classes: result['class'] = classes
+ if functions:
+ result['def'] = functions
+ if classes:
+ result['class'] = classes
names = defs['names']
uses = []
for name in names.get('Load', ()):
if name not in names.get('Param', ()) and name not in names.get('Store', ()):
uses.append(name)
uses.extend(defs['attrs'])
- if uses: result['uses'] = uses
+ if uses:
+ result['uses'] = uses
result['names'] = names
result['attrs'] = defs['attrs']
return result
@@ -95,7 +106,6 @@ def used_names(prefix, item, defs, names):
if __name__ == '__main__':
- import sys, os, argparse, re
parser = argparse.ArgumentParser(description='Find definitions.')
parser.add_argument(
@@ -105,24 +115,28 @@ if __name__ == '__main__':
"--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
)
parser.add_argument(
- "--pattern", action="append", metavar="REGEXP",
- help="Search for a pattern"
+ "--pattern", action="append", metavar="REGEXP", help="Search for a pattern"
)
parser.add_argument(
- "directories", nargs='+', metavar="DIR",
- help="Directories to search for definitions"
+ "directories",
+ nargs='+',
+ metavar="DIR",
+ help="Directories to search for definitions",
)
parser.add_argument(
- "--referrers", default=0, type=int,
- help="Include referrers up to the given depth"
+ "--referrers",
+ default=0,
+ type=int,
+ help="Include referrers up to the given depth",
)
parser.add_argument(
- "--referred", default=0, type=int,
- help="Include referred down to the given depth"
+ "--referred",
+ default=0,
+ type=int,
+ help="Include referred down to the given depth",
)
parser.add_argument(
- "--format", default="yaml",
- help="Output format, one of 'yaml' or 'dot'"
+ "--format", default="yaml", help="Output format, one of 'yaml' or 'dot'"
)
args = parser.parse_args()
@@ -162,7 +176,7 @@ if __name__ == '__main__':
for used_by in entry.get("used", ()):
referrers.add(used_by)
for name, definition in names.items():
- if not name in referrers:
+ if name not in referrers:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
@@ -176,7 +190,7 @@ if __name__ == '__main__':
for uses in entry.get("uses", ()):
referred.add(uses)
for name, definition in names.items():
- if not name in referred:
+ if name not in referred:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
@@ -185,12 +199,12 @@ if __name__ == '__main__':
if args.format == 'yaml':
yaml.dump(result, sys.stdout, default_flow_style=False)
elif args.format == 'dot':
- print "digraph {"
+ print("digraph {")
for name, entry in result.items():
- print name
+ print(name)
for used_by in entry.get("used", ()):
if used_by in result:
- print used_by, "->", name
- print "}"
+ print(used_by, "->", name)
+ print("}")
else:
raise ValueError("Unknown format %r" % (args.format))
diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py
index 6e45be75..22b30fa7 100755
--- a/scripts-dev/dump_macaroon.py
+++ b/scripts-dev/dump_macaroon.py
@@ -1,8 +1,11 @@
#!/usr/bin/env python2
-import pymacaroons
+from __future__ import print_function
+
import sys
+import pymacaroons
+
if len(sys.argv) == 1:
sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
sys.exit(1)
@@ -11,14 +14,14 @@ macaroon_string = sys.argv[1]
key = sys.argv[2] if len(sys.argv) > 2 else None
macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
-print macaroon.inspect()
+print(macaroon.inspect())
-print ""
+print("")
verifier = pymacaroons.Verifier()
verifier.satisfy_general(lambda c: True)
try:
verifier.verify(macaroon, key)
- print "Signature is correct"
+ print("Signature is correct")
except Exception as e:
- print e.message
+ print(str(e))
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index d2acc765..e0287c8c 100755
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -18,21 +18,21 @@
from __future__ import print_function
import argparse
+import base64
+import json
+import sys
from urlparse import urlparse, urlunparse
import nacl.signing
-import json
-import base64
import requests
-import sys
-
-from requests.adapters import HTTPAdapter
import srvlookup
import yaml
+from requests.adapters import HTTPAdapter
# uncomment the following to enable debug logging of http requests
-#from httplib import HTTPConnection
-#HTTPConnection.debuglevel = 1
+# from httplib import HTTPConnection
+# HTTPConnection.debuglevel = 1
+
def encode_base64(input_bytes):
"""Encode bytes as a base64 string without any padding."""
@@ -58,15 +58,15 @@ def decode_base64(input_string):
def encode_canonical_json(value):
return json.dumps(
- value,
- # Encode code-points outside of ASCII as UTF-8 rather than \u escapes
- ensure_ascii=False,
- # Remove unecessary white space.
- separators=(',',':'),
- # Sort the keys of dictionaries.
- sort_keys=True,
- # Encode the resulting unicode as UTF-8 bytes.
- ).encode("UTF-8")
+ value,
+ # Encode code-points outside of ASCII as UTF-8 rather than \u escapes
+ ensure_ascii=False,
+ # Remove unecessary white space.
+ separators=(',', ':'),
+ # Sort the keys of dictionaries.
+ sort_keys=True,
+ # Encode the resulting unicode as UTF-8 bytes.
+ ).encode("UTF-8")
def sign_json(json_object, signing_key, signing_name):
@@ -88,6 +88,7 @@ def sign_json(json_object, signing_key, signing_name):
NACL_ED25519 = "ed25519"
+
def decode_signing_key_base64(algorithm, version, key_base64):
"""Decode a base64 encoded signing key
Args:
@@ -143,25 +144,25 @@ def request_json(method, origin_name, origin_key, destination, path, content):
authorization_headers = []
for key, sig in signed_json["signatures"][origin_name].items():
- header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
- origin_name, key, sig,
- )
+ header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig)
authorization_headers.append(bytes(header))
- print ("Authorization: %s" % header, file=sys.stderr)
+ print("Authorization: %s" % header, file=sys.stderr)
dest = "matrix://%s%s" % (destination, path)
- print ("Requesting %s" % dest, file=sys.stderr)
+ print("Requesting %s" % dest, file=sys.stderr)
s = requests.Session()
s.mount("matrix://", MatrixConnectionAdapter())
+ headers = {"Host": destination, "Authorization": authorization_headers[0]}
+
+ if method == "POST":
+ headers["Content-Type"] = "application/json"
+
result = s.request(
method=method,
url=dest,
- headers={
- "Host": destination,
- "Authorization": authorization_headers[0]
- },
+ headers=headers,
verify=False,
data=content,
)
@@ -171,50 +172,50 @@ def request_json(method, origin_name, origin_key, destination, path, content):
def main():
parser = argparse.ArgumentParser(
- description=
- "Signs and sends a federation request to a matrix homeserver",
+ description="Signs and sends a federation request to a matrix homeserver"
)
parser.add_argument(
- "-N", "--server-name",
+ "-N",
+ "--server-name",
help="Name to give as the local homeserver. If unspecified, will be "
- "read from the config file.",
+ "read from the config file.",
)
parser.add_argument(
- "-k", "--signing-key-path",
+ "-k",
+ "--signing-key-path",
help="Path to the file containing the private ed25519 key to sign the "
- "request with.",
+ "request with.",
)
parser.add_argument(
- "-c", "--config",
+ "-c",
+ "--config",
default="homeserver.yaml",
help="Path to server config file. Ignored if --server-name and "
- "--signing-key-path are both given.",
+ "--signing-key-path are both given.",
)
parser.add_argument(
- "-d", "--destination",
+ "-d",
+ "--destination",
default="matrix.org",
help="name of the remote homeserver. We will do SRV lookups and "
- "connect appropriately.",
+ "connect appropriately.",
)
parser.add_argument(
- "-X", "--method",
- help="HTTP method to use for the request. Defaults to GET if --data is"
- "unspecified, POST if it is."
+ "-X",
+ "--method",
+ help="HTTP method to use for the request. Defaults to GET if --body is"
+ "unspecified, POST if it is.",
)
- parser.add_argument(
- "--body",
- help="Data to send as the body of the HTTP request"
- )
+ parser.add_argument("--body", help="Data to send as the body of the HTTP request")
parser.add_argument(
- "path",
- help="request path. We will add '/_matrix/federation/v1/' to this."
+ "path", help="request path. We will add '/_matrix/federation/v1/' to this."
)
args = parser.parse_args()
@@ -227,13 +228,15 @@ def main():
result = request_json(
args.method,
- args.server_name, key, args.destination,
+ args.server_name,
+ key,
+ args.destination,
"/_matrix/federation/v1/" + args.path,
content=args.body,
)
json.dump(result, sys.stdout)
- print ("")
+ print("")
def read_args_from_config(args):
@@ -253,7 +256,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
return s, 8448
if ":" in s:
- out = s.rsplit(":",1)
+ out = s.rsplit(":", 1)
try:
port = int(out[1])
except ValueError:
@@ -263,7 +266,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
try:
srv = srvlookup.lookup("matrix", "tcp", s)[0]
return srv.host, srv.port
- except:
+ except Exception:
return s, 8448
def get_connection(self, url, proxies=None):
@@ -272,10 +275,9 @@ class MatrixConnectionAdapter(HTTPAdapter):
(host, port) = self.lookup(parsed.netloc)
netloc = "%s:%d" % (host, port)
print("Connecting to %s" % (netloc,), file=sys.stderr)
- url = urlunparse((
- "https", netloc, parsed.path, parsed.params, parsed.query,
- parsed.fragment,
- ))
+ url = urlunparse(
+ ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
+ )
return super(MatrixConnectionAdapter, self).get_connection(url, proxies)
diff --git a/scripts-dev/hash_history.py b/scripts-dev/hash_history.py
index 616d6a10..514d80fa 100644
--- a/scripts-dev/hash_history.py
+++ b/scripts-dev/hash_history.py
@@ -1,23 +1,31 @@
-from synapse.storage.pdu import PduStore
-from synapse.storage.signatures import SignatureStore
-from synapse.storage._base import SQLBaseStore
-from synapse.federation.units import Pdu
-from synapse.crypto.event_signing import (
- add_event_pdu_content_hash, compute_pdu_event_reference_hash
-)
-from synapse.api.events.utils import prune_pdu
-from unpaddedbase64 import encode_base64, decode_base64
-from canonicaljson import encode_canonical_json
+from __future__ import print_function
+
import sqlite3
import sys
+from unpaddedbase64 import decode_base64, encode_base64
+
+from synapse.crypto.event_signing import (
+ add_event_pdu_content_hash,
+ compute_pdu_event_reference_hash,
+)
+from synapse.federation.units import Pdu
+from synapse.storage._base import SQLBaseStore
+from synapse.storage.pdu import PduStore
+from synapse.storage.signatures import SignatureStore
+
+
class Store(object):
_get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"]
_get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"]
_get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"]
- _get_pdu_origin_signatures_txn = SignatureStore.__dict__["_get_pdu_origin_signatures_txn"]
+ _get_pdu_origin_signatures_txn = SignatureStore.__dict__[
+ "_get_pdu_origin_signatures_txn"
+ ]
_store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"]
- _store_pdu_reference_hash_txn = SignatureStore.__dict__["_store_pdu_reference_hash_txn"]
+ _store_pdu_reference_hash_txn = SignatureStore.__dict__[
+ "_store_pdu_reference_hash_txn"
+ ]
_store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
_simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
@@ -26,9 +34,7 @@ store = Store()
def select_pdus(cursor):
- cursor.execute(
- "SELECT pdu_id, origin FROM pdus ORDER BY depth ASC"
- )
+ cursor.execute("SELECT pdu_id, origin FROM pdus ORDER BY depth ASC")
ids = cursor.fetchall()
@@ -41,23 +47,30 @@ def select_pdus(cursor):
for pdu in pdus:
try:
if pdu.prev_pdus:
- print "PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
+ print("PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
for pdu_id, origin, hashes in pdu.prev_pdus:
ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)]
hashes[ref_alg] = encode_base64(ref_hsh)
- store._store_prev_pdu_hash_txn(cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh)
- print "SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
+ store._store_prev_pdu_hash_txn(
+ cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh
+ )
+ print("SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
pdu = add_event_pdu_content_hash(pdu)
ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu)
reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh)
- store._store_pdu_reference_hash_txn(cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh)
+ store._store_pdu_reference_hash_txn(
+ cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh
+ )
for alg, hsh_base64 in pdu.hashes.items():
- print alg, hsh_base64
- store._store_pdu_content_hash_txn(cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64))
+ print(alg, hsh_base64)
+ store._store_pdu_content_hash_txn(
+ cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64)
+ )
+
+ except Exception:
+ print("FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
- except:
- print "FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus
def main():
conn = sqlite3.connect(sys.argv[1])
@@ -65,5 +78,6 @@ def main():
select_pdus(cursor)
conn.commit()
-if __name__=='__main__':
+
+if __name__ == '__main__':
main()
diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py
index 58d40c4f..da027be2 100755
--- a/scripts-dev/list_url_patterns.py
+++ b/scripts-dev/list_url_patterns.py
@@ -1,18 +1,17 @@
#! /usr/bin/python
-import ast
import argparse
+import ast
import os
import sys
+
import yaml
PATTERNS_V1 = []
PATTERNS_V2 = []
-RESULT = {
- "v1": PATTERNS_V1,
- "v2": PATTERNS_V2,
-}
+RESULT = {"v1": PATTERNS_V1, "v2": PATTERNS_V2}
+
class CallVisitor(ast.NodeVisitor):
def visit_Call(self, node):
@@ -21,7 +20,6 @@ class CallVisitor(ast.NodeVisitor):
else:
return
-
if name == "client_path_patterns":
PATTERNS_V1.append(node.args[0].s)
elif name == "client_v2_patterns":
@@ -42,8 +40,10 @@ def find_patterns_in_file(filepath):
parser = argparse.ArgumentParser(description='Find url patterns.')
parser.add_argument(
- "directories", nargs='+', metavar="DIR",
- help="Directories to search for definitions"
+ "directories",
+ nargs='+',
+ metavar="DIR",
+ help="Directories to search for definitions",
)
args = parser.parse_args()
diff --git a/scripts-dev/make_identicons.pl b/scripts-dev/make_identicons.pl
deleted file mode 100755
index cbff63e2..00000000
--- a/scripts-dev/make_identicons.pl
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env perl
-
-use strict;
-use warnings;
-
-use DBI;
-use DBD::SQLite;
-use JSON;
-use Getopt::Long;
-
-my $db; # = "homeserver.db";
-my $server = "http://localhost:8008";
-my $size = 320;
-
-GetOptions("db|d=s", \$db,
- "server|s=s", \$server,
- "width|w=i", \$size) or usage();
-
-usage() unless $db;
-
-my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr;
-
-my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr;
-
-foreach (@$res) {
- my ($token, $mxid) = ($_->[0], $_->[1]);
- my ($user_id) = ($mxid =~ m/@(.*):/);
- my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id);
- if (!$url || $url =~ /#auto$/) {
- `curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`;
- my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`;
- my $content_uri = from_json($json)->{content_uri};
- `curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`;
- }
-}
-
-sub usage {
- die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)";
-} \ No newline at end of file
diff --git a/scripts-dev/next_github_number.sh b/scripts-dev/next_github_number.sh
new file mode 100755
index 00000000..37628002
--- /dev/null
+++ b/scripts-dev/next_github_number.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -e
+
+# Fetch the current GitHub issue number, add one to it -- presto! The likely
+# next PR number.
+CURRENT_NUMBER=`curl -s "https://api.github.com/repos/matrix-org/synapse/issues?state=all&per_page=1" | jq -r ".[0].number"`
+CURRENT_NUMBER=$((CURRENT_NUMBER+1))
+echo $CURRENT_NUMBER \ No newline at end of file
diff --git a/scripts-dev/nuke-room-from-db.sh b/scripts-dev/nuke-room-from-db.sh
deleted file mode 100755
index c62928af..00000000
--- a/scripts-dev/nuke-room-from-db.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-## CAUTION:
-## This script will remove (hopefully) all trace of the given room ID from
-## your homeserver.db
-
-## Do not run it lightly.
-
-set -e
-
-if [ "$1" == "-h" ] || [ "$1" == "" ]; then
- echo "Call with ROOM_ID as first option and then pipe it into the database. So for instance you might run"
- echo " nuke-room-from-db.sh <room_id> | sqlite3 homeserver.db"
- echo "or"
- echo " nuke-room-from-db.sh <room_id> | psql --dbname=synapse"
- exit
-fi
-
-ROOMID="$1"
-
-cat <<EOF
-DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
-DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
-DELETE FROM event_edges WHERE room_id = '$ROOMID';
-DELETE FROM room_depth WHERE room_id = '$ROOMID';
-DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
-DELETE FROM events WHERE room_id = '$ROOMID';
-DELETE FROM event_json WHERE room_id = '$ROOMID';
-DELETE FROM state_events WHERE room_id = '$ROOMID';
-DELETE FROM current_state_events WHERE room_id = '$ROOMID';
-DELETE FROM room_memberships WHERE room_id = '$ROOMID';
-DELETE FROM feedback WHERE room_id = '$ROOMID';
-DELETE FROM topics WHERE room_id = '$ROOMID';
-DELETE FROM room_names WHERE room_id = '$ROOMID';
-DELETE FROM rooms WHERE room_id = '$ROOMID';
-DELETE FROM room_hosts WHERE room_id = '$ROOMID';
-DELETE FROM room_aliases WHERE room_id = '$ROOMID';
-DELETE FROM state_groups WHERE room_id = '$ROOMID';
-DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
-DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
-DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
-DELETE FROM event_search WHERE room_id = '$ROOMID';
-DELETE FROM guest_access WHERE room_id = '$ROOMID';
-DELETE FROM history_visibility WHERE room_id = '$ROOMID';
-DELETE FROM room_tags WHERE room_id = '$ROOMID';
-DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
-DELETE FROM room_account_data WHERE room_id = '$ROOMID';
-DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
-DELETE FROM local_invites WHERE room_id = '$ROOMID';
-DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
-DELETE FROM event_reports WHERE room_id = '$ROOMID';
-DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
-DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
-DELETE FROM event_auth WHERE room_id = '$ROOMID';
-DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
-VACUUM;
-EOF
diff --git a/scripts-dev/tail-synapse.py b/scripts-dev/tail-synapse.py
index 18be711e..7c9985d9 100644
--- a/scripts-dev/tail-synapse.py
+++ b/scripts-dev/tail-synapse.py
@@ -1,8 +1,9 @@
-import requests
import collections
+import json
import sys
import time
-import json
+
+import requests
Entry = collections.namedtuple("Entry", "name position rows")
@@ -30,11 +31,11 @@ def parse_response(content):
def replicate(server, streams):
- return parse_response(requests.get(
- server + "/_synapse/replication",
- verify=False,
- params=streams
- ).content)
+ return parse_response(
+ requests.get(
+ server + "/_synapse/replication", verify=False, params=streams
+ ).content
+ )
def main():
@@ -45,16 +46,16 @@ def main():
try:
streams = {
row.name: row.position
- for row in replicate(server, {"streams":"-1"})["streams"].rows
+ for row in replicate(server, {"streams": "-1"})["streams"].rows
}
- except requests.exceptions.ConnectionError as e:
+ except requests.exceptions.ConnectionError:
time.sleep(0.1)
while True:
try:
results = replicate(server, streams)
- except:
- sys.stdout.write("connection_lost("+ repr(streams) + ")\n")
+ except Exception:
+ sys.stdout.write("connection_lost(" + repr(streams) + ")\n")
break
for update in results.values():
for row in update.rows:
@@ -62,6 +63,5 @@ def main():
streams[update.name] = update.position
-
-if __name__=='__main__':
+if __name__ == '__main__':
main()
diff --git a/scripts/hash_password b/scripts/hash_password
index 215ab25c..a1eb0769 100755
--- a/scripts/hash_password
+++ b/scripts/hash_password
@@ -1,17 +1,17 @@
#!/usr/bin/env python
import argparse
-
+import getpass
import sys
+import unicodedata
import bcrypt
-import getpass
-
import yaml
-bcrypt_rounds=12
+bcrypt_rounds = 12
password_pepper = ""
+
def prompt_for_pass():
password = getpass.getpass("Password: ")
@@ -25,19 +25,27 @@ def prompt_for_pass():
return password
+
if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description="Calculate the hash of a new password, so that passwords"
- " can be reset")
+ description=(
+ "Calculate the hash of a new password, so that passwords can be reset"
+ )
+ )
parser.add_argument(
- "-p", "--password",
+ "-p",
+ "--password",
default=None,
help="New password for user. Will prompt if omitted.",
)
parser.add_argument(
- "-c", "--config",
+ "-c",
+ "--config",
type=argparse.FileType('r'),
- help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
+ help=(
+ "Path to server config file. "
+ "Used to read in bcrypt_rounds and password_pepper."
+ ),
)
args = parser.parse_args()
@@ -51,5 +59,21 @@ if __name__ == "__main__":
if not password:
password = prompt_for_pass()
- print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
+ # On Python 2, make sure we decode it to Unicode before we normalise it
+ if isinstance(password, bytes):
+ try:
+ password = password.decode(sys.stdin.encoding)
+ except UnicodeDecodeError:
+ print(
+ "ERROR! Your password is not decodable using your terminal encoding (%s)."
+ % (sys.stdin.encoding,)
+ )
+
+ pw = unicodedata.normalize("NFKC", password)
+
+ hashed = bcrypt.hashpw(
+ pw.encode('utf8') + password_pepper.encode("utf8"),
+ bcrypt.gensalt(bcrypt_rounds),
+ ).decode('ascii')
+ print(hashed)
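
For reference, a hash produced by the updated script can be checked by mirroring the same normalise-then-pepper steps; a hypothetical helper (the function name and pepper handling are assumptions for illustration, not part of the patch):

```python
import unicodedata

import bcrypt


def check_password(candidate, stored_hash, pepper=""):
    # Mirror the hashing side: NFKC-normalise, append the pepper,
    # then let bcrypt compare against the stored hash.
    pw = unicodedata.normalize("NFKC", candidate)
    return bcrypt.checkpw(
        pw.encode("utf8") + pepper.encode("utf8"),
        stored_hash.encode("ascii"),
    )
```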
diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py
index 7914ead8..e630936f 100755
--- a/scripts/move_remote_media_to_new_store.py
+++ b/scripts/move_remote_media_to_new_store.py
@@ -36,12 +36,9 @@ from __future__ import print_function
import argparse
import logging
-
-import sys
-
import os
-
import shutil
+import sys
from synapse.rest.media.v1.filepath import MediaFilePaths
@@ -77,24 +74,23 @@ def move_media(origin_server, file_id, src_paths, dest_paths):
if not os.path.exists(original_file):
logger.warn(
"Original for %s/%s (%s) does not exist",
- origin_server, file_id, original_file,
+ origin_server,
+ file_id,
+ original_file,
)
else:
mkdir_and_move(
- original_file,
- dest_paths.remote_media_filepath(origin_server, file_id),
+ original_file, dest_paths.remote_media_filepath(origin_server, file_id)
)
# now look for thumbnails
- original_thumb_dir = src_paths.remote_media_thumbnail_dir(
- origin_server, file_id,
- )
+ original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id)
if not os.path.exists(original_thumb_dir):
return
mkdir_and_move(
original_thumb_dir,
- dest_paths.remote_media_thumbnail_dir(origin_server, file_id)
+ dest_paths.remote_media_thumbnail_dir(origin_server, file_id),
)
@@ -109,24 +105,16 @@ def mkdir_and_move(original_file, dest_file):
if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class = argparse.RawDescriptionHelpFormatter,
- )
- parser.add_argument(
- "-v", action='store_true', help='enable debug logging')
- parser.add_argument(
- "src_repo",
- help="Path to source content repo",
- )
- parser.add_argument(
- "dest_repo",
- help="Path to source content repo",
+ description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
+ parser.add_argument("-v", action='store_true', help='enable debug logging')
+ parser.add_argument("src_repo", help="Path to source content repo")
+ parser.add_argument("dest_repo", help="Path to source content repo")
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
- "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
+ "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}
logging.basicConfig(**logging_config)
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 8c3d4293..b450712a 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -14,187 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
-import argparse
-import getpass
-import hashlib
-import hmac
-import json
-import sys
-import urllib2
-import yaml
-
-
-def request_registration(user, password, server_location, shared_secret, admin=False):
- req = urllib2.Request(
- "%s/_matrix/client/r0/admin/register" % (server_location,),
- headers={'Content-Type': 'application/json'}
- )
-
- try:
- if sys.version_info[:3] >= (2, 7, 9):
- # As of version 2.7.9, urllib2 now checks SSL certs
- import ssl
- f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
- else:
- f = urllib2.urlopen(req)
- body = f.read()
- f.close()
- nonce = json.loads(body)["nonce"]
- except urllib2.HTTPError as e:
- print "ERROR! Received %d %s" % (e.code, e.reason,)
- if 400 <= e.code < 500:
- if e.info().type == "application/json":
- resp = json.load(e)
- if "error" in resp:
- print resp["error"]
- sys.exit(1)
-
- mac = hmac.new(
- key=shared_secret,
- digestmod=hashlib.sha1,
- )
-
- mac.update(nonce)
- mac.update("\x00")
- mac.update(user)
- mac.update("\x00")
- mac.update(password)
- mac.update("\x00")
- mac.update("admin" if admin else "notadmin")
-
- mac = mac.hexdigest()
-
- data = {
- "nonce": nonce,
- "username": user,
- "password": password,
- "mac": mac,
- "admin": admin,
- }
-
- server_location = server_location.rstrip("/")
-
- print "Sending registration request..."
-
- req = urllib2.Request(
- "%s/_matrix/client/r0/admin/register" % (server_location,),
- data=json.dumps(data),
- headers={'Content-Type': 'application/json'}
- )
- try:
- if sys.version_info[:3] >= (2, 7, 9):
- # As of version 2.7.9, urllib2 now checks SSL certs
- import ssl
- f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
- else:
- f = urllib2.urlopen(req)
- f.read()
- f.close()
- print "Success."
- except urllib2.HTTPError as e:
- print "ERROR! Received %d %s" % (e.code, e.reason,)
- if 400 <= e.code < 500:
- if e.info().type == "application/json":
- resp = json.load(e)
- if "error" in resp:
- print resp["error"]
- sys.exit(1)
-
-
-def register_new_user(user, password, server_location, shared_secret, admin):
- if not user:
- try:
- default_user = getpass.getuser()
- except:
- default_user = None
-
- if default_user:
- user = raw_input("New user localpart [%s]: " % (default_user,))
- if not user:
- user = default_user
- else:
- user = raw_input("New user localpart: ")
-
- if not user:
- print "Invalid user name"
- sys.exit(1)
-
- if not password:
- password = getpass.getpass("Password: ")
-
- if not password:
- print "Password cannot be blank."
- sys.exit(1)
-
- confirm_password = getpass.getpass("Confirm password: ")
-
- if password != confirm_password:
- print "Passwords do not match"
- sys.exit(1)
-
- if not admin:
- admin = raw_input("Make admin [no]: ")
- if admin in ("y", "yes", "true"):
- admin = True
- else:
- admin = False
-
- request_registration(user, password, server_location, shared_secret, bool(admin))
-
+from synapse._scripts.register_new_matrix_user import main
if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- description="Used to register new users with a given home server when"
- " registration has been disabled. The home server must be"
- " configured with the 'registration_shared_secret' option"
- " set.",
- )
- parser.add_argument(
- "-u", "--user",
- default=None,
- help="Local part of the new user. Will prompt if omitted.",
- )
- parser.add_argument(
- "-p", "--password",
- default=None,
- help="New password for user. Will prompt if omitted.",
- )
- parser.add_argument(
- "-a", "--admin",
- action="store_true",
- help="Register new user as an admin. Will prompt if omitted.",
- )
-
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument(
- "-c", "--config",
- type=argparse.FileType('r'),
- help="Path to server config file. Used to read in shared secret.",
- )
-
- group.add_argument(
- "-k", "--shared-secret",
- help="Shared secret as defined in server config file.",
- )
-
- parser.add_argument(
- "server_url",
- default="https://localhost:8448",
- nargs='?',
- help="URL to use to talk to the home server. Defaults to "
- " 'https://localhost:8448'.",
- )
-
- args = parser.parse_args()
-
- if "config" in args and args.config:
- config = yaml.safe_load(args.config)
- secret = config.get("registration_shared_secret", None)
- if not secret:
- print "No 'registration_shared_secret' defined in config."
- sys.exit(1)
- else:
- secret = args.shared_secret
-
- register_new_user(args.user, args.password, args.server_url, secret, args.admin)
+ main()
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index b9b828c1..3c7b6063 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -15,23 +15,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from twisted.internet import defer, reactor
-from twisted.enterprise import adbapi
-
-from synapse.storage._base import LoggingTransaction, SQLBaseStore
-from synapse.storage.engines import create_engine
-from synapse.storage.prepare_database import prepare_database
-
import argparse
import curses
import logging
import sys
import time
import traceback
-import yaml
from six import string_types
+import yaml
+
+from twisted.enterprise import adbapi
+from twisted.internet import defer, reactor
+
+from synapse.storage._base import LoggingTransaction, SQLBaseStore
+from synapse.storage.engines import create_engine
+from synapse.storage.prepare_database import prepare_database
logger = logging.getLogger("synapse_port_db")
@@ -105,6 +105,7 @@ class Store(object):
*All* database interactions should go through this object.
"""
+
def __init__(self, db_pool, engine):
self.db_pool = db_pool
self.database_engine = engine
@@ -135,7 +136,8 @@ class Store(object):
txn = conn.cursor()
return func(
LoggingTransaction(txn, desc, self.database_engine, [], []),
- *args, **kwargs
+ *args,
+ **kwargs
)
except self.database_engine.module.DatabaseError as e:
if self.database_engine.is_deadlock(e):
@@ -158,22 +160,20 @@ class Store(object):
def r(txn):
txn.execute(sql, args)
return txn.fetchall()
+
return self.runInteraction("execute_sql", r)
def insert_many_txn(self, txn, table, headers, rows):
sql = "INSERT INTO %s (%s) VALUES (%s)" % (
table,
", ".join(k for k in headers),
- ", ".join("%s" for _ in headers)
+ ", ".join("%s" for _ in headers),
)
try:
txn.executemany(sql, rows)
- except:
- logger.exception(
- "Failed to insert: %s",
- table,
- )
+ except Exception:
+ logger.exception("Failed to insert: %s", table)
raise
@@ -206,7 +206,7 @@ class Porter(object):
"table_name": table,
"forward_rowid": 1,
"backward_rowid": 0,
- }
+ },
)
forward_chunk = 1
@@ -221,10 +221,10 @@ class Porter(object):
table, forward_chunk, backward_chunk
)
else:
+
def delete_all(txn):
txn.execute(
- "DELETE FROM port_from_sqlite3 WHERE table_name = %s",
- (table,)
+ "DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,)
)
txn.execute("TRUNCATE %s CASCADE" % (table,))
@@ -232,11 +232,7 @@ class Porter(object):
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
- values={
- "table_name": table,
- "forward_rowid": 1,
- "backward_rowid": 0,
- }
+ values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
)
forward_chunk = 1
@@ -251,12 +247,16 @@ class Porter(object):
)
@defer.inlineCallbacks
- def handle_table(self, table, postgres_size, table_size, forward_chunk,
- backward_chunk):
+ def handle_table(
+ self, table, postgres_size, table_size, forward_chunk, backward_chunk
+ ):
logger.info(
"Table %s: %i/%i (rows %i-%i) already ported",
- table, postgres_size, table_size,
- backward_chunk+1, forward_chunk-1,
+ table,
+ postgres_size,
+ table_size,
+ backward_chunk + 1,
+ forward_chunk - 1,
)
if not table_size:
@@ -271,7 +271,9 @@ class Porter(object):
return
if table in (
- "user_directory", "user_directory_search", "users_who_share_rooms",
+ "user_directory",
+ "user_directory_search",
+ "users_who_share_rooms",
"users_in_pubic_room",
):
# We don't port these tables, as they're a faff and we can regenreate
@@ -283,37 +285,35 @@ class Porter(object):
# We need to make sure there is a single row, `(X, null), as that is
# what synapse expects to be there.
yield self.postgres_store._simple_insert(
- table=table,
- values={"stream_id": None},
+ table=table, values={"stream_id": None}
)
self.progress.update(table, table_size) # Mark table as done
return
forward_select = (
- "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
- % (table,)
+ "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,)
)
backward_select = (
- "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
- % (table,)
+ "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" % (table,)
)
do_forward = [True]
do_backward = [True]
while True:
+
def r(txn):
forward_rows = []
backward_rows = []
if do_forward[0]:
- txn.execute(forward_select, (forward_chunk, self.batch_size,))
+ txn.execute(forward_select, (forward_chunk, self.batch_size))
forward_rows = txn.fetchall()
if not forward_rows:
do_forward[0] = False
if do_backward[0]:
- txn.execute(backward_select, (backward_chunk, self.batch_size,))
+ txn.execute(backward_select, (backward_chunk, self.batch_size))
backward_rows = txn.fetchall()
if not backward_rows:
do_backward[0] = False
@@ -325,9 +325,7 @@ class Porter(object):
return headers, forward_rows, backward_rows
- headers, frows, brows = yield self.sqlite_store.runInteraction(
- "select", r
- )
+ headers, frows, brows = yield self.sqlite_store.runInteraction("select", r)
if frows or brows:
if frows:
@@ -339,9 +337,7 @@ class Porter(object):
rows = self._convert_rows(table, headers, rows)
def insert(txn):
- self.postgres_store.insert_many_txn(
- txn, table, headers[1:], rows
- )
+ self.postgres_store.insert_many_txn(txn, table, headers[1:], rows)
self.postgres_store._simple_update_one_txn(
txn,
@@ -362,8 +358,9 @@ class Porter(object):
return
@defer.inlineCallbacks
- def handle_search_table(self, postgres_size, table_size, forward_chunk,
- backward_chunk):
+ def handle_search_table(
+ self, postgres_size, table_size, forward_chunk, backward_chunk
+ ):
select = (
"SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
" FROM event_search as es"
@@ -373,8 +370,9 @@ class Porter(object):
)
while True:
+
def r(txn):
- txn.execute(select, (forward_chunk, self.batch_size,))
+ txn.execute(select, (forward_chunk, self.batch_size))
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
@@ -402,18 +400,21 @@ class Porter(object):
else:
rows_dict.append(d)
- txn.executemany(sql, [
- (
- row["event_id"],
- row["room_id"],
- row["key"],
- row["sender"],
- row["value"],
- row["origin_server_ts"],
- row["stream_ordering"],
- )
- for row in rows_dict
- ])
+ txn.executemany(
+ sql,
+ [
+ (
+ row["event_id"],
+ row["room_id"],
+ row["key"],
+ row["sender"],
+ row["value"],
+ row["origin_server_ts"],
+ row["stream_ordering"],
+ )
+ for row in rows_dict
+ ],
+ )
self.postgres_store._simple_update_one_txn(
txn,
@@ -437,7 +438,8 @@ class Porter(object):
def setup_db(self, db_config, database_engine):
db_conn = database_engine.module.connect(
**{
- k: v for k, v in db_config.get("args", {}).items()
+ k: v
+ for k, v in db_config.get("args", {}).items()
if not k.startswith("cp_")
}
)
@@ -450,13 +452,11 @@ class Porter(object):
def run(self):
try:
sqlite_db_pool = adbapi.ConnectionPool(
- self.sqlite_config["name"],
- **self.sqlite_config["args"]
+ self.sqlite_config["name"], **self.sqlite_config["args"]
)
postgres_db_pool = adbapi.ConnectionPool(
- self.postgres_config["name"],
- **self.postgres_config["args"]
+ self.postgres_config["name"], **self.postgres_config["args"]
)
sqlite_engine = create_engine(sqlite_config)
@@ -465,9 +465,7 @@ class Porter(object):
self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
self.postgres_store = Store(postgres_db_pool, postgres_engine)
- yield self.postgres_store.execute(
- postgres_engine.check_database
- )
+ yield self.postgres_store.execute(postgres_engine.check_database)
# Step 1. Set up databases.
self.progress.set_state("Preparing SQLite3")
@@ -477,6 +475,7 @@ class Porter(object):
self.setup_db(postgres_config, postgres_engine)
self.progress.set_state("Creating port tables")
+
def create_port_table(txn):
txn.execute(
"CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
@@ -501,10 +500,9 @@ class Porter(object):
)
try:
- yield self.postgres_store.runInteraction(
- "alter_table", alter_table
- )
- except Exception as e:
+ yield self.postgres_store.runInteraction("alter_table", alter_table)
+ except Exception:
+ # On Error Resume Next
pass
yield self.postgres_store.runInteraction(
@@ -514,11 +512,7 @@ class Porter(object):
# Step 2. Get tables.
self.progress.set_state("Fetching tables")
sqlite_tables = yield self.sqlite_store._simple_select_onecol(
- table="sqlite_master",
- keyvalues={
- "type": "table",
- },
- retcol="name",
+ table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
)
postgres_tables = yield self.postgres_store._simple_select_onecol(
@@ -545,18 +539,14 @@ class Porter(object):
# Step 4. Do the copying.
self.progress.set_state("Copying to postgres")
yield defer.gatherResults(
- [
- self.handle_table(*res)
- for res in setup_res
- ],
- consumeErrors=True,
+ [self.handle_table(*res) for res in setup_res], consumeErrors=True
)
# Step 5. Do final post-processing
yield self._setup_state_group_id_seq()
self.progress.done()
- except:
+ except Exception:
global end_error_exec_info
end_error_exec_info = sys.exc_info()
logger.exception("")
@@ -566,9 +556,7 @@ class Porter(object):
def _convert_rows(self, table, headers, rows):
bool_col_names = BOOLEAN_COLUMNS.get(table, [])
- bool_cols = [
- i for i, h in enumerate(headers) if h in bool_col_names
- ]
+ bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names]
class BadValueException(Exception):
pass
@@ -577,18 +565,21 @@ class Porter(object):
if j in bool_cols:
return bool(col)
elif isinstance(col, string_types) and "\0" in col:
- logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
- raise BadValueException();
+ logger.warn(
+ "DROPPING ROW: NUL value in table %s col %s: %r",
+ table,
+ headers[j],
+ col,
+ )
+ raise BadValueException()
return col
outrows = []
for i, row in enumerate(rows):
try:
- outrows.append(tuple(
- conv(j, col)
- for j, col in enumerate(row)
- if j > 0
- ))
+ outrows.append(
+ tuple(conv(j, col) for j, col in enumerate(row) if j > 0)
+ )
except BadValueException:
pass
@@ -616,9 +607,7 @@ class Porter(object):
return headers, [r for r in rows if r[ts_ind] < yesterday]
- headers, rows = yield self.sqlite_store.runInteraction(
- "select", r,
- )
+ headers, rows = yield self.sqlite_store.runInteraction("select", r)
rows = self._convert_rows("sent_transactions", headers, rows)
@@ -639,7 +628,7 @@ class Porter(object):
txn.execute(
"SELECT rowid FROM sent_transactions WHERE ts >= ?"
" ORDER BY rowid ASC LIMIT 1",
- (yesterday,)
+ (yesterday,),
)
rows = txn.fetchall()
@@ -657,21 +646,17 @@ class Porter(object):
"table_name": "sent_transactions",
"forward_rowid": next_chunk,
"backward_rowid": 0,
- }
+ },
)
def get_sent_table_size(txn):
txn.execute(
- "SELECT count(*) FROM sent_transactions"
- " WHERE ts >= ?",
- (yesterday,)
+ "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
)
size, = txn.fetchone()
return int(size)
- remaining_count = yield self.sqlite_store.execute(
- get_sent_table_size
- )
+ remaining_count = yield self.sqlite_store.execute(get_sent_table_size)
total_count = remaining_count + inserted_rows
@@ -680,13 +665,11 @@ class Porter(object):
@defer.inlineCallbacks
def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
frows = yield self.sqlite_store.execute_sql(
- "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
- forward_chunk,
+ "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
)
brows = yield self.sqlite_store.execute_sql(
- "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
- backward_chunk,
+ "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
)
defer.returnValue(frows[0][0] + brows[0][0])
@@ -694,7 +677,7 @@ class Porter(object):
@defer.inlineCallbacks
def _get_already_ported_count(self, table):
rows = yield self.postgres_store.execute_sql(
- "SELECT count(*) FROM %s" % (table,),
+ "SELECT count(*) FROM %s" % (table,)
)
defer.returnValue(rows[0][0])
@@ -717,22 +700,21 @@ class Porter(object):
def _setup_state_group_id_seq(self):
def r(txn):
txn.execute("SELECT MAX(id) FROM state_groups")
- next_id = txn.fetchone()[0]+1
- txn.execute(
- "ALTER SEQUENCE state_group_id_seq RESTART WITH %s",
- (next_id,),
- )
+ next_id = txn.fetchone()[0] + 1
+ txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,))
+
return self.postgres_store.runInteraction("setup_state_group_id_seq", r)
##############################################
-###### The following is simply UI stuff ######
+# The following is simply UI stuff
##############################################
class Progress(object):
"""Used to report progress of the port
"""
+
def __init__(self):
self.tables = {}
@@ -758,6 +740,7 @@ class Progress(object):
class CursesProgress(Progress):
"""Reports progress to a curses window
"""
+
def __init__(self, stdscr):
self.stdscr = stdscr
@@ -801,7 +784,7 @@ class CursesProgress(Progress):
duration = int(now) - int(self.start_time)
minutes, seconds = divmod(duration, 60)
- duration_str = '%02dm %02ds' % (minutes, seconds,)
+ duration_str = '%02dm %02ds' % (minutes, seconds)
if self.finished:
status = "Time spent: %s (Done!)" % (duration_str,)
@@ -814,16 +797,12 @@ class CursesProgress(Progress):
est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
else:
est_remaining_str = "Unknown"
- status = (
- "Time spent: %s (est. remaining: %s)"
- % (duration_str, est_remaining_str,)
+ status = "Time spent: %s (est. remaining: %s)" % (
+ duration_str,
+ est_remaining_str,
)
- self.stdscr.addstr(
- 0, 0,
- status,
- curses.A_BOLD,
- )
+ self.stdscr.addstr(0, 0, status, curses.A_BOLD)
max_len = max([len(t) for t in self.tables.keys()])
@@ -831,9 +810,7 @@ class CursesProgress(Progress):
middle_space = 1
items = self.tables.items()
- items.sort(
- key=lambda i: (i[1]["perc"], i[0]),
- )
+ items.sort(key=lambda i: (i[1]["perc"], i[0]))
for i, (table, data) in enumerate(items):
if i + 2 >= rows:
@@ -844,9 +821,7 @@ class CursesProgress(Progress):
color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
self.stdscr.addstr(
- i + 2, left_margin + max_len - len(table),
- table,
- curses.A_BOLD | color,
+ i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color
)
size = 20
@@ -857,15 +832,13 @@ class CursesProgress(Progress):
)
self.stdscr.addstr(
- i + 2, left_margin + max_len + middle_space,
+ i + 2,
+ left_margin + max_len + middle_space,
"%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
)
if self.finished:
- self.stdscr.addstr(
- rows - 1, 0,
- "Press any key to exit...",
- )
+ self.stdscr.addstr(rows - 1, 0, "Press any key to exit...")
self.stdscr.refresh()
self.last_update = time.time()
@@ -877,29 +850,25 @@ class CursesProgress(Progress):
def set_state(self, state):
self.stdscr.clear()
- self.stdscr.addstr(
- 0, 0,
- state + "...",
- curses.A_BOLD,
- )
+ self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD)
self.stdscr.refresh()
class TerminalProgress(Progress):
"""Just prints progress to the terminal
"""
+
def update(self, table, num_done):
super(TerminalProgress, self).update(table, num_done)
data = self.tables[table]
- print "%s: %d%% (%d/%d)" % (
- table, data["perc"],
- data["num_done"], data["total"],
+ print(
+ "%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"])
)
def set_state(self, state):
- print state + "..."
+ print(state + "...")
##############################################
@@ -909,34 +878,38 @@ class TerminalProgress(Progress):
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A script to port an existing synapse SQLite database to"
- " a new PostgreSQL database."
+ " a new PostgreSQL database."
)
parser.add_argument("-v", action='store_true')
parser.add_argument(
- "--sqlite-database", required=True,
+ "--sqlite-database",
+ required=True,
help="The snapshot of the SQLite database file. This must not be"
- " currently used by a running synapse server"
+ " currently used by a running synapse server",
)
parser.add_argument(
- "--postgres-config", type=argparse.FileType('r'), required=True,
- help="The database config file for the PostgreSQL database"
+ "--postgres-config",
+ type=argparse.FileType('r'),
+ required=True,
+ help="The database config file for the PostgreSQL database",
)
parser.add_argument(
- "--curses", action='store_true',
- help="display a curses based progress UI"
+ "--curses", action='store_true', help="display a curses based progress UI"
)
parser.add_argument(
- "--batch-size", type=int, default=1000,
+ "--batch-size",
+ type=int,
+ default=1000,
help="The number of rows to select from the SQLite table each"
- " iteration [default=1000]",
+ " iteration [default=1000]",
)
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
- "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
+ "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}
if args.curses:
diff --git a/setup.cfg b/setup.cfg
index c2620be6..b6b4aa74 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -14,17 +14,17 @@ ignore =
pylint.cfg
tox.ini
-[pep8]
-max-line-length = 90
-# W503 requires that binary operators be at the end, not start, of lines. Erik
-# doesn't like it. E203 is contrary to PEP8.
-ignore = W503,E203
-
[flake8]
-# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
-# pep8 to do those checks), but not the "max-line-length" setting
max-line-length = 90
+# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
+# for error codes. The ones we ignore are:
+# W503: line break before binary operator
+# W504: line break after binary operator
+# E203: whitespace before ':' (which is contrary to pep8?)
+# E731: do not assign a lambda expression, use a def
+ignore=W503,W504,E203,E731
+
[isort]
line_length = 89
not_skip = __init__.py
diff --git a/setup.py b/setup.py
index b00c2af3..00b69c43 100755
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,8 @@
#!/usr/bin/env python
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014-2017 OpenMarket Ltd
+# Copyright 2017 Vector Creations Ltd
+# Copyright 2017-2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -86,7 +88,7 @@ setup(
name="matrix-synapse",
version=version,
packages=find_packages(exclude=["tests", "tests.*"]),
- description="Reference Synapse Home Server",
+ description="Reference homeserver for the Matrix decentralised comms protocol",
install_requires=dependencies['requirements'](include_conditional=True).keys(),
dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
include_package_data=True,
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 65a2b894..5a28fe2b 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -17,4 +17,14 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.33.4"
+try:
+ from twisted.internet import protocol
+ from twisted.internet.protocol import Factory
+ from twisted.names.dns import DNSDatagramProtocol
+ protocol.Factory.noisy = False
+ Factory.noisy = False
+ DNSDatagramProtocol.noisy = False
+except ImportError:
+ pass
+
+__version__ = "0.33.9"
diff --git a/synapse/_scripts/__init__.py b/synapse/_scripts/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/synapse/_scripts/__init__.py
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
new file mode 100644
index 00000000..70cecde4
--- /dev/null
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
+import getpass
+import hashlib
+import hmac
+import logging
+import sys
+
+from six.moves import input
+
+import requests as _requests
+import yaml
+
+
+def request_registration(
+ user,
+ password,
+ server_location,
+ shared_secret,
+ admin=False,
+ requests=_requests,
+ _print=print,
+ exit=sys.exit,
+):
+
+ url = "%s/_matrix/client/r0/admin/register" % (server_location,)
+
+ # Get the nonce
+ r = requests.get(url, verify=False)
+
+ if r.status_code is not 200:
+ _print("ERROR! Received %d %s" % (r.status_code, r.reason))
+ if 400 <= r.status_code < 500:
+ try:
+ _print(r.json()["error"])
+ except Exception:
+ pass
+ return exit(1)
+
+ nonce = r.json()["nonce"]
+
+ mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=hashlib.sha1)
+
+ mac.update(nonce.encode('utf8'))
+ mac.update(b"\x00")
+ mac.update(user.encode('utf8'))
+ mac.update(b"\x00")
+ mac.update(password.encode('utf8'))
+ mac.update(b"\x00")
+ mac.update(b"admin" if admin else b"notadmin")
+
+ mac = mac.hexdigest()
+
+ data = {
+ "nonce": nonce,
+ "username": user,
+ "password": password,
+ "mac": mac,
+ "admin": admin,
+ }
+
+ _print("Sending registration request...")
+ r = requests.post(url, json=data, verify=False)
+
+ if r.status_code is not 200:
+ _print("ERROR! Received %d %s" % (r.status_code, r.reason))
+ if 400 <= r.status_code < 500:
+ try:
+ _print(r.json()["error"])
+ except Exception:
+ pass
+ return exit(1)
+
+ _print("Success!")
+
+
+def register_new_user(user, password, server_location, shared_secret, admin):
+ if not user:
+ try:
+ default_user = getpass.getuser()
+ except Exception:
+ default_user = None
+
+ if default_user:
+ user = input("New user localpart [%s]: " % (default_user,))
+ if not user:
+ user = default_user
+ else:
+ user = input("New user localpart: ")
+
+ if not user:
+ print("Invalid user name")
+ sys.exit(1)
+
+ if not password:
+ password = getpass.getpass("Password: ")
+
+ if not password:
+ print("Password cannot be blank.")
+ sys.exit(1)
+
+ confirm_password = getpass.getpass("Confirm password: ")
+
+ if password != confirm_password:
+ print("Passwords do not match")
+ sys.exit(1)
+
+ if admin is None:
+ admin = input("Make admin [no]: ")
+ if admin in ("y", "yes", "true"):
+ admin = True
+ else:
+ admin = False
+
+ request_registration(user, password, server_location, shared_secret, bool(admin))
+
+
+def main():
+
+ logging.captureWarnings(True)
+
+ parser = argparse.ArgumentParser(
+ description="Used to register new users with a given home server when"
+ " registration has been disabled. The home server must be"
+ " configured with the 'registration_shared_secret' option"
+ " set."
+ )
+ parser.add_argument(
+ "-u",
+ "--user",
+ default=None,
+ help="Local part of the new user. Will prompt if omitted.",
+ )
+ parser.add_argument(
+ "-p",
+ "--password",
+ default=None,
+ help="New password for user. Will prompt if omitted.",
+ )
+ admin_group = parser.add_mutually_exclusive_group()
+ admin_group.add_argument(
+ "-a",
+ "--admin",
+ action="store_true",
+ help=(
+ "Register new user as an admin. "
+ "Will prompt if --no-admin is not set either."
+ ),
+ )
+ admin_group.add_argument(
+ "--no-admin",
+ action="store_true",
+ help=(
+ "Register new user as a regular user. "
+ "Will prompt if --admin is not set either."
+ ),
+ )
+
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument(
+ "-c",
+ "--config",
+ type=argparse.FileType('r'),
+ help="Path to server config file. Used to read in shared secret.",
+ )
+
+ group.add_argument(
+ "-k", "--shared-secret", help="Shared secret as defined in server config file."
+ )
+
+ parser.add_argument(
+ "server_url",
+ default="https://localhost:8448",
+ nargs='?',
+ help="URL to use to talk to the home server. Defaults to "
+ " 'https://localhost:8448'.",
+ )
+
+ args = parser.parse_args()
+
+ if "config" in args and args.config:
+ config = yaml.safe_load(args.config)
+ secret = config.get("registration_shared_secret", None)
+ if not secret:
+ print("No 'registration_shared_secret' defined in config.")
+ sys.exit(1)
+ else:
+ secret = args.shared_secret
+
+ admin = None
+ if args.admin or args.no_admin:
+ admin = args.admin
+
+ register_new_user(args.user, args.password, args.server_url, secret, admin)
+
+
+if __name__ == "__main__":
+ main()
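The new register_new_matrix_user helper above authenticates against the /_matrix/client/r0/admin/register endpoint by HMAC-ing the nonce, localpart, password and admin flag with the homeserver's registration_shared_secret. A minimal sketch of that same computation, using only the standard library and hypothetical input values, for anyone checking a MAC by hand:

import hashlib
import hmac

# Hypothetical inputs; the nonce is returned by GET /_matrix/client/r0/admin/register
shared_secret = "s3cr3t"
nonce = "abcdef1234"
user = "alice"
password = "wonderland"
admin = False

mac = hmac.new(key=shared_secret.encode("utf8"), digestmod=hashlib.sha1)
# Fields are separated by NUL bytes, mirroring the script above
mac.update(nonce.encode("utf8"))
mac.update(b"\x00")
mac.update(user.encode("utf8"))
mac.update(b"\x00")
mac.update(password.encode("utf8"))
mac.update(b"\x00")
mac.update(b"admin" if admin else b"notadmin")
print(mac.hexdigest())  # the value sent as "mac" in the POST body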
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index c2630c4c..f20e0fcf 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -51,6 +51,7 @@ class LoginType(object):
EMAIL_IDENTITY = u"m.login.email.identity"
MSISDN = u"m.login.msisdn"
RECAPTCHA = u"m.login.recaptcha"
+ TERMS = u"m.login.terms"
DUMMY = u"m.login.dummy"
# Only for C/S API v1
@@ -61,6 +62,7 @@ class LoginType(object):
class EventTypes(object):
Member = "m.room.member"
Create = "m.room.create"
+ Tombstone = "m.room.tombstone"
JoinRules = "m.room.join_rules"
PowerLevels = "m.room.power_levels"
Aliases = "m.room.aliases"
@@ -101,6 +103,7 @@ class ThirdPartyEntityKind(object):
class RoomVersions(object):
V1 = "1"
VDH_TEST = "vdh-test-version"
+ STATE_V2_TEST = "state-v2-test"
# the version we will give rooms which are created on this server
@@ -108,7 +111,11 @@ DEFAULT_ROOM_VERSION = RoomVersions.V1
# vdh-test-version is a placeholder to get room versioning support working and tested
# until we have a working v2.
-KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}
+KNOWN_ROOM_VERSIONS = {
+ RoomVersions.V1,
+ RoomVersions.VDH_TEST,
+ RoomVersions.STATE_V2_TEST,
+}
ServerNoticeMsgType = "m.server_notice"
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 2e7f9840..48b90337 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -59,6 +59,7 @@ class Codes(object):
RESOURCE_LIMIT_EXCEEDED = "M_RESOURCE_LIMIT_EXCEEDED"
UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
+ WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
class CodeMessageException(RuntimeError):
@@ -312,6 +313,20 @@ class LimitExceededError(SynapseError):
)
+class RoomKeysVersionError(SynapseError):
+ """A client has tried to upload to a non-current version of the room_keys store
+ """
+ def __init__(self, current_version):
+ """
+ Args:
+ current_version (str): the current version of the store they should have used
+ """
+ super(RoomKeysVersionError, self).__init__(
+ 403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION
+ )
+ self.current_version = current_version
+
+
class IncompatibleRoomVersionError(SynapseError):
"""A server is trying to join a room whose version it does not support."""
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 186831e1..677c0bdd 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -172,7 +172,10 @@ USER_FILTER_SCHEMA = {
# events a lot easier as we can then use a negative lookbehind
# assertion to split '\.' If we allowed \\ then it would
# incorrectly split '\\.' See synapse.events.utils.serialize_event
- "pattern": "^((?!\\\).)*$"
+ #
+ # Note that because this is a regular expression, we have to escape
+ # each backslash in the pattern.
+ "pattern": r"^((?!\\\\).)*$"
}
}
},
@@ -226,7 +229,7 @@ class Filtering(object):
jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
format_checker=FormatChecker())
except jsonschema.ValidationError as e:
- raise SynapseError(400, e.message)
+ raise SynapseError(400, str(e))
class FilterCollection(object):
@@ -251,6 +254,7 @@ class FilterCollection(object):
"include_leave", False
)
self.event_fields = filter_json.get("event_fields", [])
+ self.event_format = filter_json.get("event_format", "client")
def __repr__(self):
return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index 71347912..f78695b6 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -28,7 +28,6 @@ FEDERATION_PREFIX = "/_matrix/federation/v1"
STATIC_PREFIX = "/_matrix/static"
WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"
-SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
@@ -64,7 +63,7 @@ class ConsentURIBuilder(object):
"""
mac = hmac.new(
key=self._hmac_secret,
- msg=user_id,
+ msg=user_id.encode('ascii'),
digestmod=sha256,
).hexdigest()
consent_uri = "%s_matrix/consent?%s" % (
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
index 3b6b9368..c3afcc57 100644
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -24,7 +24,7 @@ try:
python_dependencies.check_requirements()
except python_dependencies.MissingRequirementError as e:
message = "\n".join([
- "Missing Requirement: %s" % (e.message,),
+ "Missing Requirement: %s" % (str(e),),
"To install run:",
" pip install --upgrade --force \"%s\"" % (e.dependency,),
"",
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 7c866e24..18584226 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -17,6 +17,7 @@ import gc
import logging
import sys
+import psutil
from daemonize import Daemonize
from twisted.internet import error, reactor
@@ -24,12 +25,6 @@ from twisted.internet import error, reactor
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
-try:
- import affinity
-except Exception:
- affinity = None
-
-
logger = logging.getLogger(__name__)
@@ -89,15 +84,20 @@ def start_reactor(
with PreserveLoggingContext():
logger.info("Running")
if cpu_affinity is not None:
- if not affinity:
- quit_with_error(
- "Missing package 'affinity' required for cpu_affinity\n"
- "option\n\n"
- "Install by running:\n\n"
- " pip install affinity\n\n"
- )
- logger.info("Setting CPU affinity to %s" % cpu_affinity)
- affinity.set_process_affinity_mask(0, cpu_affinity)
+ # Turn the bitmask into bits, reverse it so we go from 0 up
+ mask_to_bits = bin(cpu_affinity)[2:][::-1]
+
+ cpus = []
+ cpu_num = 0
+
+ for i in mask_to_bits:
+ if i == "1":
+ cpus.append(cpu_num)
+ cpu_num += 1
+
+ p = psutil.Process()
+ p.cpu_affinity(cpus)
+
change_resource_limit(soft_file_limit)
if gc_thresholds:
gc.set_threshold(*gc_thresholds)
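The replacement CPU-affinity code above treats cpu_affinity as a bitmask in which bit i selects CPU i, then pins the process via psutil instead of the removed affinity package. A short sketch of the same conversion with a hypothetical mask value (assuming psutil is installed, as the new import implies):

import psutil

cpu_affinity = 0b0101  # hypothetical mask: bits 0 and 2 set

# Reverse the bit string so index 0 corresponds to CPU 0, as in the hunk above
cpus = [n for n, bit in enumerate(bin(cpu_affinity)[2:][::-1]) if bit == "1"]
assert cpus == [0, 2]

psutil.Process().cpu_affinity(cpus)  # pin the current process to CPUs 0 and 2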
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index 86b50674..8559e141 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -136,7 +136,7 @@ def start(config_options):
"Synapse appservice", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.appservice"
@@ -172,7 +172,6 @@ def start(config_options):
def start():
ps.get_datastore().start_profiling()
- ps.get_state_handler().start_caching()
reactor.callWhenRunning(start)
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index ce2b113d..76aed8c6 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -153,7 +153,7 @@ def start(config_options):
"Synapse client reader", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.client_reader"
@@ -181,7 +181,6 @@ def start(config_options):
ss.start_listening(config.worker_listeners)
def start():
- ss.get_state_handler().start_caching()
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index f98e456e..e4a68715 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -169,7 +169,7 @@ def start(config_options):
"Synapse event creator", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.event_creator"
@@ -178,6 +178,9 @@ def start(config_options):
setup_logging(config, use_worker_options=True)
+ # This should only be done on the user directory worker or the master
+ config.update_user_directory = False
+
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -199,7 +202,6 @@ def start(config_options):
ss.start_listening(config.worker_listeners)
def start():
- ss.get_state_handler().start_caching()
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 60f59735..228a297f 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -140,7 +140,7 @@ def start(config_options):
"Synapse federation reader", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.federation_reader"
@@ -168,7 +168,6 @@ def start(config_options):
ss.start_listening(config.worker_listeners)
def start():
- ss.get_state_handler().start_caching()
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index 60dd09aa..e9a99d76 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -160,7 +160,7 @@ def start(config_options):
"Synapse federation sender", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.federation_sender"
@@ -201,7 +201,6 @@ def start(config_options):
def start():
ps.get_datastore().start_profiling()
- ps.get_state_handler().start_caching()
reactor.callWhenRunning(start)
_base.start_worker_reactor("synapse-federation-sender", config)
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index 8c0b9c67..f5c61dec 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -68,7 +68,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
"Authorization": auth_headers,
}
result = yield self.http_client.get_json(
- self.main_uri + request.uri,
+ self.main_uri + request.uri.decode('ascii'),
headers=headers,
)
defer.returnValue((200, result))
@@ -125,7 +125,7 @@ class KeyUploadServlet(RestServlet):
"Authorization": auth_headers,
}
result = yield self.http_client.post_json_get_json(
- self.main_uri + request.uri,
+ self.main_uri + request.uri.decode('ascii'),
body,
headers=headers,
)
@@ -228,7 +228,7 @@ def start(config_options):
"Synapse frontend proxy", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.frontend_proxy"
@@ -258,7 +258,6 @@ def start(config_options):
ss.start_listening(config.worker_listeners)
def start():
- ss.get_state_handler().start_caching()
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 3eb5b663..415374a2 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -20,6 +20,7 @@ import sys
from six import iteritems
+import psutil
from prometheus_client import Gauge
from twisted.application import service
@@ -36,7 +37,6 @@ from synapse.api.urls import (
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_PREFIX,
- SERVER_KEY_PREFIX,
SERVER_KEY_V2_PREFIX,
STATIC_PREFIX,
WEB_CLIENT_PREFIX,
@@ -58,7 +58,6 @@ from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirem
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
-from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.server import HomeServer
@@ -235,10 +234,7 @@ class SynapseHomeServer(HomeServer):
)
if name in ["keys", "federation"]:
- resources.update({
- SERVER_KEY_PREFIX: LocalKey(self),
- SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
- })
+ resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
if name == "webclient":
resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
@@ -301,12 +297,16 @@ class SynapseHomeServer(HomeServer):
try:
database_engine.check_database(db_conn.cursor())
except IncorrectDatabaseSetup as e:
- quit_with_error(e.message)
+ quit_with_error(str(e))
# Gauges to expose monthly active user control metrics
current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
+registered_reserved_users_mau_gauge = Gauge(
+ "synapse_admin_mau:registered_reserved_users",
+ "Registered users with reserved threepids"
+)
def setup(config_options):
@@ -324,7 +324,7 @@ def setup(config_options):
config_options,
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
if not config:
@@ -380,10 +380,8 @@ def setup(config_options):
def start():
hs.get_pusherpool().start()
- hs.get_state_handler().start_caching()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
- hs.get_federation_client().start_get_pdu_cache()
reactor.callWhenRunning(start)
@@ -453,6 +451,10 @@ def run(hs):
stats["homeserver"] = hs.config.server_name
stats["timestamp"] = now
stats["uptime_seconds"] = uptime
+ version = sys.version_info
+ stats["python_version"] = "{}.{}.{}".format(
+ version.major, version.minor, version.micro
+ )
stats["total_users"] = yield hs.get_datastore().count_all_users()
total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
@@ -496,7 +498,6 @@ def run(hs):
def performance_stats_init():
try:
- import psutil
process = psutil.Process()
# Ensure we can fetch both, and make the initial request for cpu_percent
# so the next request will use this as the initial point.
@@ -504,12 +505,9 @@ def run(hs):
process.cpu_percent(interval=None)
logger.info("report_stats can use psutil")
stats_process.append(process)
- except (ImportError, AttributeError):
- logger.warn(
- "report_stats enabled but psutil is not installed or incorrect version."
- " Disabling reporting of memory/cpu stats."
- " Ensuring psutil is available will help matrix.org track performance"
- " changes across releases."
+ except (AttributeError):
+ logger.warning(
+ "Unable to read memory/cpu stats. Disabling reporting."
)
def generate_user_daily_visit_stats():
@@ -524,25 +522,35 @@ def run(hs):
clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)
# monthly active user limiting functionality
- clock.looping_call(
- hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60
- )
- hs.get_datastore().reap_monthly_active_users()
+ def reap_monthly_active_users():
+ return run_as_background_process(
+ "reap_monthly_active_users",
+ hs.get_datastore().reap_monthly_active_users,
+ )
+ clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60)
+ reap_monthly_active_users()
@defer.inlineCallbacks
def generate_monthly_active_users():
- count = 0
+ current_mau_count = 0
+ reserved_count = 0
+ store = hs.get_datastore()
if hs.config.limit_usage_by_mau:
- count = yield hs.get_datastore().get_monthly_active_count()
- current_mau_gauge.set(float(count))
+ current_mau_count = yield store.get_monthly_active_count()
+ reserved_count = yield store.get_registered_reserved_users_count()
+ current_mau_gauge.set(float(current_mau_count))
+ registered_reserved_users_mau_gauge.set(float(reserved_count))
max_mau_gauge.set(float(hs.config.max_mau_value))
- hs.get_datastore().initialise_reserved_users(
- hs.config.mau_limits_reserved_threepids
- )
- generate_monthly_active_users()
+ def start_generate_monthly_active_users():
+ return run_as_background_process(
+ "generate_monthly_active_users",
+ generate_monthly_active_users,
+ )
+
+ start_generate_monthly_active_users()
if hs.config.limit_usage_by_mau:
- clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
+ clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
# End of monthly active user settings
if hs.config.report_stats:
@@ -558,7 +566,7 @@ def run(hs):
clock.call_later(5 * 60, start_phone_stats_home)
if hs.config.daemonize and hs.config.print_pidfile:
- print (hs.config.pid_file)
+ print(hs.config.pid_file)
_base.start_reactor(
"synapse-homeserver",
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index e3dbb3b4..acc0487a 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -133,7 +133,7 @@ def start(config_options):
"Synapse media repository", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.media_repository"
@@ -168,7 +168,6 @@ def start(config_options):
ss.start_listening(config.worker_listeners)
def start():
- ss.get_state_handler().start_caching()
ss.get_datastore().start_profiling()
reactor.callWhenRunning(start)
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 244c604d..83b0863f 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -28,6 +28,7 @@ from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import __func__
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
@@ -49,31 +50,31 @@ class PusherSlaveStore(
SlavedAccountDataStore
):
update_pusher_last_stream_ordering_and_success = (
- DataStore.update_pusher_last_stream_ordering_and_success.__func__
+ __func__(DataStore.update_pusher_last_stream_ordering_and_success)
)
update_pusher_failing_since = (
- DataStore.update_pusher_failing_since.__func__
+ __func__(DataStore.update_pusher_failing_since)
)
update_pusher_last_stream_ordering = (
- DataStore.update_pusher_last_stream_ordering.__func__
+ __func__(DataStore.update_pusher_last_stream_ordering)
)
get_throttle_params_by_room = (
- DataStore.get_throttle_params_by_room.__func__
+ __func__(DataStore.get_throttle_params_by_room)
)
set_throttle_params = (
- DataStore.set_throttle_params.__func__
+ __func__(DataStore.set_throttle_params)
)
get_time_of_last_push_action_before = (
- DataStore.get_time_of_last_push_action_before.__func__
+ __func__(DataStore.get_time_of_last_push_action_before)
)
get_profile_displayname = (
- DataStore.get_profile_displayname.__func__
+ __func__(DataStore.get_profile_displayname)
)
@@ -160,11 +161,11 @@ class PusherReplicationHandler(ReplicationClientHandler):
else:
yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
elif stream_name == "events":
- self.pusher_pool.on_new_notifications(
+ yield self.pusher_pool.on_new_notifications(
token, token,
)
elif stream_name == "receipts":
- self.pusher_pool.on_new_receipts(
+ yield self.pusher_pool.on_new_receipts(
token, token, set(row.room_id for row in rows)
)
except Exception:
@@ -182,7 +183,7 @@ class PusherReplicationHandler(ReplicationClientHandler):
def start_pusher(self, user_id, app_id, pushkey):
key = "%s:%s" % (app_id, pushkey)
logger.info("Starting pusher %r / %r", user_id, key)
- return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id)
+ return self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
def start(config_options):
@@ -191,7 +192,7 @@ def start(config_options):
"Synapse pusher", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.pusher"
@@ -228,7 +229,6 @@ def start(config_options):
def start():
ps.get_pusherpool().start()
ps.get_datastore().start_profiling()
- ps.get_state_handler().start_caching()
reactor.callWhenRunning(start)
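The __func__(...) wrapper that replaces the old DataStore.method.__func__ lookups above papers over a Python 2/3 difference: on Python 2 an attribute looked up on a class is an unbound method whose underlying function sits in .__func__, while on Python 3 it is already a plain function. A rough sketch of how such a helper could behave (the real one lives in synapse.replication.slave.storage._base and may be implemented differently):

def __func__(inp):
    # Unbound methods (Python 2) carry the plain function in __func__;
    # on Python 3 the attribute is absent, so return the object unchanged.
    if hasattr(inp, "__func__"):
        return inp.__func__
    return inp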
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 66623407..0354e82b 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -33,7 +33,7 @@ from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
-from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
@@ -147,7 +147,7 @@ class SynchrotronPresence(object):
and haven't come back yet. If there are poke the master about them.
"""
now = self.clock.time_msec()
- for user_id, last_sync_ms in self.users_going_offline.items():
+ for user_id, last_sync_ms in list(self.users_going_offline.items()):
if now - last_sync_ms > 10 * 1000:
self.users_going_offline.pop(user_id, None)
self.send_user_sync(user_id, False, last_sync_ms)
@@ -156,9 +156,9 @@ class SynchrotronPresence(object):
# TODO How's this supposed to work?
pass
- get_states = PresenceHandler.get_states.__func__
- get_state = PresenceHandler.get_state.__func__
- current_state_for_users = PresenceHandler.current_state_for_users.__func__
+ get_states = __func__(PresenceHandler.get_states)
+ get_state = __func__(PresenceHandler.get_state)
+ current_state_for_users = __func__(PresenceHandler.current_state_for_users)
def user_syncing(self, user_id, affect_presence):
if affect_presence:
@@ -208,7 +208,7 @@ class SynchrotronPresence(object):
) for row in rows]
for state in states:
- self.user_to_current_state[row.user_id] = state
+ self.user_to_current_state[state.user_id] = state
stream_id = token
yield self.notify_from_replication(states, stream_id)
@@ -226,7 +226,15 @@ class SynchrotronPresence(object):
class SynchrotronTyping(object):
def __init__(self, hs):
self._latest_room_serial = 0
+ self._reset()
+
+ def _reset(self):
+ """
+ Reset the typing handler's data caches.
+ """
+ # map room IDs to serial numbers
self._room_serials = {}
+ # map room IDs to sets of users currently typing
self._room_typing = {}
def stream_positions(self):
@@ -236,6 +244,12 @@ class SynchrotronTyping(object):
return {"typing": self._latest_room_serial}
def process_replication_rows(self, token, rows):
+ if self._latest_room_serial > token:
+ # The master has gone backwards. To prevent inconsistent data, just
+ # clear everything.
+ self._reset()
+
+ # Set the latest serial token to whatever the server gave us.
self._latest_room_serial = token
for row in rows:
@@ -410,7 +424,7 @@ def start(config_options):
"Synapse synchrotron", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.synchrotron"
@@ -435,7 +449,6 @@ def start(config_options):
def start():
ss.get_datastore().start_profiling()
- ss.get_state_handler().start_caching()
reactor.callWhenRunning(start)
diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py
deleted file mode 100755
index d658f967..00000000
--- a/synapse/app/synctl.py
+++ /dev/null
@@ -1,284 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import argparse
-import collections
-import errno
-import glob
-import os
-import os.path
-import signal
-import subprocess
-import sys
-import time
-
-from six import iteritems
-
-import yaml
-
-SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
-
-GREEN = "\x1b[1;32m"
-YELLOW = "\x1b[1;33m"
-RED = "\x1b[1;31m"
-NORMAL = "\x1b[m"
-
-
-def pid_running(pid):
- try:
- os.kill(pid, 0)
- return True
- except OSError as err:
- if err.errno == errno.EPERM:
- return True
- return False
-
-
-def write(message, colour=NORMAL, stream=sys.stdout):
- if colour == NORMAL:
- stream.write(message + "\n")
- else:
- stream.write(colour + message + NORMAL + "\n")
-
-
-def abort(message, colour=RED, stream=sys.stderr):
- write(message, colour, stream)
- sys.exit(1)
-
-
-def start(configfile):
- write("Starting ...")
- args = SYNAPSE
- args.extend(["--daemonize", "-c", configfile])
-
- try:
- subprocess.check_call(args)
- write("started synapse.app.homeserver(%r)" %
- (configfile,), colour=GREEN)
- except subprocess.CalledProcessError as e:
- write(
- "error starting (exit code: %d); see above for logs" % e.returncode,
- colour=RED,
- )
-
-
-def start_worker(app, configfile, worker_configfile):
- args = [
- "python", "-B",
- "-m", app,
- "-c", configfile,
- "-c", worker_configfile
- ]
-
- try:
- subprocess.check_call(args)
- write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
- except subprocess.CalledProcessError as e:
- write(
- "error starting %s(%r) (exit code: %d); see above for logs" % (
- app, worker_configfile, e.returncode,
- ),
- colour=RED,
- )
-
-
-def stop(pidfile, app):
- if os.path.exists(pidfile):
- pid = int(open(pidfile).read())
- try:
- os.kill(pid, signal.SIGTERM)
- write("stopped %s" % (app,), colour=GREEN)
- except OSError as err:
- if err.errno == errno.ESRCH:
- write("%s not running" % (app,), colour=YELLOW)
- elif err.errno == errno.EPERM:
- abort("Cannot stop %s: Operation not permitted" % (app,))
- else:
- abort("Cannot stop %s: Unknown error" % (app,))
-
-
-Worker = collections.namedtuple("Worker", [
- "app", "configfile", "pidfile", "cache_factor"
-])
-
-
-def main():
-
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- "action",
- choices=["start", "stop", "restart"],
- help="whether to start, stop or restart the synapse",
- )
- parser.add_argument(
- "configfile",
- nargs="?",
- default="homeserver.yaml",
- help="the homeserver config file, defaults to homeserver.yaml",
- )
- parser.add_argument(
- "-w", "--worker",
- metavar="WORKERCONFIG",
- help="start or stop a single worker",
- )
- parser.add_argument(
- "-a", "--all-processes",
- metavar="WORKERCONFIGDIR",
- help="start or stop all the workers in the given directory"
- " and the main synapse process",
- )
-
- options = parser.parse_args()
-
- if options.worker and options.all_processes:
- write(
- 'Cannot use "--worker" with "--all-processes"',
- stream=sys.stderr
- )
- sys.exit(1)
-
- configfile = options.configfile
-
- if not os.path.exists(configfile):
- write(
- "No config file found\n"
- "To generate a config file, run '%s -c %s --generate-config"
- " --server-name=<server name>'\n" % (
- " ".join(SYNAPSE), options.configfile
- ),
- stream=sys.stderr,
- )
- sys.exit(1)
-
- with open(configfile) as stream:
- config = yaml.load(stream)
-
- pidfile = config["pid_file"]
- cache_factor = config.get("synctl_cache_factor")
- start_stop_synapse = True
-
- if cache_factor:
- os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
-
- cache_factors = config.get("synctl_cache_factors", {})
- for cache_name, factor in iteritems(cache_factors):
- os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
-
- worker_configfiles = []
- if options.worker:
- start_stop_synapse = False
- worker_configfile = options.worker
- if not os.path.exists(worker_configfile):
- write(
- "No worker config found at %r" % (worker_configfile,),
- stream=sys.stderr,
- )
- sys.exit(1)
- worker_configfiles.append(worker_configfile)
-
- if options.all_processes:
- # To start the main synapse with -a you need to add a worker file
- # with worker_app == "synapse.app.homeserver"
- start_stop_synapse = False
- worker_configdir = options.all_processes
- if not os.path.isdir(worker_configdir):
- write(
- "No worker config directory found at %r" % (worker_configdir,),
- stream=sys.stderr,
- )
- sys.exit(1)
- worker_configfiles.extend(sorted(glob.glob(
- os.path.join(worker_configdir, "*.yaml")
- )))
-
- workers = []
- for worker_configfile in worker_configfiles:
- with open(worker_configfile) as stream:
- worker_config = yaml.load(stream)
- worker_app = worker_config["worker_app"]
- if worker_app == "synapse.app.homeserver":
- # We need to special case all of this to pick up options that may
- # be set in the main config file or in this worker config file.
- worker_pidfile = (
- worker_config.get("pid_file")
- or pidfile
- )
- worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor
- daemonize = worker_config.get("daemonize") or config.get("daemonize")
- assert daemonize, "Main process must have daemonize set to true"
-
- # The master process doesn't support using worker_* config.
- for key in worker_config:
- if key == "worker_app": # But we allow worker_app
- continue
- assert not key.startswith("worker_"), \
- "Main process cannot use worker_* config"
- else:
- worker_pidfile = worker_config["worker_pid_file"]
- worker_daemonize = worker_config["worker_daemonize"]
- assert worker_daemonize, "In config %r: expected '%s' to be True" % (
- worker_configfile, "worker_daemonize")
- worker_cache_factor = worker_config.get("synctl_cache_factor")
- workers.append(Worker(
- worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
- ))
-
- action = options.action
-
- if action == "stop" or action == "restart":
- for worker in workers:
- stop(worker.pidfile, worker.app)
-
- if start_stop_synapse:
- stop(pidfile, "synapse.app.homeserver")
-
- # Wait for synapse to actually shutdown before starting it again
- if action == "restart":
- running_pids = []
- if start_stop_synapse and os.path.exists(pidfile):
- running_pids.append(int(open(pidfile).read()))
- for worker in workers:
- if os.path.exists(worker.pidfile):
- running_pids.append(int(open(worker.pidfile).read()))
- if len(running_pids) > 0:
- write("Waiting for process to exit before restarting...")
- for running_pid in running_pids:
- while pid_running(running_pid):
- time.sleep(0.2)
- write("All processes exited; now restarting...")
-
- if action == "start" or action == "restart":
- if start_stop_synapse:
- # Check if synapse is already running
- if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
- abort("synapse.app.homeserver already running")
- start(configfile)
-
- for worker in workers:
- if worker.cache_factor:
- os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
-
- start_worker(worker.app, configfile, worker.configfile)
-
- if cache_factor:
- os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
- else:
- os.environ.pop("SYNAPSE_CACHE_FACTOR", None)
-
-
-if __name__ == "__main__":
- main()
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index 96ffcaf0..0a5f62b5 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -188,7 +188,7 @@ def start(config_options):
"Synapse user directory", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.user_dir"
@@ -229,7 +229,6 @@ def start(config_options):
def start():
ps.get_datastore().start_profiling()
- ps.get_state_handler().start_caching()
reactor.callWhenRunning(start)
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 6980e589..9ccc5a80 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -13,7 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-import urllib
+
+from six.moves import urllib
from prometheus_client import Counter
@@ -98,7 +99,7 @@ class ApplicationServiceApi(SimpleHttpClient):
def query_user(self, service, user_id):
if service.url is None:
defer.returnValue(False)
- uri = service.url + ("/users/%s" % urllib.quote(user_id))
+ uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
response = None
try:
response = yield self.get_json(uri, {
@@ -119,7 +120,7 @@ class ApplicationServiceApi(SimpleHttpClient):
def query_alias(self, service, alias):
if service.url is None:
defer.returnValue(False)
- uri = service.url + ("/rooms/%s" % urllib.quote(alias))
+ uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
response = None
try:
response = yield self.get_json(uri, {
@@ -153,7 +154,7 @@ class ApplicationServiceApi(SimpleHttpClient):
service.url,
APP_SERVICE_PREFIX,
kind,
- urllib.quote(protocol)
+ urllib.parse.quote(protocol)
)
try:
response = yield self.get_json(uri, fields)
@@ -188,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient):
uri = "%s%s/thirdparty/protocol/%s" % (
service.url,
APP_SERVICE_PREFIX,
- urllib.quote(protocol)
+ urllib.parse.quote(protocol)
)
try:
info = yield self.get_json(uri, {})
@@ -228,7 +229,7 @@ class ApplicationServiceApi(SimpleHttpClient):
txn_id = str(txn_id)
uri = service.url + ("/transactions/%s" %
- urllib.quote(txn_id))
+ urllib.parse.quote(txn_id))
try:
yield self.put_json(
uri=uri,
diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py
index 58c97a70..79fe9c3d 100644
--- a/synapse/config/__main__.py
+++ b/synapse/config/__main__.py
@@ -25,10 +25,10 @@ if __name__ == "__main__":
try:
config = HomeServerConfig.load_config("", sys.argv[3:])
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
- print (getattr(config, key))
+ print(getattr(config, key))
sys.exit(0)
else:
sys.stderr.write("Unknown command %r\n" % (action,))
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 3d2e90dd..14dae65e 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -106,10 +106,7 @@ class Config(object):
@classmethod
def check_file(cls, file_path, config_name):
if file_path is None:
- raise ConfigError(
- "Missing config for %s."
- % (config_name,)
- )
+ raise ConfigError("Missing config for %s." % (config_name,))
try:
os.stat(file_path)
except OSError as e:
@@ -128,9 +125,7 @@ class Config(object):
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(dir_path):
- raise ConfigError(
- "%s is not a directory" % (dir_path,)
- )
+ raise ConfigError("%s is not a directory" % (dir_path,))
return dir_path
@classmethod
@@ -156,21 +151,20 @@ class Config(object):
return results
def generate_config(
- self,
- config_dir_path,
- server_name,
- is_generating_file,
- report_stats=None,
+ self, config_dir_path, server_name, is_generating_file, report_stats=None
):
default_config = "# vim:ft=yaml\n"
- default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
- "default_config",
- config_dir_path=config_dir_path,
- server_name=server_name,
- is_generating_file=is_generating_file,
- report_stats=report_stats,
- ))
+ default_config += "\n\n".join(
+ dedent(conf)
+ for conf in self.invoke_all(
+ "default_config",
+ config_dir_path=config_dir_path,
+ server_name=server_name,
+ is_generating_file=is_generating_file,
+ report_stats=report_stats,
+ )
+ )
config = yaml.load(default_config)
@@ -178,23 +172,22 @@ class Config(object):
@classmethod
def load_config(cls, description, argv):
- config_parser = argparse.ArgumentParser(
- description=description,
- )
+ config_parser = argparse.ArgumentParser(description=description)
config_parser.add_argument(
- "-c", "--config-path",
+ "-c",
+ "--config-path",
action="append",
metavar="CONFIG_FILE",
help="Specify config file. Can be given multiple times and"
- " may specify directories containing *.yaml files."
+ " may specify directories containing *.yaml files.",
)
config_parser.add_argument(
"--keys-directory",
metavar="DIRECTORY",
help="Where files such as certs and signing keys are stored when"
- " their location is given explicitly in the config."
- " Defaults to the directory containing the last config file",
+ " their location is given explicitly in the config."
+ " Defaults to the directory containing the last config file",
)
config_args = config_parser.parse_args(argv)
@@ -203,9 +196,7 @@ class Config(object):
obj = cls()
obj.read_config_files(
- config_files,
- keys_directory=config_args.keys_directory,
- generate_keys=False,
+ config_files, keys_directory=config_args.keys_directory, generate_keys=False
)
return obj
@@ -213,38 +204,38 @@ class Config(object):
def load_or_generate_config(cls, description, argv):
config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument(
- "-c", "--config-path",
+ "-c",
+ "--config-path",
action="append",
metavar="CONFIG_FILE",
help="Specify config file. Can be given multiple times and"
- " may specify directories containing *.yaml files."
+ " may specify directories containing *.yaml files.",
)
config_parser.add_argument(
"--generate-config",
action="store_true",
- help="Generate a config file for the server name"
+ help="Generate a config file for the server name",
)
config_parser.add_argument(
"--report-stats",
action="store",
help="Whether the generated config reports anonymized usage statistics",
- choices=["yes", "no"]
+ choices=["yes", "no"],
)
config_parser.add_argument(
"--generate-keys",
action="store_true",
- help="Generate any missing key files then exit"
+ help="Generate any missing key files then exit",
)
config_parser.add_argument(
"--keys-directory",
metavar="DIRECTORY",
help="Used with 'generate-*' options to specify where files such as"
- " certs and signing keys should be stored in, unless explicitly"
- " specified in the config."
+ " certs and signing keys should be stored in, unless explicitly"
+ " specified in the config.",
)
config_parser.add_argument(
- "-H", "--server-name",
- help="The server name to generate a config file for"
+ "-H", "--server-name", help="The server name to generate a config file for"
)
config_args, remaining_args = config_parser.parse_known_args(argv)
@@ -257,8 +248,8 @@ class Config(object):
if config_args.generate_config:
if config_args.report_stats is None:
config_parser.error(
- "Please specify either --report-stats=yes or --report-stats=no\n\n" +
- MISSING_REPORT_STATS_SPIEL
+ "Please specify either --report-stats=yes or --report-stats=no\n\n"
+ + MISSING_REPORT_STATS_SPIEL
)
if not config_files:
config_parser.error(
@@ -287,26 +278,32 @@ class Config(object):
config_dir_path=config_dir_path,
server_name=server_name,
report_stats=(config_args.report_stats == "yes"),
- is_generating_file=True
+ is_generating_file=True,
)
obj.invoke_all("generate_files", config)
config_file.write(config_str)
- print((
- "A config file has been generated in %r for server name"
- " %r with corresponding SSL keys and self-signed"
- " certificates. Please review this file and customise it"
- " to your needs."
- ) % (config_path, server_name))
+ print(
+ (
+ "A config file has been generated in %r for server name"
+ " %r with corresponding SSL keys and self-signed"
+ " certificates. Please review this file and customise it"
+ " to your needs."
+ )
+ % (config_path, server_name)
+ )
print(
"If this server name is incorrect, you will need to"
" regenerate the SSL certificates"
)
return
else:
- print((
- "Config file %r already exists. Generating any missing key"
- " files."
- ) % (config_path,))
+ print(
+ (
+ "Config file %r already exists. Generating any missing key"
+ " files."
+ )
+ % (config_path,)
+ )
generate_keys = True
parser = argparse.ArgumentParser(
@@ -338,8 +335,7 @@ class Config(object):
return obj
- def read_config_files(self, config_files, keys_directory=None,
- generate_keys=False):
+ def read_config_files(self, config_files, keys_directory=None, generate_keys=False):
if not keys_directory:
keys_directory = os.path.dirname(config_files[-1])
@@ -364,8 +360,9 @@ class Config(object):
if "report_stats" not in config:
raise ConfigError(
- MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
- MISSING_REPORT_STATS_SPIEL
+ MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS
+ + "\n"
+ + MISSING_REPORT_STATS_SPIEL
)
if generate_keys:
@@ -399,16 +396,16 @@ def find_config_files(search_paths):
for entry in os.listdir(config_path):
entry_path = os.path.join(config_path, entry)
if not os.path.isfile(entry_path):
- print (
- "Found subdirectory in config directory: %r. IGNORING."
- ) % (entry_path, )
+ err = "Found subdirectory in config directory: %r. IGNORING."
+ print(err % (entry_path,))
continue
if not entry.endswith(".yaml"):
- print (
- "Found file in config directory that does not"
- " end in '.yaml': %r. IGNORING."
- ) % (entry_path, )
+ err = (
+ "Found file in config directory that does not end in "
+ "'.yaml': %r. IGNORING."
+ )
+ print(err % (entry_path,))
continue
files.append(entry_path)
diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py
index e22c731a..f193a090 100644
--- a/synapse/config/consent_config.py
+++ b/synapse/config/consent_config.py
@@ -42,6 +42,14 @@ DEFAULT_CONFIG = """\
# until the user consents to the privacy policy. The value of the setting is
# used as the text of the error.
#
+# 'require_at_registration', if enabled, will add a step to the registration
+# process, similar to how captcha works. Users will be required to accept the
+# policy before their account is created.
+#
+# 'policy_name' is the display name of the policy users will see when registering
+# for an account. Has no effect unless `require_at_registration` is enabled.
+# Defaults to "Privacy Policy".
+#
# user_consent:
# template_dir: res/templates/privacy
# version: 1.0
@@ -54,6 +62,8 @@ DEFAULT_CONFIG = """\
# block_events_error: >-
# To continue using this homeserver you must review and agree to the
# terms and conditions at %(consent_uri)s
+# require_at_registration: False
+# policy_name: Privacy Policy
#
"""
@@ -67,6 +77,8 @@ class ConsentConfig(Config):
self.user_consent_server_notice_content = None
self.user_consent_server_notice_to_guests = False
self.block_events_without_consent_error = None
+ self.user_consent_at_registration = False
+ self.user_consent_policy_name = "Privacy Policy"
def read_config(self, config):
consent_config = config.get("user_consent")
@@ -83,6 +95,12 @@ class ConsentConfig(Config):
self.user_consent_server_notice_to_guests = bool(consent_config.get(
"send_server_notice_to_guests", False,
))
+ self.user_consent_at_registration = bool(consent_config.get(
+ "require_at_registration", False,
+ ))
+ self.user_consent_policy_name = consent_config.get(
+ "policy_name", "Privacy Policy",
+ )
def default_config(self, **kwargs):
return DEFAULT_CONFIG
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index fe156b69..93d70cff 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -13,10 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
# This file can't be called email.py because if it is, we cannot:
import email.utils
+import logging
+import os
+
+import pkg_resources
-from ._base import Config
+from ._base import Config, ConfigError
+
+logger = logging.getLogger(__name__)
class EmailConfig(Config):
@@ -38,7 +46,6 @@ class EmailConfig(Config):
"smtp_host",
"smtp_port",
"notif_from",
- "template_dir",
"notif_template_html",
"notif_template_text",
]
@@ -62,9 +69,26 @@ class EmailConfig(Config):
self.email_smtp_host = email_config["smtp_host"]
self.email_smtp_port = email_config["smtp_port"]
self.email_notif_from = email_config["notif_from"]
- self.email_template_dir = email_config["template_dir"]
self.email_notif_template_html = email_config["notif_template_html"]
self.email_notif_template_text = email_config["notif_template_text"]
+
+ template_dir = email_config.get("template_dir")
+ # we need an absolute path, because we change directory after starting (and
+ # we don't yet know what auxiliary templates like mail.css we will need).
+ # (Note that loading as pkg_resources with jinja.PackageLoader doesn't
+ # work for the same reason.)
+ if not template_dir:
+ template_dir = pkg_resources.resource_filename(
+ 'synapse', 'res/templates'
+ )
+ template_dir = os.path.abspath(template_dir)
+
+ for f in self.email_notif_template_text, self.email_notif_template_html:
+ p = os.path.join(template_dir, f)
+ if not os.path.isfile(p):
+ raise ConfigError("Unable to find email template file %s" % (p, ))
+ self.email_template_dir = template_dir
+
self.email_notif_for_new_users = email_config.get(
"notif_for_new_users", True
)
@@ -113,7 +137,9 @@ class EmailConfig(Config):
# require_transport_security: False
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
- # template_dir: res/templates
+ # # if template_dir is unset, uses the example templates that are part of
+ # # the Synapse distribution.
+ # #template_dir: res/templates
# notif_template_html: notif_mail.html
# notif_template_text: notif_mail.txt
# notif_for_new_users: True
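
The template_dir handling added above falls back to the templates bundled with the synapse package and fails fast if a named template is missing. A minimal sketch of that resolution logic as a standalone helper (the helper name, the use of ValueError instead of ConfigError, and the example filenames are illustrative assumptions, not part of this patch):

    import os

    import pkg_resources


    def resolve_template_dir(template_dir, template_names):
        # If no directory is configured, fall back to the templates shipped
        # in the synapse package, mirroring the behaviour added above.
        if not template_dir:
            template_dir = pkg_resources.resource_filename('synapse', 'res/templates')
        # An absolute path is needed because the process changes directory
        # after starting.
        template_dir = os.path.abspath(template_dir)
        for name in template_names:
            path = os.path.join(template_dir, name)
            if not os.path.isfile(path):
                raise ValueError("Unable to find email template file %s" % (path,))
        return template_dir

    # e.g. resolve_template_dir(None, ["notif_mail.html", "notif_mail.txt"])
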
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 2fd9c48a..10dd4015 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -21,7 +21,7 @@ from .consent_config import ConsentConfig
from .database import DatabaseConfig
from .emailconfig import EmailConfig
from .groups import GroupsConfig
-from .jwt import JWTConfig
+from .jwt_config import JWTConfig
from .key import KeyConfig
from .logger import LoggingConfig
from .metrics import MetricsConfig
@@ -31,6 +31,7 @@ from .push import PushConfig
from .ratelimiting import RatelimitConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
+from .room_directory import RoomDirectoryConfig
from .saml2 import SAML2Config
from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
@@ -49,7 +50,7 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
WorkerConfig, PasswordAuthProviderConfig, PushConfig,
SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
ConsentConfig,
- ServerNoticesConfig,
+ ServerNoticesConfig, RoomDirectoryConfig,
):
pass
diff --git a/synapse/config/jwt.py b/synapse/config/jwt_config.py
index 51e7f7e0..51e7f7e0 100644
--- a/synapse/config/jwt.py
+++ b/synapse/config/jwt_config.py
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 3f187adf..70818689 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -50,6 +50,7 @@ handlers:
maxBytes: 104857600
backupCount: 10
filters: [context]
+ encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
@@ -227,7 +228,22 @@ def setup_logging(config, use_worker_options=False):
#
# However this may not be too much of a problem if we are just writing to a file.
observer = STDLibLogObserver()
+
+ def _log(event):
+
+ if "log_text" in event:
+ if event["log_text"].startswith("DNSDatagramProtocol starting on "):
+ return
+
+ if event["log_text"].startswith("(UDP Port "):
+ return
+
+ if event["log_text"].startswith("Timing out client"):
+ return
+
+ return observer(event)
+
globalLogBeginner.beginLoggingTo(
- [observer],
+ [_log],
redirectStandardIO=not config.no_redirect_stdio,
)
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 0fb964eb..7480ed51 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -15,10 +15,10 @@
from distutils.util import strtobool
+from synapse.config._base import Config, ConfigError
+from synapse.types import RoomAlias
from synapse.util.stringutils import random_string_with_symbols
-from ._base import Config
-
class RegistrationConfig(Config):
@@ -44,6 +44,10 @@ class RegistrationConfig(Config):
)
self.auto_join_rooms = config.get("auto_join_rooms", [])
+ for room_alias in self.auto_join_rooms:
+ if not RoomAlias.is_valid(room_alias):
+ raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
+ self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
def default_config(self, **kwargs):
registration_shared_secret = random_string_with_symbols(50)
@@ -98,6 +102,13 @@ class RegistrationConfig(Config):
# to these rooms
#auto_join_rooms:
# - "#example:example.com"
+
+ # Where auto_join_rooms are specified, setting this flag ensures that
+ # the rooms exist by creating them when the first user on the
+ # homeserver registers.
+ # Setting to false means that if the rooms are not manually created,
+ # users cannot be auto-joined since they do not exist.
+ autocreate_auto_join_rooms: true
""" % locals()
def add_arguments(self, parser):
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index fc909c1f..06c62ab6 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -178,7 +178,7 @@ class ContentRepositoryConfig(Config):
def default_config(self, **kwargs):
media_store = self.default_path("media_store")
uploads_path = self.default_path("uploads")
- return """
+ return r"""
# Directory where uploaded images and attachments are stored.
media_store_path: "%(media_store)s"
diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py
new file mode 100644
index 00000000..9da13ab1
--- /dev/null
+++ b/synapse/config/room_directory.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util import glob_to_regex
+
+from ._base import Config, ConfigError
+
+
+class RoomDirectoryConfig(Config):
+ def read_config(self, config):
+ alias_creation_rules = config["alias_creation_rules"]
+
+ self._alias_creation_rules = [
+ _AliasRule(rule)
+ for rule in alias_creation_rules
+ ]
+
+ def default_config(self, config_dir_path, server_name, **kwargs):
+ return """
+ # The `alias_creation_rules` option controls who's allowed to create aliases
+ # on this server.
+ #
+ # The format of this option is a list of rules that contain globs that
+ # match against user_id and the new alias (fully qualified with server
+ # name). The action in the first rule that matches is taken, which can
+ # currently either be "allow" or "deny".
+ #
+ # If no rules match, the request is denied.
+ alias_creation_rules:
+ - user_id: "*"
+ alias: "*"
+ action: allow
+ """
+
+ def is_alias_creation_allowed(self, user_id, alias):
+ """Checks if the given user is allowed to create the given alias
+
+ Args:
+ user_id (str)
+ alias (str)
+
+ Returns:
+ boolean: True if the user is allowed to create the alias
+ """
+ for rule in self._alias_creation_rules:
+ if rule.matches(user_id, alias):
+ return rule.action == "allow"
+
+ return False
+
+
+class _AliasRule(object):
+ def __init__(self, rule):
+ action = rule["action"]
+ user_id = rule["user_id"]
+ alias = rule["alias"]
+
+ if action in ("allow", "deny"):
+ self.action = action
+ else:
+ raise ConfigError(
+ "alias_creation_rules rules can only have action of 'allow'"
+ " or 'deny'"
+ )
+
+ try:
+ self._user_id_regex = glob_to_regex(user_id)
+ self._alias_regex = glob_to_regex(alias)
+ except Exception as e:
+ raise ConfigError("Failed to parse glob into regex: %s", e)
+
+ def matches(self, user_id, alias):
+ """Tests if this rule matches the given user_id and alias.
+
+ Args:
+ user_id (str)
+ alias (str)
+
+ Returns:
+ boolean
+ """
+
+ # Note: The regexes are anchored at both ends
+ if not self._user_id_regex.match(user_id):
+ return False
+
+ if not self._alias_regex.match(alias):
+ return False
+
+ return True
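
The new _AliasRule class above leans on synapse.util.glob_to_regex; the same patch removes a private _glob_to_regex from federation_server.py further down, which shows the intended conversion. A rough sketch of that glob-to-regex behaviour and how a rule would match, assuming the anchored, case-insensitive semantics of the removed helper:

    import re


    def glob_to_regex(glob):
        # Sketch: '*' becomes '.*', '?' becomes '.', everything else is
        # escaped; the pattern is anchored at the end and case-insensitive.
        res = ''
        for c in glob:
            if c == '*':
                res = res + '.*'
            elif c == '?':
                res = res + '.'
            else:
                res = res + re.escape(c)
        return re.compile(res + "\\Z", re.IGNORECASE)


    # Example: a rule with user_id "*" and alias "#*:example.com" would allow
    # any local user to create aliases on example.com but nothing else.
    assert glob_to_regex("*").match("@alice:example.com")
    assert glob_to_regex("#*:example.com").match("#lobby:example.com")
    assert not glob_to_regex("#*:example.com").match("#lobby:other.org")
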
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index 1a391ade..02b76dfc 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -123,6 +123,6 @@ class ClientTLSOptionsFactory(object):
def get_options(self, host):
return ClientTLSOptions(
- host.decode('utf-8'),
+ host,
CertificateOptions(verify=False).getContext()
)
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py
index e94400b8..d40e4b85 100644
--- a/synapse/crypto/keyclient.py
+++ b/synapse/crypto/keyclient.py
@@ -15,6 +15,8 @@
import logging
+from six.moves import urllib
+
from canonicaljson import json
from twisted.internet import defer, reactor
@@ -28,15 +30,15 @@ from synapse.util import logcontext
logger = logging.getLogger(__name__)
-KEY_API_V1 = b"/_matrix/key/v1/"
+KEY_API_V2 = "/_matrix/key/v2/server/%s"
@defer.inlineCallbacks
-def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
+def fetch_server_key(server_name, tls_client_options_factory, key_id):
"""Fetch the keys for a remote server."""
factory = SynapseKeyClientFactory()
- factory.path = path
+ factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), )
factory.host = server_name
endpoint = matrix_federation_endpoint(
reactor, server_name, tls_client_options_factory, timeout=30
@@ -50,12 +52,12 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
defer.returnValue((server_response, server_certificate))
except SynapseKeyClientError as e:
logger.warn("Error getting key for %r: %s", server_name, e)
- if e.status.startswith("4"):
+ if e.status.startswith(b"4"):
# Don't retry for 4xx responses.
raise IOError("Cannot get key for %r" % server_name)
except (ConnectError, DomainError) as e:
logger.warn("Error getting key for %r: %s", server_name, e)
- except Exception as e:
+ except Exception:
logger.exception("Error getting key for %r", server_name)
raise IOError("Cannot get key for %r" % server_name)
@@ -82,6 +84,12 @@ class SynapseKeyClientProtocol(HTTPClient):
self._peer = self.transport.getPeer()
logger.debug("Connected to %s", self._peer)
+ if not isinstance(self.path, bytes):
+ self.path = self.path.encode('ascii')
+
+ if not isinstance(self.host, bytes):
+ self.host = self.host.encode('ascii')
+
self.sendCommand(b"GET", self.path)
if self.host:
self.sendHeader(b"Host", self.host)
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 30e27421..515ebbc1 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2017 New Vector Ltd.
+# Copyright 2017, 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
import hashlib
import logging
-import urllib
from collections import namedtuple
from signedjson.key import (
@@ -40,6 +39,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.crypto.keyclient import fetch_server_key
from synapse.util import logcontext, unwrapFirstError
from synapse.util.logcontext import (
+ LoggingContext,
PreserveLoggingContext,
preserve_fn,
run_in_background,
@@ -216,23 +216,34 @@ class Keyring(object):
servers have completed. Follows the synapse rules of logcontext
preservation.
"""
+ loop_count = 1
while True:
wait_on = [
- self.key_downloads[server_name]
+ (server_name, self.key_downloads[server_name])
for server_name in server_names
if server_name in self.key_downloads
]
- if wait_on:
- with PreserveLoggingContext():
- yield defer.DeferredList(wait_on)
- else:
+ if not wait_on:
break
+ logger.info(
+ "Waiting for existing lookups for %s to complete [loop %i]",
+ [w[0] for w in wait_on], loop_count,
+ )
+ with PreserveLoggingContext():
+ yield defer.DeferredList((w[1] for w in wait_on))
+
+ loop_count += 1
+
+ ctx = LoggingContext.current_context()
def rm(r, server_name_):
- self.key_downloads.pop(server_name_, None)
+ with PreserveLoggingContext(ctx):
+ logger.debug("Releasing key lookup lock on %s", server_name_)
+ self.key_downloads.pop(server_name_, None)
return r
for server_name, deferred in server_to_deferred.items():
+ logger.debug("Got key lookup lock on %s", server_name)
self.key_downloads[server_name] = deferred
deferred.addBoth(rm, server_name)
@@ -382,32 +393,13 @@ class Keyring(object):
@defer.inlineCallbacks
def get_keys_from_server(self, server_name_and_key_ids):
- @defer.inlineCallbacks
- def get_key(server_name, key_ids):
- keys = None
- try:
- keys = yield self.get_server_verify_key_v2_direct(
- server_name, key_ids
- )
- except Exception as e:
- logger.info(
- "Unable to get key %r for %r directly: %s %s",
- key_ids, server_name,
- type(e).__name__, str(e),
- )
-
- if not keys:
- keys = yield self.get_server_verify_key_v1_direct(
- server_name, key_ids
- )
-
- keys = {server_name: keys}
-
- defer.returnValue(keys)
-
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
- run_in_background(get_key, server_name, key_ids)
+ run_in_background(
+ self.get_server_verify_key_v2_direct,
+ server_name,
+ key_ids,
+ )
for server_name, key_ids in server_name_and_key_ids
],
consumeErrors=True,
@@ -432,7 +424,7 @@ class Keyring(object):
# an incoming request.
query_response = yield self.client.post_json(
destination=perspective_name,
- path=b"/_matrix/key/v2/query",
+ path="/_matrix/key/v2/query",
data={
u"server_keys": {
server_name: {
@@ -512,10 +504,7 @@ class Keyring(object):
continue
(response, tls_certificate) = yield fetch_server_key(
- server_name, self.hs.tls_client_options_factory,
- path=(b"/_matrix/key/v2/server/%s" % (
- urllib.quote(requested_key_id),
- )).encode("ascii"),
+ server_name, self.hs.tls_client_options_factory, requested_key_id
)
if (u"signatures" not in response
@@ -644,78 +633,6 @@ class Keyring(object):
defer.returnValue(results)
- @defer.inlineCallbacks
- def get_server_verify_key_v1_direct(self, server_name, key_ids):
- """Finds a verification key for the server with one of the key ids.
- Args:
- server_name (str): The name of the server to fetch a key for.
- keys_ids (list of str): The key_ids to check for.
- """
-
- # Try to fetch the key from the remote server.
-
- (response, tls_certificate) = yield fetch_server_key(
- server_name, self.hs.tls_client_options_factory
- )
-
- # Check the response.
-
- x509_certificate_bytes = crypto.dump_certificate(
- crypto.FILETYPE_ASN1, tls_certificate
- )
-
- if ("signatures" not in response
- or server_name not in response["signatures"]):
- raise KeyLookupError("Key response not signed by remote server")
-
- if "tls_certificate" not in response:
- raise KeyLookupError("Key response missing TLS certificate")
-
- tls_certificate_b64 = response["tls_certificate"]
-
- if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
- raise KeyLookupError("TLS certificate doesn't match")
-
- # Cache the result in the datastore.
-
- time_now_ms = self.clock.time_msec()
-
- verify_keys = {}
- for key_id, key_base64 in response["verify_keys"].items():
- if is_signing_algorithm_supported(key_id):
- key_bytes = decode_base64(key_base64)
- verify_key = decode_verify_key_bytes(key_id, key_bytes)
- verify_key.time_added = time_now_ms
- verify_keys[key_id] = verify_key
-
- for key_id in response["signatures"][server_name]:
- if key_id not in response["verify_keys"]:
- raise KeyLookupError(
- "Key response must include verification keys for all"
- " signatures"
- )
- if key_id in verify_keys:
- verify_signed_json(
- response,
- server_name,
- verify_keys[key_id]
- )
-
- yield self.store.store_server_certificate(
- server_name,
- server_name,
- time_now_ms,
- tls_certificate,
- )
-
- yield self.store_keys(
- server_name=server_name,
- from_server=server_name,
- verify_keys=verify_keys,
- )
-
- defer.returnValue(verify_keys)
-
def store_keys(self, server_name, from_server, verify_keys):
"""Store a collection of verify keys for a given server
Args:
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 6baeccca..c81d8e67 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -98,9 +98,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
creation_event = auth_events.get((EventTypes.Create, ""), None)
if not creation_event:
- raise SynapseError(
+ raise AuthError(
403,
- "Room %r does not exist" % (event.room_id,)
+ "No create event in auth events",
)
creating_domain = get_domain_from_id(event.room_id)
@@ -155,10 +155,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
if user_level < invite_level:
raise AuthError(
- 403, (
- "You cannot issue a third party invite for %s." %
- (event.content.display_name,)
- )
+ 403, "You don't have permission to invite users",
)
else:
logger.debug("Allowing! %s", event)
@@ -203,11 +200,11 @@ def _is_membership_change_allowed(event, auth_events):
membership = event.content["membership"]
# Check if this is the room creator joining:
- if len(event.prev_events) == 1 and Membership.JOIN == membership:
+ if len(event.prev_event_ids()) == 1 and Membership.JOIN == membership:
# Get room creation event:
key = (EventTypes.Create, "", )
create = auth_events.get(key)
- if create and event.prev_events[0][0] == create.event_id:
+ if create and event.prev_event_ids()[0] == create.event_id:
if create.content["creator"] == event.state_key:
return
@@ -305,7 +302,7 @@ def _is_membership_change_allowed(event, auth_events):
if user_level < invite_level:
raise AuthError(
- 403, "You cannot invite user %s." % target_user_id
+ 403, "You don't have permission to invite users",
)
elif Membership.JOIN == membership:
# Joins are valid iff caller == target and they were:
@@ -693,7 +690,7 @@ def auth_types_for_event(event):
auth_types = []
auth_types.append((EventTypes.PowerLevels, "", ))
- auth_types.append((EventTypes.Member, event.user_id, ))
+ auth_types.append((EventTypes.Member, event.sender, ))
auth_types.append((EventTypes.Create, "", ))
if event.type == EventTypes.Member:
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 51f9084b..84c75495 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -13,13 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import os
+from distutils.util import strtobool
+
+import six
+
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
-# bugs where we accidentally share e.g. signature dicts. However, converting
-# a dict to frozen_dicts is expensive.
-USE_FROZEN_DICTS = True
+# bugs where we accidentally share e.g. signature dicts. However, converting a
+# dict to frozen_dicts is expensive.
+#
+# NOTE: This is overridden by the configuration in the Synapse worker apps, but
+# for the sake of tests, it is set here while it cannot be configured on the
+# homeserver object itself.
+USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))
class _EventInternalMetadata(object):
@@ -147,6 +156,27 @@ class EventBase(object):
def items(self):
return list(self._event_dict.items())
+ def keys(self):
+ return six.iterkeys(self._event_dict)
+
+ def prev_event_ids(self):
+ """Returns the list of prev event IDs. The order matches the order
+ specified in the event, though there is no meaning to it.
+
+ Returns:
+ list[str]: The list of event IDs of this event's prev_events
+ """
+ return [e for e, _ in self.prev_events]
+
+ def auth_event_ids(self):
+ """Returns the list of auth event IDs. The order matches the order
+ specified in the event, though there is no meaning to it.
+
+ Returns:
+ list[str]: The list of event IDs of this event's auth_events
+ """
+ return [e for e, _ in self.auth_events]
+
class FrozenEvent(EventBase):
def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 5be8e66f..b7ad729c 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -143,11 +143,31 @@ class FederationBase(object):
def callback(_, pdu):
with logcontext.PreserveLoggingContext(ctx):
if not check_event_content_hash(pdu):
- logger.warn(
- "Event content has been tampered, redacting %s: %s",
- pdu.event_id, pdu.get_pdu_json()
- )
- return prune_event(pdu)
+ # let's try to distinguish between failures because the event was
+ # redacted (which are somewhat expected) vs actual ball-tampering
+ # incidents.
+ #
+ # This is just a heuristic, so we just assume that if the keys are
+ # about the same between the redacted and received events, then the
+ # received event was probably a redacted copy (but we then use our
+ # *actual* redacted copy to be on the safe side.)
+ redacted_event = prune_event(pdu)
+ if (
+ set(redacted_event.keys()) == set(pdu.keys()) and
+ set(six.iterkeys(redacted_event.content))
+ == set(six.iterkeys(pdu.content))
+ ):
+ logger.info(
+ "Event %s seems to have been redacted; using our redacted "
+ "copy",
+ pdu.event_id,
+ )
+ else:
+ logger.warning(
+ "Event %s content has been tampered, redacting",
+ pdu.event_id, pdu.get_pdu_json(),
+ )
+ return redacted_event
if self.spam_checker.check_event_for_spam(pdu):
logger.warn(
@@ -162,8 +182,8 @@ class FederationBase(object):
failure.trap(SynapseError)
with logcontext.PreserveLoggingContext(ctx):
logger.warn(
- "Signature check failed for %s",
- pdu.event_id,
+ "Signature check failed for %s: %s",
+ pdu.event_id, failure.getErrorMessage(),
)
return failure
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index c9f3c2d3..d05ed91d 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -66,6 +66,14 @@ class FederationClient(FederationBase):
self.state = hs.get_state_handler()
self.transport_layer = hs.get_federation_transport_client()
+ self._get_pdu_cache = ExpiringCache(
+ cache_name="get_pdu_cache",
+ clock=self._clock,
+ max_len=1000,
+ expiry_ms=120 * 1000,
+ reset_expiry_on_get=False,
+ )
+
def _clear_tried_cache(self):
"""Clear pdu_destination_tried cache"""
now = self._clock.time_msec()
@@ -82,17 +90,6 @@ class FederationClient(FederationBase):
if destination_dict:
self.pdu_destination_tried[event_id] = destination_dict
- def start_get_pdu_cache(self):
- self._get_pdu_cache = ExpiringCache(
- cache_name="get_pdu_cache",
- clock=self._clock,
- max_len=1000,
- expiry_ms=120 * 1000,
- reset_expiry_on_get=False,
- )
-
- self._get_pdu_cache.start()
-
@log_function
def make_query(self, destination, query_type, args,
retry_on_dns_fail=False, ignore_backoff=False):
@@ -212,8 +209,6 @@ class FederationClient(FederationBase):
Will attempt to get the PDU from each destination in the list until
one succeeds.
- This will persist the PDU locally upon receipt.
-
Args:
destinations (list): Which home servers to query
event_id (str): event to fetch
@@ -229,10 +224,9 @@ class FederationClient(FederationBase):
# TODO: Rate limit the number of times we try and get the same event.
- if self._get_pdu_cache:
- ev = self._get_pdu_cache.get(event_id)
- if ev:
- defer.returnValue(ev)
+ ev = self._get_pdu_cache.get(event_id)
+ if ev:
+ defer.returnValue(ev)
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
@@ -271,10 +265,10 @@ class FederationClient(FederationBase):
event_id, destination, e,
)
except NotRetryingDestination as e:
- logger.info(e.message)
+ logger.info(str(e))
continue
except FederationDeniedError as e:
- logger.info(e.message)
+ logger.info(str(e))
continue
except Exception as e:
pdu_attempts[destination] = now
@@ -285,7 +279,7 @@ class FederationClient(FederationBase):
)
continue
- if self._get_pdu_cache is not None and signed_pdu:
+ if signed_pdu:
self._get_pdu_cache[event_id] = signed_pdu
defer.returnValue(signed_pdu)
@@ -293,8 +287,7 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks
@log_function
def get_state_for_room(self, destination, room_id, event_id):
- """Requests all of the `current` state PDUs for a given room from
- a remote home server.
+ """Requests all of the room state at a given event from a remote home server.
Args:
destination (str): The remote homeserver to query for the state.
@@ -302,9 +295,10 @@ class FederationClient(FederationBase):
event_id (str): The id of the event we want the state at.
Returns:
- Deferred: Results in a list of PDUs.
+ Deferred[Tuple[List[EventBase], List[EventBase]]]:
+ A list of events in the state, and a list of events in the auth chain
+ for the given event.
"""
-
try:
# First we try and ask for just the IDs, as thats far quicker if
# we have most of the state and auth_chain already.
@@ -510,7 +504,7 @@ class FederationClient(FederationBase):
else:
logger.warn(
"Failed to %s via %s: %i %s",
- description, destination, e.code, e.message,
+ description, destination, e.code, e.args[0],
)
except Exception:
logger.warn(
@@ -875,7 +869,7 @@ class FederationClient(FederationBase):
except Exception as e:
logger.exception(
"Failed to send_third_party_invite via %s: %s",
- destination, e.message
+ destination, str(e)
)
raise RuntimeError("Failed to send to any server.")
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 547c6aec..98722ae5 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-import re
import six
from six import iteritems
@@ -44,8 +43,10 @@ from synapse.replication.http.federation import (
ReplicationGetQueryRestServlet,
)
from synapse.types import get_domain_from_id
+from synapse.util import glob_to_regex
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
+from synapse.util.logcontext import nested_logging_context
from synapse.util.logutils import log_function
# when processing incoming transactions, we try to handle multiple rooms in
@@ -161,8 +162,30 @@ class FederationServer(FederationBase):
p["age_ts"] = request_time - int(p["age"])
del p["age"]
+ # We try and pull out an event ID so that if later checks fail we
+ # can log something sensible. We don't mandate an event ID here in
+ # case future event formats get rid of the key.
+ possible_event_id = p.get("event_id", "<Unknown>")
+
+ # Now we get the room ID so that we can check that we know the
+ # version of the room.
+ room_id = p.get("room_id")
+ if not room_id:
+ logger.info(
+ "Ignoring PDU as does not have a room_id. Event ID: %s",
+ possible_event_id,
+ )
+ continue
+
+ try:
+ # In future we will actually use the room version to parse the
+ # PDU into an event.
+ yield self.store.get_room_version(room_id)
+ except NotFoundError:
+ logger.info("Ignoring PDU for unknown room_id: %s", room_id)
+ continue
+
event = event_from_pdu_json(p)
- room_id = event.room_id
pdus_by_room.setdefault(room_id, []).append(event)
pdu_results = {}
@@ -187,21 +210,22 @@ class FederationServer(FederationBase):
for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id
- try:
- yield self._handle_received_pdu(
- origin, pdu
- )
- pdu_results[event_id] = {}
- except FederationError as e:
- logger.warn("Error handling PDU %s: %s", event_id, e)
- pdu_results[event_id] = {"error": str(e)}
- except Exception as e:
- f = failure.Failure()
- pdu_results[event_id] = {"error": str(e)}
- logger.error(
- "Failed to handle PDU %s: %s",
- event_id, f.getTraceback().rstrip(),
- )
+ with nested_logging_context(event_id):
+ try:
+ yield self._handle_received_pdu(
+ origin, pdu
+ )
+ pdu_results[event_id] = {}
+ except FederationError as e:
+ logger.warn("Error handling PDU %s: %s", event_id, e)
+ pdu_results[event_id] = {"error": str(e)}
+ except Exception as e:
+ f = failure.Failure()
+ pdu_results[event_id] = {"error": str(e)}
+ logger.error(
+ "Failed to handle PDU %s: %s",
+ event_id, f.getTraceback().rstrip(),
+ )
yield concurrently_execute(
process_pdus_for_room, pdus_by_room.keys(),
@@ -322,11 +346,6 @@ class FederationServer(FederationBase):
defer.returnValue((404, ""))
@defer.inlineCallbacks
- @log_function
- def on_pull_request(self, origin, versions):
- raise NotImplementedError("Pull transactions not implemented")
-
- @defer.inlineCallbacks
def on_query_request(self, query_type, args):
received_queries_counter.labels(query_type).inc()
resp = yield self.registry.on_query(query_type, args)
@@ -505,19 +524,19 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
@log_function
def on_get_missing_events(self, origin, room_id, earliest_events,
- latest_events, limit, min_depth):
+ latest_events, limit):
with (yield self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
yield self.check_server_matches_acl(origin_host, room_id)
logger.info(
"on_get_missing_events: earliest_events: %r, latest_events: %r,"
- " limit: %d, min_depth: %d",
- earliest_events, latest_events, limit, min_depth
+ " limit: %d",
+ earliest_events, latest_events, limit,
)
missing_events = yield self.handler.on_get_missing_events(
- origin, room_id, earliest_events, latest_events, limit, min_depth
+ origin, room_id, earliest_events, latest_events, limit,
)
if len(missing_events) < 5:
@@ -618,7 +637,7 @@ class FederationServer(FederationBase):
)
yield self.handler.on_receive_pdu(
- origin, pdu, get_missing=True, sent_to_us_directly=True,
+ origin, pdu, sent_to_us_directly=True,
)
def __str__(self):
@@ -727,22 +746,10 @@ def _acl_entry_matches(server_name, acl_entry):
if not isinstance(acl_entry, six.string_types):
logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry))
return False
- regex = _glob_to_regex(acl_entry)
+ regex = glob_to_regex(acl_entry)
return regex.match(server_name)
-def _glob_to_regex(glob):
- res = ''
- for c in glob:
- if c == '*':
- res = res + '.*'
- elif c == '?':
- res = res + '.'
- else:
- res = res + re.escape(c)
- return re.compile(res + "\\Z", re.IGNORECASE)
-
-
class FederationHandlerRegistry(object):
"""Allows classes to register themselves as handlers for a given EDU or
query type for incoming federation traffic.
@@ -798,7 +805,7 @@ class FederationHandlerRegistry(object):
yield handler(origin, content)
except SynapseError as e:
logger.info("Failed to handle edu %r: %r", edu_type, e)
- except Exception as e:
+ except Exception:
logger.exception("Failed to handle edu %r", edu_type)
def on_query(self, query_type, args):
@@ -838,9 +845,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
)
return self._send_edu(
- edu_type=edu_type,
- origin=origin,
- content=content,
+ edu_type=edu_type,
+ origin=origin,
+ content=content,
)
def on_query(self, query_type, args):
@@ -851,6 +858,6 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
return handler(args)
return self._get_query_client(
- query_type=query_type,
- args=args,
+ query_type=query_type,
+ args=args,
)
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index 94d7423d..099ace28 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -137,26 +137,6 @@ class TransactionQueue(object):
self._processing_pending_presence = False
- def can_send_to(self, destination):
- """Can we send messages to the given server?
-
- We can't send messages to ourselves. If we are running on localhost
- then we can only federation with other servers running on localhost.
- Otherwise we only federate with servers on a public domain.
-
- Args:
- destination(str): The server we are possibly trying to send to.
- Returns:
- bool: True if we can send to the server.
- """
-
- if destination == self.server_name:
- return False
- if self.server_name.startswith("localhost"):
- return destination.startswith("localhost")
- else:
- return not destination.startswith("localhost")
-
def notify_new_events(self, current_id):
"""This gets called when we have some new events we might want to
send out to other servers.
@@ -203,9 +183,7 @@ class TransactionQueue(object):
# banned then it won't receive the event because it won't
# be in the room after the ban.
destinations = yield self.state.get_current_hosts_in_room(
- event.room_id, latest_event_ids=[
- prev_id for prev_id, _ in event.prev_events
- ],
+ event.room_id, latest_event_ids=event.prev_event_ids(),
)
except Exception:
logger.exception(
@@ -279,10 +257,7 @@ class TransactionQueue(object):
self._order += 1
destinations = set(destinations)
- destinations = set(
- dest for dest in destinations if self.can_send_to(dest)
- )
-
+ destinations.discard(self.server_name)
logger.debug("Sending to: %s", str(destinations))
if not destinations:
@@ -358,7 +333,7 @@ class TransactionQueue(object):
for destinations, states in hosts_and_states:
for destination in destinations:
- if not self.can_send_to(destination):
+ if destination == self.server_name:
continue
self.pending_presence_by_dest.setdefault(
@@ -377,7 +352,8 @@ class TransactionQueue(object):
content=content,
)
- if not self.can_send_to(destination):
+ if destination == self.server_name:
+ logger.info("Not sending EDU to ourselves")
return
sent_edus_counter.inc()
@@ -392,10 +368,8 @@ class TransactionQueue(object):
self._attempt_new_transaction(destination)
def send_device_messages(self, destination):
- if destination == self.server_name or destination == "localhost":
- return
-
- if not self.can_send_to(destination):
+ if destination == self.server_name:
+ logger.info("Not sending device update to ourselves")
return
self._attempt_new_transaction(destination)
@@ -463,7 +437,19 @@ class TransactionQueue(object):
# pending_transactions flag.
pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
+
+ # We can only include at most 50 PDUs per transaction
+ pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:]
+ if leftover_pdus:
+ self.pending_pdus_by_dest[destination] = leftover_pdus
+
pending_edus = self.pending_edus_by_dest.pop(destination, [])
+
+ # We can only include at most 100 EDUs per transaction
+ pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:]
+ if leftover_edus:
+ self.pending_edus_by_dest[destination] = leftover_edus
+
pending_presence = self.pending_presence_by_dest.pop(destination, {})
pending_edus.extend(
@@ -645,14 +631,6 @@ class TransactionQueue(object):
transaction, json_data_cb
)
code = 200
-
- if response:
- for e_id, r in response.get("pdus", {}).items():
- if "error" in r:
- logger.warn(
- "Transaction returned error for %s: %s",
- e_id, r,
- )
except HttpResponseException as e:
code = e.code
response = e.response
@@ -669,19 +647,24 @@ class TransactionQueue(object):
destination, txn_id, code
)
- logger.debug("TX [%s] Sent transaction", destination)
- logger.debug("TX [%s] Marking as delivered...", destination)
-
yield self.transaction_actions.delivered(
transaction, code, response
)
- logger.debug("TX [%s] Marked as delivered", destination)
+ logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
- if code != 200:
+ if code == 200:
+ for e_id, r in response.get("pdus", {}).items():
+ if "error" in r:
+ logger.warn(
+ "TX [%s] {%s} Remote returned error for %s: %s",
+ destination, txn_id, e_id, r,
+ )
+ else:
for p in pdus:
- logger.info(
- "Failed to send event %s to %s", p.event_id, destination
+ logger.warn(
+ "TX [%s] {%s} Failed to send event %s",
+ destination, txn_id, p.event_id,
)
success = False
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 1054441c..edba5a98 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -15,7 +15,8 @@
# limitations under the License.
import logging
-import urllib
+
+from six.moves import urllib
from twisted.internet import defer
@@ -142,9 +143,17 @@ class TransportLayerClient(object):
transaction (Transaction)
Returns:
- Deferred: Results of the deferred is a tuple in the form of
- (response_code, response_body) where the response_body is a
- python dict decoded from json
+ Deferred: Succeeds when we get a 2xx HTTP response. The result
+ will be the decoded JSON body.
+
+ Fails with ``HTTPRequestException`` if we get an HTTP response
+ code >= 300.
+
+ Fails with ``NotRetryingDestination`` if we are not yet ready
+ to retry this server.
+
+ Fails with ``FederationDeniedError`` if this destination
+ is not on our federation whitelist
"""
logger.debug(
"send_data dest=%s, txid=%s",
@@ -169,11 +178,6 @@ class TransportLayerClient(object):
backoff_on_404=True, # If we get a 404 the other side has gone
)
- logger.debug(
- "send_data dest=%s, txid=%s, got response: 200",
- transaction.destination, transaction.transaction_id,
- )
-
defer.returnValue(response)
@defer.inlineCallbacks
@@ -951,4 +955,4 @@ def _create_path(prefix, path, *args):
Returns:
str
"""
- return prefix + path % tuple(urllib.quote(arg, "") for arg in args)
+ return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 3972922f..3553f418 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -90,8 +90,8 @@ class Authenticator(object):
@defer.inlineCallbacks
def authenticate_request(self, request, content):
json_request = {
- "method": request.method,
- "uri": request.uri,
+ "method": request.method.decode('ascii'),
+ "uri": request.uri.decode('ascii'),
"destination": self.server_name,
"signatures": {},
}
@@ -252,7 +252,7 @@ class BaseFederationServlet(object):
by the callback method. None if the request has already been handled.
"""
content = None
- if request.method in ["PUT", "POST"]:
+ if request.method in [b"PUT", b"POST"]:
# TODO: Handle other method types? other content types?
content = parse_json_object_from_request(request)
@@ -362,14 +362,6 @@ class FederationSendServlet(BaseFederationServlet):
defer.returnValue((code, response))
-class FederationPullServlet(BaseFederationServlet):
- PATH = "/pull/"
-
- # This is for when someone asks us for everything since version X
- def on_GET(self, origin, content, query):
- return self.handler.on_pull_request(query["origin"][0], query["v"])
-
-
class FederationEventServlet(BaseFederationServlet):
PATH = "/event/(?P<event_id>[^/]*)/"
@@ -386,7 +378,7 @@ class FederationStateServlet(BaseFederationServlet):
return self.handler.on_context_state_request(
origin,
context,
- query.get("event_id", [None])[0],
+ parse_string_from_args(query, "event_id", None),
)
@@ -397,7 +389,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
return self.handler.on_state_ids_request(
origin,
room_id,
- query.get("event_id", [None])[0],
+ parse_string_from_args(query, "event_id", None),
)
@@ -405,14 +397,12 @@ class FederationBackfillServlet(BaseFederationServlet):
PATH = "/backfill/(?P<context>[^/]*)/"
def on_GET(self, origin, content, query, context):
- versions = query["v"]
- limits = query["limit"]
+ versions = [x.decode('ascii') for x in query[b"v"]]
+ limit = parse_integer_from_args(query, "limit", None)
- if not limits:
+ if not limit:
return defer.succeed((400, {"error": "Did not include limit param"}))
- limit = int(limits[-1])
-
return self.handler.on_backfill_request(origin, context, versions, limit)
@@ -423,7 +413,7 @@ class FederationQueryServlet(BaseFederationServlet):
def on_GET(self, origin, content, query, query_type):
return self.handler.on_query_request(
query_type,
- {k: v[0].decode("utf-8") for k, v in query.items()}
+ {k.decode('utf8'): v[0].decode("utf-8") for k, v in query.items()}
)
@@ -562,7 +552,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
@defer.inlineCallbacks
def on_POST(self, origin, content, query, room_id):
limit = int(content.get("limit", 10))
- min_depth = int(content.get("min_depth", 0))
earliest_events = content.get("earliest_events", [])
latest_events = content.get("latest_events", [])
@@ -571,7 +560,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
- min_depth=min_depth,
limit=limit,
)
@@ -630,14 +618,14 @@ class OpenIdUserInfo(BaseFederationServlet):
@defer.inlineCallbacks
def on_GET(self, origin, content, query):
- token = query.get("access_token", [None])[0]
+ token = query.get(b"access_token", [None])[0]
if token is None:
defer.returnValue((401, {
"errcode": "M_MISSING_TOKEN", "error": "Access Token required"
}))
return
- user_id = yield self.handler.on_openid_userinfo(token)
+ user_id = yield self.handler.on_openid_userinfo(token.decode('ascii'))
if user_id is None:
defer.returnValue((401, {
@@ -1265,7 +1253,6 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
FEDERATION_SERVLET_CLASSES = (
FederationSendServlet,
- FederationPullServlet,
FederationEventServlet,
FederationStateServlet,
FederationStateIdsServlet,
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index c5ab1431..025a79c0 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -117,9 +117,6 @@ class Transaction(JsonEncodedObject):
"Require 'transaction_id' to construct a Transaction"
)
- for p in pdus:
- p.transaction_id = kwargs["transaction_id"]
-
kwargs["pdus"] = [p.get_pdu_json() for p in pdus]
return Transaction(**kwargs)
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index f0f89af7..17eedf4d 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -28,6 +28,7 @@ from synapse.metrics import (
event_processing_loop_room_count,
)
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import log_failure
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.metrics import Measure
@@ -36,17 +37,6 @@ logger = logging.getLogger(__name__)
events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "")
-def log_failure(failure):
- logger.error(
- "Application Services Failure",
- exc_info=(
- failure.type,
- failure.value,
- failure.getTracebackObject()
- )
- )
-
-
class ApplicationServicesHandler(object):
def __init__(self, hs):
@@ -112,7 +102,10 @@ class ApplicationServicesHandler(object):
if not self.started_scheduler:
def start_scheduler():
- return self.scheduler.start().addErrback(log_failure)
+ return self.scheduler.start().addErrback(
+ log_failure, "Application Services Failure",
+ )
+
run_as_background_process("as_scheduler", start_scheduler)
self.started_scheduler = True
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 4a81bd2b..a958c452 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -22,7 +22,7 @@ import bcrypt
import pymacaroons
from canonicaljson import json
-from twisted.internet import defer, threads
+from twisted.internet import defer
from twisted.web.client import PartialDownloadError
import synapse.util.stringutils as stringutils
@@ -37,8 +37,8 @@ from synapse.api.errors import (
)
from synapse.module_api import ModuleApi
from synapse.types import UserID
+from synapse.util import logcontext
from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.logcontext import make_deferred_yieldable
from ._base import BaseHandler
@@ -59,6 +59,7 @@ class AuthHandler(BaseHandler):
LoginType.EMAIL_IDENTITY: self._check_email_identity,
LoginType.MSISDN: self._check_msisdn,
LoginType.DUMMY: self._check_dummy_auth,
+ LoginType.TERMS: self._check_terms_auth,
}
self.bcrypt_rounds = hs.config.bcrypt_rounds
@@ -431,6 +432,9 @@ class AuthHandler(BaseHandler):
def _check_dummy_auth(self, authdict, _):
return defer.succeed(True)
+ def _check_terms_auth(self, authdict, _):
+ return defer.succeed(True)
+
@defer.inlineCallbacks
def _check_threepid(self, medium, authdict):
if 'threepid_creds' not in authdict:
@@ -462,6 +466,22 @@ class AuthHandler(BaseHandler):
def _get_params_recaptcha(self):
return {"public_key": self.hs.config.recaptcha_public_key}
+ def _get_params_terms(self):
+ return {
+ "policies": {
+ "privacy_policy": {
+ "version": self.hs.config.user_consent_version,
+ "en": {
+ "name": self.hs.config.user_consent_policy_name,
+ "url": "%s/_matrix/consent?v=%s" % (
+ self.hs.config.public_baseurl,
+ self.hs.config.user_consent_version,
+ ),
+ },
+ },
+ },
+ }
+
def _auth_dict_for_flows(self, flows, session):
public_flows = []
for f in flows:
@@ -469,6 +489,7 @@ class AuthHandler(BaseHandler):
get_params = {
LoginType.RECAPTCHA: self._get_params_recaptcha,
+ LoginType.TERMS: self._get_params_terms,
}
params = {}
@@ -884,40 +905,32 @@ class AuthHandler(BaseHandler):
bcrypt.gensalt(self.bcrypt_rounds),
).decode('ascii')
- return make_deferred_yieldable(
- threads.deferToThreadPool(
- self.hs.get_reactor(), self.hs.get_reactor().getThreadPool(), _do_hash
- ),
- )
+ return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash)
def validate_hash(self, password, stored_hash):
"""Validates that self.hash(password) == stored_hash.
Args:
password (unicode): Password to hash.
- stored_hash (unicode): Expected hash value.
+ stored_hash (bytes): Expected hash value.
Returns:
Deferred(bool): Whether self.hash(password) == stored_hash.
"""
-
def _do_validate_hash():
# Normalise the Unicode in the password
pw = unicodedata.normalize("NFKC", password)
return bcrypt.checkpw(
pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"),
- stored_hash.encode('utf8')
+ stored_hash
)
if stored_hash:
- return make_deferred_yieldable(
- threads.deferToThreadPool(
- self.hs.get_reactor(),
- self.hs.get_reactor().getThreadPool(),
- _do_validate_hash,
- ),
- )
+ if not isinstance(stored_hash, bytes):
+ stored_hash = stored_hash.encode('ascii')
+
+ return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
else:
return defer.succeed(False)
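
For context, the _get_params_terms helper added above supplies the parameters for the new m.login.terms UI-auth stage. With illustrative config values (the version, policy name and base URL below are assumptions, not taken from this patch), a client completing that stage would be handed a structure like:

    params = {
        "policies": {
            "privacy_policy": {
                "version": "1.0",
                "en": {
                    "name": "Privacy Policy",
                    "url": "https://example.com/_matrix/consent?v=1.0",
                },
            },
        },
    }
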
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index b078df4a..75fe50c4 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -17,8 +17,8 @@ import logging
from twisted.internet import defer
from synapse.api.errors import SynapseError
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID, create_requester
-from synapse.util.logcontext import run_in_background
from ._base import BaseHandler
@@ -121,7 +121,7 @@ class DeactivateAccountHandler(BaseHandler):
None
"""
if not self._user_parter_running:
- run_in_background(self._user_parter_loop)
+ run_as_background_process("user_parter_loop", self._user_parter_loop)
@defer.inlineCallbacks
def _user_parter_loop(self):
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index ef866da1..0699731c 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -20,7 +20,14 @@ import string
from twisted.internet import defer
from synapse.api.constants import EventTypes
-from synapse.api.errors import AuthError, CodeMessageException, Codes, SynapseError
+from synapse.api.errors import (
+ AuthError,
+ CodeMessageException,
+ Codes,
+ NotFoundError,
+ StoreError,
+ SynapseError,
+)
from synapse.types import RoomAlias, UserID, get_domain_from_id
from ._base import BaseHandler
@@ -36,6 +43,7 @@ class DirectoryHandler(BaseHandler):
self.state = hs.get_state_handler()
self.appservice_handler = hs.get_application_service_handler()
self.event_creation_handler = hs.get_event_creation_handler()
+ self.config = hs.config
self.federation = hs.get_federation_client()
hs.get_federation_registry().register_query_handler(
@@ -73,43 +81,96 @@ class DirectoryHandler(BaseHandler):
)
@defer.inlineCallbacks
- def create_association(self, user_id, room_alias, room_id, servers=None):
- # association creation for human users
- # TODO(erikj): Do user auth.
+ def create_association(self, requester, room_alias, room_id, servers=None,
+ send_event=True):
+ """Attempt to create a new alias
- if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
- raise SynapseError(
- 403, "This user is not permitted to create this alias",
- )
+ Args:
+ requester (Requester)
+ room_alias (RoomAlias)
+ room_id (str)
+ servers (list[str]|None): List of servers that other servers
+ should try and join via
+ send_event (bool): Whether to send an updated m.room.aliases event
- can_create = yield self.can_modify_alias(
- room_alias,
- user_id=user_id
- )
- if not can_create:
- raise SynapseError(
- 400, "This alias is reserved by an application service.",
- errcode=Codes.EXCLUSIVE
+ Returns:
+ Deferred
+ """
+
+ user_id = requester.user.to_string()
+
+ service = requester.app_service
+ if service:
+ if not service.is_interested_in_alias(room_alias.to_string()):
+ raise SynapseError(
+ 400, "This application service has not reserved"
+ " this kind of alias.", errcode=Codes.EXCLUSIVE
+ )
+ else:
+ if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
+ raise AuthError(
+ 403, "This user is not permitted to create this alias",
+ )
+
+ if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()):
+ # Lets just return a generic message, as there may be all sorts of
+ # reasons why we said no. TODO: Allow configurable error messages
+ # per alias creation rule?
+ raise SynapseError(
+ 403, "Not allowed to create alias",
+ )
+
+ can_create = yield self.can_modify_alias(
+ room_alias,
+ user_id=user_id
)
+ if not can_create:
+ raise AuthError(
+ 400, "This alias is reserved by an application service.",
+ errcode=Codes.EXCLUSIVE
+ )
+
yield self._create_association(room_alias, room_id, servers, creator=user_id)
+ if send_event:
+ yield self.send_room_alias_update_event(
+ requester,
+ room_id
+ )
@defer.inlineCallbacks
- def create_appservice_association(self, service, room_alias, room_id,
- servers=None):
- if not service.is_interested_in_alias(room_alias.to_string()):
- raise SynapseError(
- 400, "This application service has not reserved"
- " this kind of alias.", errcode=Codes.EXCLUSIVE
- )
+ def delete_association(self, requester, room_alias, send_event=True):
+ """Remove an alias from the directory
- # association creation for app services
- yield self._create_association(room_alias, room_id, servers)
+ (this is only meant for human users; AS users should call
+ delete_appservice_association)
- @defer.inlineCallbacks
- def delete_association(self, requester, user_id, room_alias):
- # association deletion for human users
+ Args:
+ requester (Requester):
+ room_alias (RoomAlias):
+ send_event (bool): Whether to send an updated m.room.aliases event.
+ Note that, if we delete the canonical alias, we will always attempt
+ to send an m.room.canonical_alias event
+
+ Returns:
+ Deferred[unicode]: room id that the alias used to point to
+
+ Raises:
+ NotFoundError: if the alias doesn't exist
+
+ AuthError: if the user doesn't have perms to delete the alias (ie, the user
+ is neither the creator of the alias, nor a server admin).
+
+ SynapseError: if the alias belongs to an AS
+ """
+ user_id = requester.user.to_string()
+
+ try:
+ can_delete = yield self._user_can_delete_alias(room_alias, user_id)
+ except StoreError as e:
+ if e.code == 404:
+ raise NotFoundError("Unknown room alias")
+ raise
- can_delete = yield self._user_can_delete_alias(room_alias, user_id)
if not can_delete:
raise AuthError(
403, "You don't have permission to delete the alias.",
@@ -128,11 +189,11 @@ class DirectoryHandler(BaseHandler):
room_id = yield self._delete_association(room_alias)
try:
- yield self.send_room_alias_update_event(
- requester,
- requester.user.to_string(),
- room_id
- )
+ if send_event:
+ yield self.send_room_alias_update_event(
+ requester,
+ room_id
+ )
yield self._update_canonical_alias(
requester,
@@ -248,7 +309,7 @@ class DirectoryHandler(BaseHandler):
)
@defer.inlineCallbacks
- def send_room_alias_update_event(self, requester, user_id, room_id):
+ def send_room_alias_update_event(self, requester, room_id):
aliases = yield self.store.get_aliases_for_room(room_id)
yield self.event_creation_handler.create_and_send_nonmember_event(
@@ -257,7 +318,7 @@ class DirectoryHandler(BaseHandler):
"type": EventTypes.Aliases,
"state_key": self.hs.hostname,
"room_id": room_id,
- "sender": user_id,
+ "sender": requester.user.to_string(),
"content": {"aliases": aliases},
},
ratelimit=False
@@ -320,7 +381,7 @@ class DirectoryHandler(BaseHandler):
def _user_can_delete_alias(self, alias, user_id):
creator = yield self.store.get_room_alias_creator(alias.to_string())
- if creator and creator == user_id:
+ if creator is not None and creator == user_id:
defer.returnValue(True)
is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 5816bf8b..9dc46aa1 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -330,7 +330,8 @@ class E2eKeysHandler(object):
(algorithm, key_id, ex_json, key)
)
else:
- new_keys.append((algorithm, key_id, encode_canonical_json(key)))
+ new_keys.append((
+ algorithm, key_id, encode_canonical_json(key).decode('ascii')))
yield self.store.add_e2e_one_time_keys(
user_id, device_id, time_now, new_keys
@@ -340,7 +341,7 @@ class E2eKeysHandler(object):
def _exception_to_failure(e):
if isinstance(e, CodeMessageException):
return {
- "status": e.code, "message": e.message,
+ "status": e.code, "message": str(e),
}
if isinstance(e, NotRetryingDestination):
@@ -358,7 +359,7 @@ def _exception_to_failure(e):
# Note that some Exceptions (notably twisted's ResponseFailed etc) don't
# give a string for e.message, which json then fails to serialize.
return {
- "status": 503, "message": str(e.message),
+ "status": 503, "message": str(e),
}
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
new file mode 100644
index 00000000..42b04037
--- /dev/null
+++ b/synapse/handlers/e2e_room_keys.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017, 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from six import iteritems
+
+from twisted.internet import defer
+
+from synapse.api.errors import NotFoundError, RoomKeysVersionError, StoreError
+from synapse.util.async_helpers import Linearizer
+
+logger = logging.getLogger(__name__)
+
+
+class E2eRoomKeysHandler(object):
+ """
+ Implements an optional realtime backup mechanism for encrypted E2E megolm room keys.
+ This gives a way for users to store and recover their megolm keys if they lose all
+ their clients. It should also extend easily to future room key mechanisms.
+ The actual payload of the encrypted keys is completely opaque to the handler.
+ """
+
+ def __init__(self, hs):
+ self.store = hs.get_datastore()
+
+ # Used to lock whenever a client is uploading key data. This prevents collisions
+ # between clients trying to upload the details of a new session, given all
+ # clients belonging to a user will receive and try to upload a new session at
+ # roughly the same time. Also used to lock out uploads when the key is being
+ # changed.
+ self._upload_linearizer = Linearizer("upload_room_keys_lock")
+
+ @defer.inlineCallbacks
+ def get_room_keys(self, user_id, version, room_id=None, session_id=None):
+ """Bulk get the E2E room keys for a given backup, optionally filtered to a given
+ room, or a given session.
+ See EndToEndRoomKeyStore.get_e2e_room_keys for full details.
+
+ Args:
+ user_id(str): the user whose keys we're getting
+ version(str): the version ID of the backup we're getting keys from
+            room_id(string): room ID to get keys for, or None to get keys for all rooms
+            session_id(string): session ID to get keys for, or None to get keys for all
+                sessions
+ Raises:
+ NotFoundError: if the backup version does not exist
+ Returns:
+ A deferred list of dicts giving the session_data and message metadata for
+ these room keys.
+ """
+
+ # we deliberately take the lock to get keys so that changing the version
+ # works atomically
+ with (yield self._upload_linearizer.queue(user_id)):
+ # make sure the backup version exists
+ try:
+ yield self.store.get_e2e_room_keys_version_info(user_id, version)
+ except StoreError as e:
+ if e.code == 404:
+ raise NotFoundError("Unknown backup version")
+ else:
+ raise
+
+ results = yield self.store.get_e2e_room_keys(
+ user_id, version, room_id, session_id
+ )
+
+ defer.returnValue(results)
+
+ @defer.inlineCallbacks
+ def delete_room_keys(self, user_id, version, room_id=None, session_id=None):
+ """Bulk delete the E2E room keys for a given backup, optionally filtered to a given
+ room or a given session.
+ See EndToEndRoomKeyStore.delete_e2e_room_keys for full details.
+
+ Args:
+ user_id(str): the user whose backup we're deleting
+ version(str): the version ID of the backup we're deleting
+            room_id(string): room ID to delete keys for, or None to delete keys for all
+                rooms
+            session_id(string): session ID to delete keys for, or None to delete keys
+                for all sessions
+ Returns:
+ A deferred of the deletion transaction
+ """
+
+ # lock for consistency with uploading
+ with (yield self._upload_linearizer.queue(user_id)):
+ yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id)
+
+ @defer.inlineCallbacks
+ def upload_room_keys(self, user_id, version, room_keys):
+ """Bulk upload a list of room keys into a given backup version, asserting
+ that the given version is the current backup version. room_keys are merged
+ into the current backup as described in RoomKeysServlet.on_PUT().
+
+ Args:
+ user_id(str): the user whose backup we're setting
+ version(str): the version ID of the backup we're updating
+ room_keys(dict): a nested dict describing the room_keys we're setting:
+
+ {
+ "rooms": {
+ "!abc:matrix.org": {
+ "sessions": {
+ "c0ff33": {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": false,
+ "session_data": "SSBBTSBBIEZJU0gK"
+ }
+ }
+ }
+ }
+ }
+
+ Raises:
+ NotFoundError: if there are no versions defined
+ RoomKeysVersionError: if the uploaded version is not the current version
+ """
+
+ # TODO: Validate the JSON to make sure it has the right keys.
+
+ # XXX: perhaps we should use a finer grained lock here?
+ with (yield self._upload_linearizer.queue(user_id)):
+
+ # Check that the version we're trying to upload is the current version
+ try:
+ version_info = yield self.store.get_e2e_room_keys_version_info(user_id)
+ except StoreError as e:
+ if e.code == 404:
+ raise NotFoundError("Version '%s' not found" % (version,))
+ else:
+ raise
+
+ if version_info['version'] != version:
+ # Check that the version we're trying to upload actually exists
+ try:
+ version_info = yield self.store.get_e2e_room_keys_version_info(
+ user_id, version,
+ )
+ # if we get this far, the version must exist
+ raise RoomKeysVersionError(current_version=version_info['version'])
+ except StoreError as e:
+ if e.code == 404:
+ raise NotFoundError("Version '%s' not found" % (version,))
+ else:
+ raise
+
+ # go through the room_keys.
+ # XXX: this should/could be done concurrently, given we're in a lock.
+ for room_id, room in iteritems(room_keys['rooms']):
+ for session_id, session in iteritems(room['sessions']):
+ yield self._upload_room_key(
+ user_id, version, room_id, session_id, session
+ )
+
+ @defer.inlineCallbacks
+ def _upload_room_key(self, user_id, version, room_id, session_id, room_key):
+ """Upload a given room_key for a given room and session into a given
+ version of the backup. Merges the key with any which might already exist.
+
+ Args:
+ user_id(str): the user whose backup we're setting
+ version(str): the version ID of the backup we're updating
+ room_id(str): the ID of the room whose keys we're setting
+ session_id(str): the session whose room_key we're setting
+ room_key(dict): the room_key being set
+ """
+
+ # get the room_key for this particular row
+ current_room_key = None
+ try:
+ current_room_key = yield self.store.get_e2e_room_key(
+ user_id, version, room_id, session_id
+ )
+ except StoreError as e:
+ if e.code == 404:
+ pass
+ else:
+ raise
+
+ if self._should_replace_room_key(current_room_key, room_key):
+ yield self.store.set_e2e_room_key(
+ user_id, version, room_id, session_id, room_key
+ )
+
+ @staticmethod
+ def _should_replace_room_key(current_room_key, room_key):
+ """
+ Determine whether to replace a given current_room_key (if any)
+ with a newly uploaded room_key backup
+
+ Args:
+ current_room_key (dict): Optional, the current room_key dict if any
+ room_key (dict): The new room_key dict which may or may not be fit to
+ replace the current_room_key
+
+ Returns:
+ True if current_room_key should be replaced by room_key in the backup
+ """
+
+ if current_room_key:
+ # spelt out with if/elifs rather than nested boolean expressions
+ # purely for legibility.
+
+ if room_key['is_verified'] and not current_room_key['is_verified']:
+ return True
+ elif (
+ room_key['first_message_index'] <
+ current_room_key['first_message_index']
+ ):
+ return True
+ elif room_key['forwarded_count'] < current_room_key['forwarded_count']:
+ return True
+ else:
+ return False
+ return True
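# Illustrative check of the replacement rules above, using assumed example keys:
# a verified upload always wins over an unverified one; otherwise a lower
# first_message_index wins, then a lower forwarded_count; brand-new sessions
# (no current key) are always stored.
current = {"is_verified": False, "first_message_index": 3, "forwarded_count": 2}

assert E2eRoomKeysHandler._should_replace_room_key(
    current, dict(current, is_verified=True))
assert E2eRoomKeysHandler._should_replace_room_key(
    current, dict(current, first_message_index=1))
assert not E2eRoomKeysHandler._should_replace_room_key(
    current, dict(current, forwarded_count=5))
assert E2eRoomKeysHandler._should_replace_room_key(None, current)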
+
+ @defer.inlineCallbacks
+ def create_version(self, user_id, version_info):
+ """Create a new backup version. This automatically becomes the new
+ backup version for the user's keys; previous backups will no longer be
+        writeable.
+
+ Args:
+ user_id(str): the user whose backup version we're creating
+ version_info(dict): metadata about the new version being created
+
+ {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
+ }
+
+ Returns:
+ A deferred of a string that gives the new version number.
+ """
+
+ # TODO: Validate the JSON to make sure it has the right keys.
+
+ # lock everyone out until we've switched version
+ with (yield self._upload_linearizer.queue(user_id)):
+ new_version = yield self.store.create_e2e_room_keys_version(
+ user_id, version_info
+ )
+ defer.returnValue(new_version)
+
+ @defer.inlineCallbacks
+ def get_version_info(self, user_id, version=None):
+ """Get the info about a given version of the user's backup
+
+ Args:
+ user_id(str): the user whose current backup version we're querying
+ version(str): Optional; if None gives the most recent version
+ otherwise a historical one.
+ Raises:
+ StoreError: code 404 if the requested backup version doesn't exist
+ Returns:
+            A deferred of an info dict that gives the info about the backup version.
+
+ {
+ "version": "1234",
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
+ }
+ """
+
+ with (yield self._upload_linearizer.queue(user_id)):
+ res = yield self.store.get_e2e_room_keys_version_info(user_id, version)
+ defer.returnValue(res)
+
+ @defer.inlineCallbacks
+ def delete_version(self, user_id, version=None):
+ """Deletes a given version of the user's e2e_room_keys backup
+
+ Args:
+ user_id(str): the user whose current backup version we're deleting
+ version(str): the version id of the backup being deleted
+ Raises:
+ StoreError: code 404 if this backup version doesn't exist
+ """
+
+ with (yield self._upload_linearizer.queue(user_id)):
+ yield self.store.delete_e2e_room_keys_version(user_id, version)
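# Illustrative end-to-end sketch (assumed wiring) of driving the handler above
# from another inlineCallbacks function: create a backup version, upload one
# key into it, then read it back.  The "hs" accessor name, room id and session
# id are assumptions for the example.
from twisted.internet import defer

@defer.inlineCallbacks
def backup_one_key(hs, user_id):
    handler = hs.get_e2e_room_keys_handler()  # assumed HomeServer accessor

    version = yield handler.create_version(user_id, {
        "algorithm": "m.megolm_backup.v1",
        "auth_data": "opaque auth data",
    })

    yield handler.upload_room_keys(user_id, version, {
        "rooms": {
            "!abc:matrix.org": {
                "sessions": {
                    "c0ff33": {
                        "first_message_index": 1,
                        "forwarded_count": 0,
                        "is_verified": False,
                        "session_data": "SSBBTSBBIEZJU0gK",
                    },
                },
            },
        },
    })

    keys = yield handler.get_room_keys(user_id, version, room_id="!abc:matrix.org")
    defer.returnValue(keys)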
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 3fa7a984..a3bb864b 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -18,7 +18,6 @@
import itertools
import logging
-import sys
import six
from six import iteritems, itervalues
@@ -54,7 +53,7 @@ from synapse.replication.http.federation import (
ReplicationFederationSendEventsRestServlet,
)
from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
-from synapse.state import resolve_events_with_factory
+from synapse.state import StateResolutionStore, resolve_events_with_store
from synapse.types import UserID, get_domain_from_id
from synapse.util import logcontext, unwrapFirstError
from synapse.util.async_helpers import Linearizer
@@ -69,6 +68,27 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__)
+def shortstr(iterable, maxitems=5):
+ """If iterable has maxitems or fewer, return the stringification of a list
+ containing those items.
+
+    Otherwise, return the stringification of a list with the first maxitems items,
+ followed by "...".
+
+ Args:
+ iterable (Iterable): iterable to truncate
+ maxitems (int): number of items to return before truncating
+
+ Returns:
+ unicode
+ """
+
+ items = list(itertools.islice(iterable, maxitems + 1))
+ if len(items) <= maxitems:
+ return str(items)
+ return u"[" + u", ".join(repr(r) for r in items[:maxitems]) + u", ...]"
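# Illustrative behaviour of the helper above (standalone copy, assumed values):
# short iterables are rendered in full, longer ones are truncated with "...".
import itertools

def _shortstr_example(iterable, maxitems=5):
    items = list(itertools.islice(iterable, maxitems + 1))
    if len(items) <= maxitems:
        return str(items)
    return u"[" + u", ".join(repr(r) for r in items[:maxitems]) + u", ...]"

assert _shortstr_example([1, 2, 3]) == "[1, 2, 3]"
assert _shortstr_example(range(10), maxitems=3) == "[0, 1, 2, ...]"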
+
+
class FederationHandler(BaseHandler):
"""Handles events that originated from federation.
Responsible for:
@@ -85,7 +105,7 @@ class FederationHandler(BaseHandler):
self.hs = hs
- self.store = hs.get_datastore()
+ self.store = hs.get_datastore() # type: synapse.storage.DataStore
self.federation_client = hs.get_federation_client()
self.state_handler = hs.get_state_handler()
self.server_name = hs.hostname
@@ -114,9 +134,8 @@ class FederationHandler(BaseHandler):
self._room_pdu_linearizer = Linearizer("fed_room_pdu")
@defer.inlineCallbacks
- @log_function
def on_receive_pdu(
- self, origin, pdu, get_missing=True, sent_to_us_directly=False,
+ self, origin, pdu, sent_to_us_directly=False,
):
""" Process a PDU received via a federation /send/ transaction, or
via backfill of missing prev_events
@@ -125,14 +144,23 @@ class FederationHandler(BaseHandler):
origin (str): server which initiated the /send/ transaction. Will
be used to fetch missing events or state.
pdu (FrozenEvent): received PDU
- get_missing (bool): True if we should fetch missing prev_events
+ sent_to_us_directly (bool): True if this event was pushed to us; False if
+ we pulled it as the result of a missing prev_event.
Returns (Deferred): completes with None
"""
+ room_id = pdu.room_id
+ event_id = pdu.event_id
+
+ logger.info(
+ "[%s %s] handling received PDU: %s",
+ room_id, event_id, pdu,
+ )
+
# We reprocess pdus when we have seen them only as outliers
existing = yield self.store.get_event(
- pdu.event_id,
+ event_id,
allow_none=True,
allow_rejected=True,
)
@@ -147,7 +175,7 @@ class FederationHandler(BaseHandler):
)
)
if already_seen:
- logger.debug("Already seen pdu %s", pdu.event_id)
+ logger.debug("[%s %s]: Already seen pdu", room_id, event_id)
return
# do some initial sanity-checking of the event. In particular, make
@@ -156,6 +184,7 @@ class FederationHandler(BaseHandler):
try:
self._sanity_check_event(pdu)
except SynapseError as err:
+ logger.warn("[%s %s] Received event failed sanity checks", room_id, event_id)
raise FederationError(
"ERROR",
err.code,
@@ -165,33 +194,30 @@ class FederationHandler(BaseHandler):
# If we are currently in the process of joining this room, then we
# queue up events for later processing.
- if pdu.room_id in self.room_queues:
- logger.info("Ignoring PDU %s for room %s from %s for now; join "
- "in progress", pdu.event_id, pdu.room_id, origin)
- self.room_queues[pdu.room_id].append((pdu, origin))
+ if room_id in self.room_queues:
+ logger.info(
+ "[%s %s] Queuing PDU from %s for now: join in progress",
+ room_id, event_id, origin,
+ )
+ self.room_queues[room_id].append((pdu, origin))
return
- # If we're no longer in the room just ditch the event entirely. This
- # is probably an old server that has come back and thinks we're still
- # in the room (or we've been rejoined to the room by a state reset).
+ # If we're not in the room just ditch the event entirely. This is
+ # probably an old server that has come back and thinks we're still in
+ # the room (or we've been rejoined to the room by a state reset).
#
- # If we were never in the room then maybe our database got vaped and
- # we should check if we *are* in fact in the room. If we are then we
- # can magically rejoin the room.
+ # Note that if we were never in the room then we would have already
+ # dropped the event, since we wouldn't know the room version.
is_in_room = yield self.auth.check_host_in_room(
- pdu.room_id,
+ room_id,
self.server_name
)
if not is_in_room:
- was_in_room = yield self.store.was_host_joined(
- pdu.room_id, self.server_name,
+ logger.info(
+ "[%s %s] Ignoring PDU from %s as we're not in the room",
+ room_id, event_id, origin,
)
- if was_in_room:
- logger.info(
- "Ignoring PDU %s for room %s from %s as we've left the room!",
- pdu.event_id, pdu.room_id, origin,
- )
- defer.returnValue(None)
+ defer.returnValue(None)
state = None
auth_chain = []
@@ -204,11 +230,11 @@ class FederationHandler(BaseHandler):
)
logger.debug(
- "_handle_new_pdu min_depth for %s: %d",
- pdu.room_id, min_depth
+ "[%s %s] min_depth: %d",
+ room_id, event_id, min_depth,
)
- prevs = {e_id for e_id, _ in pdu.prev_events}
+ prevs = set(pdu.prev_event_ids())
seen = yield self.store.have_seen_events(prevs)
if min_depth and pdu.depth < min_depth:
@@ -218,17 +244,18 @@ class FederationHandler(BaseHandler):
# send to the clients.
pdu.internal_metadata.outlier = True
elif min_depth and pdu.depth > min_depth:
- if get_missing and prevs - seen:
+ missing_prevs = prevs - seen
+ if sent_to_us_directly and missing_prevs:
# If we're missing stuff, ensure we only fetch stuff one
# at a time.
logger.info(
- "Acquiring lock for room %r to fetch %d missing events: %r...",
- pdu.room_id, len(prevs - seen), list(prevs - seen)[:5],
+ "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s",
+ room_id, event_id, len(missing_prevs), shortstr(missing_prevs),
)
with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
logger.info(
- "Acquired lock for room %r to fetch %d missing events",
- pdu.room_id, len(prevs - seen),
+ "[%s %s] Acquired room lock to fetch %d missing prev_events",
+ room_id, event_id, len(missing_prevs),
)
yield self._get_missing_events_for_pdu(
@@ -241,69 +268,150 @@ class FederationHandler(BaseHandler):
if not prevs - seen:
logger.info(
- "Found all missing prev events for %s", pdu.event_id
+ "[%s %s] Found all missing prev_events",
+ room_id, event_id,
)
- elif prevs - seen:
+ elif missing_prevs:
logger.info(
- "Not fetching %d missing events for room %r,event %s: %r...",
- len(prevs - seen), pdu.room_id, pdu.event_id,
- list(prevs - seen)[:5],
+ "[%s %s] Not recursively fetching %d missing prev_events: %s",
+ room_id, event_id, len(missing_prevs), shortstr(missing_prevs),
)
- if sent_to_us_directly and prevs - seen:
- # If they have sent it to us directly, and the server
- # isn't telling us about the auth events that it's
- # made a message referencing, we explode
- raise FederationError(
- "ERROR",
- 403,
- (
- "Your server isn't divulging details about prev_events "
- "referenced in this event."
- ),
- affected=pdu.event_id,
- )
- elif prevs - seen:
- # Calculate the state of the previous events, and
- # de-conflict them to find the current state.
- state_groups = []
+ if prevs - seen:
+ # We've still not been able to get all of the prev_events for this event.
+ #
+ # In this case, we need to fall back to asking another server in the
+ # federation for the state at this event. That's ok provided we then
+ # resolve the state against other bits of the DAG before using it (which
+ # will ensure that you can't just take over a room by sending an event,
+ # withholding its prev_events, and declaring yourself to be an admin in
+ # the subsequent state request).
+ #
+ # Now, if we're pulling this event as a missing prev_event, then clearly
+ # this event is not going to become the only forward-extremity and we are
+ # guaranteed to resolve its state against our existing forward
+ # extremities, so that should be fine.
+ #
+ # On the other hand, if this event was pushed to us, it is possible for
+ # it to become the only forward-extremity in the room, and we would then
+ # trust its state to be the state for the whole room. This is very bad.
+ # Further, if the event was pushed to us, there is no excuse for us not to
+ # have all the prev_events. We therefore reject any such events.
+ #
+ # XXX this really feels like it could/should be merged with the above,
+ # but there is an interaction with min_depth that I'm not really
+ # following.
+
+ if sent_to_us_directly:
+ logger.warn(
+ "[%s %s] Rejecting: failed to fetch %d prev events: %s",
+ room_id, event_id, len(prevs - seen), shortstr(prevs - seen)
+ )
+ raise FederationError(
+ "ERROR",
+ 403,
+ (
+ "Your server isn't divulging details about prev_events "
+ "referenced in this event."
+ ),
+ affected=pdu.event_id,
+ )
+
+ # Calculate the state after each of the previous events, and
+ # resolve them to find the correct state at the current event.
auth_chains = set()
+ event_map = {
+ event_id: pdu,
+ }
try:
# Get the state of the events we know about
- ours = yield self.store.get_state_groups(pdu.room_id, list(seen))
- state_groups.append(ours)
+ ours = yield self.store.get_state_groups_ids(room_id, seen)
+
+ # state_maps is a list of mappings from (type, state_key) to event_id
+ # type: list[dict[tuple[str, str], str]]
+ state_maps = list(ours.values())
+
+ # we don't need this any more, let's delete it.
+ del ours
# Ask the remote server for the states we don't
# know about
for p in prevs - seen:
- state, got_auth_chain = (
- yield self.federation_client.get_state_for_room(
- origin, pdu.room_id, p
- )
- )
- auth_chains.update(got_auth_chain)
- state_group = {(x.type, x.state_key): x.event_id for x in state}
- state_groups.append(state_group)
-
- # Resolve any conflicting state
- def fetch(ev_ids):
- return self.store.get_events(
- ev_ids, get_prev_content=False, check_redacted=False
+ logger.info(
+ "[%s %s] Requesting state at missing prev_event %s",
+ room_id, event_id, p,
)
- room_version = yield self.store.get_room_version(pdu.room_id)
- state_map = yield resolve_events_with_factory(
- room_version, state_groups, {pdu.event_id: pdu}, fetch
+ with logcontext.nested_logging_context(p):
+ # note that if any of the missing prevs share missing state or
+ # auth events, the requests to fetch those events are deduped
+ # by the get_pdu_cache in federation_client.
+ remote_state, got_auth_chain = (
+ yield self.federation_client.get_state_for_room(
+ origin, room_id, p,
+ )
+ )
+
+ # we want the state *after* p; get_state_for_room returns the
+ # state *before* p.
+ remote_event = yield self.federation_client.get_pdu(
+ [origin], p, outlier=True,
+ )
+
+ if remote_event is None:
+ raise Exception(
+ "Unable to get missing prev_event %s" % (p, )
+ )
+
+ if remote_event.is_state():
+ remote_state.append(remote_event)
+
+ # XXX hrm I'm not convinced that duplicate events will compare
+ # for equality, so I'm not sure this does what the author
+ # hoped.
+ auth_chains.update(got_auth_chain)
+
+ remote_state_map = {
+ (x.type, x.state_key): x.event_id for x in remote_state
+ }
+ state_maps.append(remote_state_map)
+
+ for x in remote_state:
+ event_map[x.event_id] = x
+
+ room_version = yield self.store.get_room_version(room_id)
+ state_map = yield resolve_events_with_store(
+ room_version, state_maps, event_map,
+ state_res_store=StateResolutionStore(self.store),
)
- state = (yield self.store.get_events(state_map.values())).values()
+ # We need to give _process_received_pdu the actual state events
+ # rather than event ids, so generate that now.
+
+ # First though we need to fetch all the events that are in
+ # state_map, so we can build up the state below.
+ evs = yield self.store.get_events(
+ list(state_map.values()),
+ get_prev_content=False,
+ check_redacted=False,
+ )
+ event_map.update(evs)
+
+ state = [
+ event_map[e] for e in six.itervalues(state_map)
+ ]
auth_chain = list(auth_chains)
except Exception:
+ logger.warn(
+ "[%s %s] Error attempting to resolve state at missing "
+ "prev_events",
+ room_id, event_id, exc_info=True,
+ )
raise FederationError(
"ERROR",
403,
"We can't get valid state history.",
- affected=pdu.event_id,
+ affected=event_id,
)
yield self._process_received_pdu(
@@ -322,15 +430,16 @@ class FederationHandler(BaseHandler):
prevs (set(str)): List of event ids which we are missing
min_depth (int): Minimum depth of events to return.
"""
- # We recalculate seen, since it may have changed.
+
+ room_id = pdu.room_id
+ event_id = pdu.event_id
+
seen = yield self.store.have_seen_events(prevs)
if not prevs - seen:
return
- latest = yield self.store.get_latest_event_ids_in_room(
- pdu.room_id
- )
+ latest = yield self.store.get_latest_event_ids_in_room(room_id)
# We add the prev events that we have seen to the latest
# list to ensure the remote server doesn't give them to us
@@ -338,8 +447,8 @@ class FederationHandler(BaseHandler):
latest |= seen
logger.info(
- "Missing %d events for room %r pdu %s: %r...",
- len(prevs - seen), pdu.room_id, pdu.event_id, list(prevs - seen)[:5]
+ "[%s %s]: Requesting missing events between %s and %s",
+ room_id, event_id, shortstr(latest), event_id,
)
# XXX: we set timeout to 10s to help workaround
@@ -360,132 +469,144 @@ class FederationHandler(BaseHandler):
# apparently.
#
# see https://github.com/matrix-org/synapse/pull/1744
+ #
+ # ----
+ #
+ # Update richvdh 2018/09/18: There are a number of problems with timing this
+        # request out aggressively on the client side:
+ #
+ # - it plays badly with the server-side rate-limiter, which starts tarpitting you
+ # if you send too many requests at once, so you end up with the server carefully
+ # working through the backlog of your requests, which you have already timed
+ # out.
+ #
+ # - for this request in particular, we now (as of
+ # https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
+        #   server can't produce a plausible-looking set of prev_events - so we become
+ # much more likely to reject the event.
+ #
+ # - contrary to what it says above, we do *not* fall back to fetching fresh state
+ # for the room if get_missing_events times out. Rather, we give up processing
+ # the PDU whose prevs we are missing, which then makes it much more likely that
+ # we'll end up back here for the *next* PDU in the list, which exacerbates the
+ # problem.
+ #
+        # - the aggressive 10s timeout was introduced to deal with incoming federation
+ # requests taking 8 hours to process. It's not entirely clear why that was going
+ # on; certainly there were other issues causing traffic storms which are now
+ # resolved, and I think in any case we may be more sensible about our locking
+ # now. We're *certainly* more sensible about our logging.
+ #
+        # All that said: Let's try increasing the timeout to 60s and see what happens.
missing_events = yield self.federation_client.get_missing_events(
origin,
- pdu.room_id,
+ room_id,
earliest_events_ids=list(latest),
latest_events=[pdu],
limit=10,
min_depth=min_depth,
- timeout=10000,
+ timeout=60000,
)
logger.info(
- "Got %d events: %r...",
- len(missing_events), [e.event_id for e in missing_events[:5]]
+ "[%s %s]: Got %d prev_events: %s",
+ room_id, event_id, len(missing_events), shortstr(missing_events),
)
# We want to sort these by depth so we process them and
# tell clients about them in order.
missing_events.sort(key=lambda x: x.depth)
- for e in missing_events:
- logger.info("Handling found event %s", e.event_id)
- try:
- yield self.on_receive_pdu(
- origin,
- e,
- get_missing=False
- )
- except FederationError as e:
- if e.code == 403:
- logger.warn("Event %s failed history check.")
- else:
- raise
+ for ev in missing_events:
+ logger.info(
+ "[%s %s] Handling received prev_event %s",
+ room_id, event_id, ev.event_id,
+ )
+ with logcontext.nested_logging_context(ev.event_id):
+ try:
+ yield self.on_receive_pdu(
+ origin,
+ ev,
+ sent_to_us_directly=False,
+ )
+ except FederationError as e:
+ if e.code == 403:
+ logger.warn(
+ "[%s %s] Received prev_event %s failed history check.",
+ room_id, event_id, ev.event_id,
+ )
+ else:
+ raise
- @log_function
@defer.inlineCallbacks
- def _process_received_pdu(self, origin, pdu, state, auth_chain):
+ def _process_received_pdu(self, origin, event, state, auth_chain):
""" Called when we have a new pdu. We need to do auth checks and put it
through the StateHandler.
"""
- event = pdu
-
- logger.debug("Processing event: %s", event)
+ room_id = event.room_id
+ event_id = event.event_id
- # FIXME (erikj): Awful hack to make the case where we are not currently
- # in the room work
- # If state and auth_chain are None, then we don't need to do this check
- # as we already know we have enough state in the DB to handle this
- # event.
- if state and auth_chain and not event.internal_metadata.is_outlier():
- is_in_room = yield self.auth.check_host_in_room(
- event.room_id,
- self.server_name
- )
- else:
- is_in_room = True
- if not is_in_room:
- logger.info(
- "Got event for room we're not in: %r %r",
- event.room_id, event.event_id
- )
+ logger.debug(
+ "[%s %s] Processing event: %s",
+ room_id, event_id, event,
+ )
- try:
- yield self._persist_auth_tree(
- origin, auth_chain, state, event
- )
- except AuthError as e:
- raise FederationError(
- "ERROR",
- e.code,
- e.msg,
- affected=event.event_id,
- )
+ event_ids = set()
+ if state:
+ event_ids |= {e.event_id for e in state}
+ if auth_chain:
+ event_ids |= {e.event_id for e in auth_chain}
- else:
- event_ids = set()
- if state:
- event_ids |= {e.event_id for e in state}
- if auth_chain:
- event_ids |= {e.event_id for e in auth_chain}
+ seen_ids = yield self.store.have_seen_events(event_ids)
- seen_ids = yield self.store.have_seen_events(event_ids)
+ if state and auth_chain is not None:
+ # If we have any state or auth_chain given to us by the replication
+ # layer, then we should handle them (if we haven't before.)
- if state and auth_chain is not None:
- # If we have any state or auth_chain given to us by the replication
- # layer, then we should handle them (if we haven't before.)
+ event_infos = []
- event_infos = []
+ for e in itertools.chain(auth_chain, state):
+ if e.event_id in seen_ids:
+ continue
+ e.internal_metadata.outlier = True
+ auth_ids = e.auth_event_ids()
+ auth = {
+ (e.type, e.state_key): e for e in auth_chain
+ if e.event_id in auth_ids or e.type == EventTypes.Create
+ }
+ event_infos.append({
+ "event": e,
+ "auth_events": auth,
+ })
+ seen_ids.add(e.event_id)
- for e in itertools.chain(auth_chain, state):
- if e.event_id in seen_ids:
- continue
- e.internal_metadata.outlier = True
- auth_ids = [e_id for e_id, _ in e.auth_events]
- auth = {
- (e.type, e.state_key): e for e in auth_chain
- if e.event_id in auth_ids or e.type == EventTypes.Create
- }
- event_infos.append({
- "event": e,
- "auth_events": auth,
- })
- seen_ids.add(e.event_id)
-
- yield self._handle_new_events(origin, event_infos)
+ logger.info(
+ "[%s %s] persisting newly-received auth/state events %s",
+ room_id, event_id, [e["event"].event_id for e in event_infos]
+ )
+ yield self._handle_new_events(origin, event_infos)
- try:
- context = yield self._handle_new_event(
- origin,
- event,
- state=state,
- )
- except AuthError as e:
- raise FederationError(
- "ERROR",
- e.code,
- e.msg,
- affected=event.event_id,
- )
+ try:
+ context = yield self._handle_new_event(
+ origin,
+ event,
+ state=state,
+ )
+ except AuthError as e:
+ raise FederationError(
+ "ERROR",
+ e.code,
+ e.msg,
+ affected=event.event_id,
+ )
- room = yield self.store.get_room(event.room_id)
+ room = yield self.store.get_room(room_id)
if not room:
try:
yield self.store.store_room(
- room_id=event.room_id,
+ room_id=room_id,
room_creator_user_id="",
is_public=False,
)
@@ -513,7 +634,7 @@ class FederationHandler(BaseHandler):
if newly_joined:
user = UserID.from_string(event.state_key)
- yield self.user_joined_room(user, event.room_id)
+ yield self.user_joined_room(user, room_id)
@log_function
@defer.inlineCallbacks
@@ -568,7 +689,7 @@ class FederationHandler(BaseHandler):
edges = [
ev.event_id
for ev in events
- if set(e_id for e_id, _ in ev.prev_events) - event_ids
+ if set(ev.prev_event_ids()) - event_ids
]
logger.info(
@@ -594,8 +715,8 @@ class FederationHandler(BaseHandler):
required_auth = set(
a_id
- for event in events + state_events.values() + auth_events.values()
- for a_id, _ in event.auth_events
+ for event in events + list(state_events.values()) + list(auth_events.values())
+ for a_id in event.auth_event_ids()
)
auth_events.update({
e_id: event_map[e_id] for e_id in required_auth if e_id in event_map
@@ -611,7 +732,7 @@ class FederationHandler(BaseHandler):
auth_events.update(ret_events)
required_auth.update(
- a_id for event in ret_events.values() for a_id, _ in event.auth_events
+ a_id for event in ret_events.values() for a_id in event.auth_event_ids()
)
missing_auth = required_auth - set(auth_events)
@@ -638,7 +759,7 @@ class FederationHandler(BaseHandler):
required_auth.update(
a_id
for event in results if event
- for a_id, _ in event.auth_events
+ for a_id in event.auth_event_ids()
)
missing_auth = required_auth - set(auth_events)
@@ -658,7 +779,7 @@ class FederationHandler(BaseHandler):
"auth_events": {
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
- for a_id, _ in a.auth_events
+ for a_id in a.auth_event_ids()
if a_id in auth_events
}
})
@@ -670,7 +791,7 @@ class FederationHandler(BaseHandler):
"auth_events": {
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
- for a_id, _ in event_map[e_id].auth_events
+ for a_id in event_map[e_id].auth_event_ids()
if a_id in auth_events
}
})
@@ -802,7 +923,7 @@ class FederationHandler(BaseHandler):
)
continue
except NotRetryingDestination as e:
- logger.info(e.message)
+ logger.info(str(e))
continue
except FederationDeniedError as e:
logger.info(e)
@@ -883,17 +1004,17 @@ class FederationHandler(BaseHandler):
Raises:
SynapseError if the event does not pass muster
"""
- if len(ev.prev_events) > 20:
+ if len(ev.prev_event_ids()) > 20:
logger.warn("Rejecting event %s which has %i prev_events",
- ev.event_id, len(ev.prev_events))
+ ev.event_id, len(ev.prev_event_ids()))
raise SynapseError(
http_client.BAD_REQUEST,
"Too many prev_events",
)
- if len(ev.auth_events) > 10:
+ if len(ev.auth_event_ids()) > 10:
logger.warn("Rejecting event %s which has %i auth_events",
- ev.event_id, len(ev.auth_events))
+ ev.event_id, len(ev.auth_event_ids()))
raise SynapseError(
http_client.BAD_REQUEST,
"Too many auth_events",
@@ -918,7 +1039,7 @@ class FederationHandler(BaseHandler):
def on_event_auth(self, event_id):
event = yield self.store.get_event(event_id)
auth = yield self.store.get_auth_chain(
- [auth_id for auth_id, _ in event.auth_events],
+ [auth_id for auth_id in event.auth_event_ids()],
include_given=True
)
defer.returnValue([e for e in auth])
@@ -1027,7 +1148,8 @@ class FederationHandler(BaseHandler):
try:
logger.info("Processing queued PDU %s which was received "
"while we were joining %s", p.event_id, p.room_id)
- yield self.on_receive_pdu(origin, p)
+ with logcontext.nested_logging_context(p.event_id):
+ yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
except Exception as e:
logger.warn(
"Error handling queued PDU %s from %s: %s",
@@ -1358,7 +1480,7 @@ class FederationHandler(BaseHandler):
)
if state_groups:
- _, state = state_groups.items().pop()
+ _, state = list(state_groups.items()).pop()
results = state
if event.is_state():
@@ -1430,12 +1552,10 @@ class FederationHandler(BaseHandler):
else:
defer.returnValue(None)
- @log_function
def get_min_depth_for_context(self, context):
return self.store.get_min_depth(context)
@defer.inlineCallbacks
- @log_function
def _handle_new_event(self, origin, event, state=None, auth_events=None,
backfilled=False):
context = yield self._prep_event(
@@ -1444,6 +1564,9 @@ class FederationHandler(BaseHandler):
auth_events=auth_events,
)
+ # reraise does not allow inlineCallbacks to preserve the stacktrace, so we
+ # hack around with a try/finally instead.
+ success = False
try:
if not event.internal_metadata.is_outlier() and not backfilled:
yield self.action_generator.handle_push_actions_for_event(
@@ -1454,15 +1577,13 @@ class FederationHandler(BaseHandler):
[(event, context)],
backfilled=backfilled,
)
- except: # noqa: E722, as we reraise the exception this is fine.
- tp, value, tb = sys.exc_info()
-
- logcontext.run_in_background(
- self.store.remove_push_actions_from_staging,
- event.event_id,
- )
-
- six.reraise(tp, value, tb)
+ success = True
+ finally:
+ if not success:
+ logcontext.run_in_background(
+ self.store.remove_push_actions_from_staging,
+ event.event_id,
+ )
defer.returnValue(context)
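# Illustrative sketch of the success-flag pattern introduced above: under
# @defer.inlineCallbacks, six.reraise() would lose the original stack trace, so
# cleanup is hung off a finally block instead and the exception simply
# propagates.  The persist_event() call is an assumed stand-in for the real work.
from twisted.internet import defer
from synapse.util import logcontext

@defer.inlineCallbacks
def _persist_with_cleanup(store, event, context):
    success = False
    try:
        yield store.persist_event(event, context)  # assumed stand-in
        success = True
    finally:
        if not success:
            # only reached if the try block raised; the exception keeps its
            # original traceback as it propagates out of the generator.
            logcontext.run_in_background(
                store.remove_push_actions_from_staging, event.event_id,
            )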
@@ -1475,15 +1596,22 @@ class FederationHandler(BaseHandler):
Notifies about the events where appropriate.
"""
- contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
- [
- logcontext.run_in_background(
- self._prep_event,
+
+ @defer.inlineCallbacks
+ def prep(ev_info):
+ event = ev_info["event"]
+ with logcontext.nested_logging_context(suffix=event.event_id):
+ res = yield self._prep_event(
origin,
- ev_info["event"],
+ event,
state=ev_info.get("state"),
auth_events=ev_info.get("auth_events"),
)
+ defer.returnValue(res)
+
+ contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
+ [
+ logcontext.run_in_background(prep, ev_info)
for ev_info in event_infos
], consumeErrors=True,
))
@@ -1533,7 +1661,7 @@ class FederationHandler(BaseHandler):
missing_auth_events = set()
for e in itertools.chain(auth_events, state, [event]):
- for e_id, _ in e.auth_events:
+ for e_id in e.auth_event_ids():
if e_id not in event_map:
missing_auth_events.add(e_id)
@@ -1552,7 +1680,7 @@ class FederationHandler(BaseHandler):
for e in itertools.chain(auth_events, state, [event]):
auth_for_e = {
(event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
- for e_id, _ in e.auth_events
+ for e_id in e.auth_event_ids()
if e_id in event_map
}
if create_event:
@@ -1620,10 +1748,10 @@ class FederationHandler(BaseHandler):
# This is a hack to fix some old rooms where the initial join event
# didn't reference the create event in its auth events.
- if event.type == EventTypes.Member and not event.auth_events:
- if len(event.prev_events) == 1 and event.depth < 5:
+ if event.type == EventTypes.Member and not event.auth_event_ids():
+ if len(event.prev_event_ids()) == 1 and event.depth < 5:
c = yield self.store.get_event(
- event.prev_events[0][0],
+ event.prev_event_ids()[0],
allow_none=True,
)
if c and c.type == EventTypes.Create:
@@ -1635,8 +1763,8 @@ class FederationHandler(BaseHandler):
)
except AuthError as e:
logger.warn(
- "Rejecting %s because %s",
- event.event_id, e.msg
+ "[%s %s] Rejecting: %s",
+ event.room_id, event.event_id, e.msg
)
context.rejected = RejectedReason.AUTH_ERROR
@@ -1670,7 +1798,7 @@ class FederationHandler(BaseHandler):
# Now get the current auth_chain for the event.
local_auth_chain = yield self.store.get_auth_chain(
- [auth_id for auth_id, _ in event.auth_events],
+ [auth_id for auth_id in event.auth_event_ids()],
include_given=True
)
@@ -1687,7 +1815,7 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
def on_get_missing_events(self, origin, room_id, earliest_events,
- latest_events, limit, min_depth):
+ latest_events, limit):
in_room = yield self.auth.check_host_in_room(
room_id,
origin
@@ -1696,14 +1824,12 @@ class FederationHandler(BaseHandler):
raise AuthError(403, "Host not in room.")
limit = min(limit, 20)
- min_depth = max(min_depth, 0)
missing_events = yield self.store.get_missing_events(
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
limit=limit,
- min_depth=min_depth,
)
missing_events = yield filter_events_for_server(
@@ -1728,7 +1854,7 @@ class FederationHandler(BaseHandler):
"""
# Check if we have all the auth events.
current_state = set(e.event_id for e in auth_events.values())
- event_auth_events = set(e_id for e_id, _ in event.auth_events)
+ event_auth_events = set(event.auth_event_ids())
if event.is_state():
event_key = (event.type, event.state_key)
@@ -1772,7 +1898,7 @@ class FederationHandler(BaseHandler):
continue
try:
- auth_ids = [e_id for e_id, _ in e.auth_events]
+ auth_ids = e.auth_event_ids()
auth = {
(e.type, e.state_key): e for e in remote_auth_chain
if e.event_id in auth_ids or e.type == EventTypes.Create
@@ -1793,7 +1919,7 @@ class FederationHandler(BaseHandler):
pass
have_events = yield self.store.get_seen_events_with_rejections(
- [e_id for e_id, _ in event.auth_events]
+ event.auth_event_ids()
)
seen_events = set(have_events.keys())
except Exception:
@@ -1895,7 +2021,7 @@ class FederationHandler(BaseHandler):
continue
try:
- auth_ids = [e_id for e_id, _ in ev.auth_events]
+ auth_ids = ev.auth_event_ids()
auth = {
(e.type, e.state_key): e
for e in result["auth_chain"]
@@ -2087,7 +2213,7 @@ class FederationHandler(BaseHandler):
missing_remote_ids = [e.event_id for e in missing_remotes]
base_remote_rejected = list(missing_remotes)
for e in missing_remotes:
- for e_id, _ in e.auth_events:
+ for e_id in e.auth_event_ids():
if e_id in missing_remote_ids:
try:
base_remote_rejected.remove(e)
@@ -2357,7 +2483,7 @@ class FederationHandler(BaseHandler):
if not backfilled: # Never notify for backfilled events
for event, _ in event_and_contexts:
- self._notify_persisted_event(event, max_stream_id)
+ yield self._notify_persisted_event(event, max_stream_id)
def _notify_persisted_event(self, event, max_stream_id):
"""Checks to see if notifier/pushers should be notified about the
@@ -2390,7 +2516,7 @@ class FederationHandler(BaseHandler):
extra_users=extra_users
)
- self.pusher_pool.on_new_notifications(
+ return self.pusher_pool.on_new_notifications(
event_stream_id, max_stream_id,
)
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 53e5e264..173315af 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -20,7 +20,7 @@ from six import iteritems
from twisted.internet import defer
-from synapse.api.errors import SynapseError
+from synapse.api.errors import HttpResponseException, SynapseError
from synapse.types import get_domain_from_id
logger = logging.getLogger(__name__)
@@ -37,9 +37,23 @@ def _create_rerouter(func_name):
)
else:
destination = get_domain_from_id(group_id)
- return getattr(self.transport_client, func_name)(
+ d = getattr(self.transport_client, func_name)(
destination, group_id, *args, **kwargs
)
+
+ # Capture errors returned by the remote homeserver and
+ # re-throw specific errors as SynapseErrors. This is so
+ # when the remote end responds with things like 403 Not
+ # In Group, we can communicate that to the client instead
+ # of a 500.
+ def h(failure):
+ failure.trap(HttpResponseException)
+ e = failure.value
+ if e.code == 403:
+ raise e.to_synapse_error()
+ return failure
+ d.addErrback(h)
+ return d
return f
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index e0093952..563bb3ce 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -156,7 +156,7 @@ class InitialSyncHandler(BaseHandler):
room_end_token = "s%d" % (event.stream_ordering,)
deferred_room_state = run_in_background(
self.store.get_state_for_events,
- [event.event_id], None,
+ [event.event_id],
)
deferred_room_state.addCallback(
lambda states: states[event.event_id]
@@ -301,7 +301,7 @@ class InitialSyncHandler(BaseHandler):
def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
membership, member_event_id, is_peeking):
room_state = yield self.store.get_state_for_events(
- [member_event_id], None
+ [member_event_id],
)
room_state = room_state[member_event_id]
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index e484061c..a7cd779b 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -14,9 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-import sys
-import six
from six import iteritems, itervalues, string_types
from canonicaljson import encode_canonical_json, json
@@ -37,6 +35,7 @@ from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
+from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.frozenutils import frozendict_json_encoder
@@ -82,7 +81,7 @@ class MessageHandler(object):
elif membership == Membership.LEAVE:
key = (event_type, state_key)
room_state = yield self.store.get_state_for_events(
- [membership_event_id], [key]
+ [membership_event_id], StateFilter.from_types([key])
)
data = room_state[membership_event_id].get(key)
@@ -90,7 +89,7 @@ class MessageHandler(object):
@defer.inlineCallbacks
def get_state_events(
- self, user_id, room_id, types=None, filtered_types=None,
+ self, user_id, room_id, state_filter=StateFilter.all(),
at_token=None, is_guest=False,
):
"""Retrieve all state events for a given room. If the user is
@@ -102,13 +101,8 @@ class MessageHandler(object):
Args:
user_id(str): The user requesting state events.
room_id(str): The room ID to get all state events from.
- types(list[(str, str|None)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. If `state_key` is None,
- all events are returned of the given type.
- May be None, which matches any key.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
             at_token(StreamToken|None): the stream token of the point at which we are
                 requesting the state. If the user is not allowed to view the state as of that
stream token, we raise a 403 SynapseError. If None, returns the current
@@ -141,7 +135,7 @@ class MessageHandler(object):
event = last_events[0]
if visible_events:
room_state = yield self.store.get_state_for_events(
- [event.event_id], types, filtered_types=filtered_types,
+ [event.event_id], state_filter=state_filter,
)
room_state = room_state[event.event_id]
else:
@@ -160,12 +154,12 @@ class MessageHandler(object):
if membership == Membership.JOIN:
state_ids = yield self.store.get_filtered_current_state_ids(
- room_id, types, filtered_types=filtered_types,
+ room_id, state_filter=state_filter,
)
room_state = yield self.store.get_events(state_ids.values())
elif membership == Membership.LEAVE:
room_state = yield self.store.get_state_for_events(
- [membership_event_id], types, filtered_types=filtered_types,
+ [membership_event_id], state_filter=state_filter,
)
room_state = room_state[membership_event_id]
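# Illustrative sketch of the StateFilter calling convention used above (the
# member user id is an assumption): a None state_key matches every state_key
# of that event type.
from synapse.api.constants import EventTypes
from synapse.storage.state import StateFilter

member_and_name_filter = StateFilter.from_types([
    (EventTypes.Member, "@alice:example.com"),
    (EventTypes.Name, None),
])
# which would then be passed as, e.g.:
#   store.get_state_for_events([membership_event_id], state_filter=member_and_name_filter)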
@@ -433,6 +427,9 @@ class EventCreationHandler(object):
if event.is_state():
prev_state = yield self.deduplicate_state_event(event, context)
+ logger.info(
+ "Not bothering to persist duplicate state event %s", event.event_id,
+ )
if prev_state is not None:
defer.returnValue(prev_state)
@@ -624,6 +621,9 @@ class EventCreationHandler(object):
event, context
)
+ # reraise does not allow inlineCallbacks to preserve the stacktrace, so we
+ # hack around with a try/finally instead.
+ success = False
try:
# If we're a worker we need to hit out to the master.
if self.config.worker_app:
@@ -636,6 +636,7 @@ class EventCreationHandler(object):
ratelimit=ratelimit,
extra_users=extra_users,
)
+ success = True
return
yield self.persist_and_notify_client_event(
@@ -645,17 +646,16 @@ class EventCreationHandler(object):
ratelimit=ratelimit,
extra_users=extra_users,
)
- except: # noqa: E722, as we reraise the exception this is fine.
- # Ensure that we actually remove the entries in the push actions
- # staging area, if we calculated them.
- tp, value, tb = sys.exc_info()
-
- run_in_background(
- self.store.remove_push_actions_from_staging,
- event.event_id,
- )
- six.reraise(tp, value, tb)
+ success = True
+ finally:
+ if not success:
+ # Ensure that we actually remove the entries in the push actions
+ # staging area, if we calculated them.
+ run_in_background(
+ self.store.remove_push_actions_from_staging,
+ event.event_id,
+ )
@defer.inlineCallbacks
def persist_and_notify_client_event(
@@ -778,7 +778,7 @@ class EventCreationHandler(object):
event, context=context
)
- self.pusher_pool.on_new_notifications(
+ yield self.pusher_pool.on_new_notifications(
event_stream_id, max_stream_id,
)
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 5170d093..43f81bd6 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -21,6 +21,7 @@ from twisted.python.failure import Failure
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.events.utils import serialize_event
+from synapse.storage.state import StateFilter
from synapse.types import RoomStreamToken
from synapse.util.async_helpers import ReadWriteLock
from synapse.util.logcontext import run_in_background
@@ -255,28 +256,19 @@ class PaginationHandler(object):
if event_filter and event_filter.lazy_load_members():
# TODO: remove redundant members
- types = [
- (EventTypes.Member, state_key)
- for state_key in set(
- event.sender # FIXME: we also care about invite targets etc.
- for event in events
- )
- ]
+ # FIXME: we also care about invite targets etc.
+ state_filter = StateFilter.from_types(
+ (EventTypes.Member, event.sender)
+ for event in events
+ )
state_ids = yield self.store.get_state_ids_for_event(
- events[0].event_id, types=types,
+ events[0].event_id, state_filter=state_filter,
)
if state_ids:
state = yield self.store.get_events(list(state_ids.values()))
-
- if state:
- state = yield filter_events_for_client(
- self.store,
- user_id,
- state.values(),
- is_peeking=(member_event_id is None),
- )
+ state = state.values()
time_now = self.clock.time_msec()
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 75b8b7ce..1dfbde84 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -142,10 +142,8 @@ class BaseProfileHandler(BaseHandler):
if e.code != 404:
logger.exception("Failed to get displayname")
raise
- except Exception:
- logger.exception("Failed to get displayname")
- else:
- defer.returnValue(result["displayname"])
+
+ defer.returnValue(result["displayname"])
@defer.inlineCallbacks
def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
@@ -199,8 +197,6 @@ class BaseProfileHandler(BaseHandler):
if e.code != 404:
logger.exception("Failed to get avatar_url")
raise
- except Exception:
- logger.exception("Failed to get avatar_url")
defer.returnValue(result["avatar_url"])
@@ -278,7 +274,7 @@ class BaseProfileHandler(BaseHandler):
except Exception as e:
logger.warn(
"Failed to update join event for room %s - %s",
- room_id, str(e.message)
+ room_id, str(e)
)
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index a6f3181f..4c2690ba 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -119,7 +119,7 @@ class ReceiptsHandler(BaseHandler):
"receipt_key", max_batch_id, rooms=affected_room_ids
)
# Note that the min here shouldn't be relied upon to be accurate.
- self.hs.get_pusherpool().on_new_receipts(
+ yield self.hs.get_pusherpool().on_new_receipts(
min_batch_id, max_batch_id, affected_room_ids,
)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 1e53f2c6..d2beb275 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -220,15 +220,42 @@ class RegistrationHandler(BaseHandler):
# auto-join the user to any rooms we're supposed to dump them into
fake_requester = create_requester(user_id)
+
+ # try to create the room if we're the first user on the server
+ should_auto_create_rooms = False
+ if self.hs.config.autocreate_auto_join_rooms:
+ count = yield self.store.count_all_users()
+ should_auto_create_rooms = count == 1
+
for r in self.hs.config.auto_join_rooms:
try:
- yield self._join_user_to_room(fake_requester, r)
+ if should_auto_create_rooms:
+ room_alias = RoomAlias.from_string(r)
+ if self.hs.hostname != room_alias.domain:
+ logger.warning(
+ 'Cannot create room alias %s, '
+ 'it does not match server domain',
+ r,
+ )
+ else:
+ # create room expects the localpart of the room alias
+ room_alias_localpart = room_alias.localpart
+
+ # getting the RoomCreationHandler during init gives a dependency
+ # loop
+ yield self.hs.get_room_creation_handler().create_room(
+ fake_requester,
+ config={
+ "preset": "public_chat",
+ "room_alias_name": room_alias_localpart
+ },
+ ratelimit=False,
+ )
+ else:
+ yield self._join_user_to_room(fake_requester, r)
except Exception as e:
logger.error("Failed to join new user to %r: %r", r, e)
- # We used to generate default identicons here, but nowadays
- # we want clients to generate their own as part of their branding
- # rather than there being consistent matrix-wide ones, so we don't.
defer.returnValue((user_id, token))
@defer.inlineCallbacks
@@ -534,4 +561,5 @@ class RegistrationHandler(BaseHandler):
room_id=room_id,
remote_room_hosts=remote_room_hosts,
action="join",
+ ratelimit=False,
)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index c3f820b9..3928faa6 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -21,7 +21,7 @@ import math
import string
from collections import OrderedDict
-from six import string_types
+from six import iteritems, string_types
from twisted.internet import defer
@@ -32,9 +32,11 @@ from synapse.api.constants import (
JoinRules,
RoomCreationPreset,
)
-from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
+from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
+from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
from synapse.util import stringutils
+from synapse.util.async_helpers import Linearizer
from synapse.visibility import filter_events_for_client
from ._base import BaseHandler
@@ -72,6 +74,334 @@ class RoomCreationHandler(BaseHandler):
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
+ self.room_member_handler = hs.get_room_member_handler()
+
+ # linearizer to stop two upgrades happening at once
+ self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
+
+ @defer.inlineCallbacks
+ def upgrade_room(self, requester, old_room_id, new_version):
+ """Replace a room with a new room with a different version
+
+ Args:
+ requester (synapse.types.Requester): the user requesting the upgrade
+ old_room_id (unicode): the id of the room to be replaced
+ new_version (unicode): the new room version to use
+
+ Returns:
+ Deferred[unicode]: the new room id
+ """
+ yield self.ratelimit(requester)
+
+ user_id = requester.user.to_string()
+
+ with (yield self._upgrade_linearizer.queue(old_room_id)):
+ # start by allocating a new room id
+ r = yield self.store.get_room(old_room_id)
+ if r is None:
+ raise NotFoundError("Unknown room id %s" % (old_room_id,))
+ new_room_id = yield self._generate_room_id(
+ creator_id=user_id, is_public=r["is_public"],
+ )
+
+ logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
+
+ # we create and auth the tombstone event before properly creating the new
+ # room, to check our user has perms in the old room.
+ tombstone_event, tombstone_context = (
+ yield self.event_creation_handler.create_event(
+ requester, {
+ "type": EventTypes.Tombstone,
+ "state_key": "",
+ "room_id": old_room_id,
+ "sender": user_id,
+ "content": {
+ "body": "This room has been replaced",
+ "replacement_room": new_room_id,
+ }
+ },
+ token_id=requester.access_token_id,
+ )
+ )
+ yield self.auth.check_from_context(tombstone_event, tombstone_context)
+
+ yield self.clone_exiting_room(
+ requester,
+ old_room_id=old_room_id,
+ new_room_id=new_room_id,
+ new_room_version=new_version,
+ tombstone_event_id=tombstone_event.event_id,
+ )
+
+ # now send the tombstone
+ yield self.event_creation_handler.send_nonmember_event(
+ requester, tombstone_event, tombstone_context,
+ )
+
+ old_room_state = yield tombstone_context.get_current_state_ids(self.store)
+
+ # update any aliases
+ yield self._move_aliases_to_new_room(
+ requester, old_room_id, new_room_id, old_room_state,
+ )
+
+ # and finally, shut down the PLs in the old room, and update them in the new
+ # room.
+ yield self._update_upgraded_room_pls(
+ requester, old_room_id, new_room_id, old_room_state,
+ )
+
+ defer.returnValue(new_room_id)
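# Illustrative sketch (assumed servlet-side wiring) of how the upgrade entry
# point above might be invoked; the request body key and response shape are
# assumptions for the example.
from twisted.internet import defer

@defer.inlineCallbacks
def handle_upgrade_request(hs, requester, old_room_id, body):
    handler = hs.get_room_creation_handler()
    new_room_id = yield handler.upgrade_room(
        requester, old_room_id, new_version=body["new_version"],
    )
    defer.returnValue({"replacement_room": new_room_id})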
+
+ @defer.inlineCallbacks
+ def _update_upgraded_room_pls(
+ self, requester, old_room_id, new_room_id, old_room_state,
+ ):
+ """Send updated power levels in both rooms after an upgrade
+
+ Args:
+ requester (synapse.types.Requester): the user requesting the upgrade
+ old_room_id (unicode): the id of the room to be replaced
+ new_room_id (unicode): the id of the replacement room
+ old_room_state (dict[tuple[str, str], str]): the state map for the old room
+
+ Returns:
+ Deferred
+ """
+ old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
+
+ if old_room_pl_event_id is None:
+ logger.warning(
+ "Not supported: upgrading a room with no PL event. Not setting PLs "
+ "in old room.",
+ )
+ return
+
+ old_room_pl_state = yield self.store.get_event(old_room_pl_event_id)
+
+ # we try to stop regular users from speaking by setting the PL required
+ # to send regular events and invites to 'Moderator' level. That's normally
+ # 50, but if the default PL in a room is 50 or more, then we set the
+ # required PL above that.
+
+ pl_content = dict(old_room_pl_state.content)
+ users_default = int(pl_content.get("users_default", 0))
+ restricted_level = max(users_default + 1, 50)
+
+ updated = False
+ for v in ("invite", "events_default"):
+ current = int(pl_content.get(v, 0))
+ if current < restricted_level:
+ logger.info(
+ "Setting level for %s in %s to %i (was %i)",
+ v, old_room_id, restricted_level, current,
+ )
+ pl_content[v] = restricted_level
+ updated = True
+ else:
+ logger.info(
+ "Not setting level for %s (already %i)",
+ v, current,
+ )
+
+ if updated:
+ try:
+ yield self.event_creation_handler.create_and_send_nonmember_event(
+ requester, {
+ "type": EventTypes.PowerLevels,
+ "state_key": '',
+ "room_id": old_room_id,
+ "sender": requester.user.to_string(),
+ "content": pl_content,
+ }, ratelimit=False,
+ )
+ except AuthError as e:
+ logger.warning("Unable to update PLs in old room: %s", e)
+
+ logger.info("Setting correct PLs in new room")
+ yield self.event_creation_handler.create_and_send_nonmember_event(
+ requester, {
+ "type": EventTypes.PowerLevels,
+ "state_key": '',
+ "room_id": new_room_id,
+ "sender": requester.user.to_string(),
+ "content": old_room_pl_state.content,
+ }, ratelimit=False,
+ )
+
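For reference, a minimal standalone sketch of the power-level restriction introduced above (plain Python, no synapse internals; the helper name is illustrative): regular events and invites are pushed to at least 'Moderator' (50), or just above the room's users_default if that is already 50 or more.

def restrict_power_levels(pl_content):
    # Moderator is normally 50, but always stay above the room default.
    users_default = int(pl_content.get("users_default", 0))
    restricted_level = max(users_default + 1, 50)
    updated = dict(pl_content)
    for key in ("invite", "events_default"):
        if int(updated.get(key, 0)) < restricted_level:
            updated[key] = restricted_level
    return updated

print(restrict_power_levels({"users_default": 50, "events_default": 0}))
# {'users_default': 50, 'events_default': 51, 'invite': 51}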
+ @defer.inlineCallbacks
+ def clone_exiting_room(
+ self, requester, old_room_id, new_room_id, new_room_version,
+ tombstone_event_id,
+ ):
+ """Populate a new room based on an old room
+
+ Args:
+ requester (synapse.types.Requester): the user requesting the upgrade
+ old_room_id (unicode): the id of the room to be replaced
+ new_room_id (unicode): the id to give the new room (should already have been
+ created with _generate_room_id())
+ new_room_version (unicode): the new room version to use
+ tombstone_event_id (unicode|str): the ID of the tombstone event in the old
+ room.
+ Returns:
+ Deferred[None]
+ """
+ user_id = requester.user.to_string()
+
+ if not self.spam_checker.user_may_create_room(user_id):
+ raise SynapseError(403, "You are not permitted to create rooms")
+
+ creation_content = {
+ "room_version": new_room_version,
+ "predecessor": {
+ "room_id": old_room_id,
+ "event_id": tombstone_event_id,
+ }
+ }
+
+ initial_state = dict()
+
+ types_to_copy = (
+ (EventTypes.JoinRules, ""),
+ (EventTypes.Name, ""),
+ (EventTypes.Topic, ""),
+ (EventTypes.RoomHistoryVisibility, ""),
+ (EventTypes.GuestAccess, ""),
+ (EventTypes.RoomAvatar, ""),
+ )
+
+ old_room_state_ids = yield self.store.get_filtered_current_state_ids(
+ old_room_id, StateFilter.from_types(types_to_copy),
+ )
+ # map from event_id to BaseEvent
+ old_room_state_events = yield self.store.get_events(old_room_state_ids.values())
+
+ for k, old_event_id in iteritems(old_room_state_ids):
+ old_event = old_room_state_events.get(old_event_id)
+ if old_event:
+ initial_state[k] = old_event.content
+
+ yield self._send_events_for_new_room(
+ requester,
+ new_room_id,
+
+ # we expect to override all the presets with initial_state, so this is
+ # somewhat arbitrary.
+ preset_config=RoomCreationPreset.PRIVATE_CHAT,
+
+ invite_list=[],
+ initial_state=initial_state,
+ creation_content=creation_content,
+ )
+
+ # XXX invites/joins
+ # XXX 3pid invites
+
+ @defer.inlineCallbacks
+ def _move_aliases_to_new_room(
+ self, requester, old_room_id, new_room_id, old_room_state,
+ ):
+ directory_handler = self.hs.get_handlers().directory_handler
+
+ aliases = yield self.store.get_aliases_for_room(old_room_id)
+
+ # check to see if we have a canonical alias.
+ canonical_alias = None
+ canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
+ if canonical_alias_event_id:
+ canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
+ if canonical_alias_event:
+ canonical_alias = canonical_alias_event.content.get("alias", "")
+
+ # first we try to remove the aliases from the old room (we suppress sending
+ # the room_aliases event until the end).
+ #
+ # Note that we'll only be able to remove aliases that (a) aren't owned by an AS,
+ # and (b) were created by the user (unless the user is a server admin).
+ #
+ # This is probably correct - given we don't allow such aliases to be deleted
+ # normally, it would be odd to allow it in the case of doing a room upgrade -
+ # but it makes the upgrade less effective, and you have to wonder why a room
+ # admin can't remove aliases that point to that room anyway.
+ # (cf https://github.com/matrix-org/synapse/issues/2360)
+ #
+ removed_aliases = []
+ for alias_str in aliases:
+ alias = RoomAlias.from_string(alias_str)
+ try:
+ yield directory_handler.delete_association(
+ requester, alias, send_event=False,
+ )
+ removed_aliases.append(alias_str)
+ except SynapseError as e:
+ logger.warning(
+ "Unable to remove alias %s from old room: %s",
+ alias, e,
+ )
+
+ # if we didn't find any aliases, or couldn't remove any, we can skip the rest
+ # of this.
+ if not removed_aliases:
+ return
+
+ try:
+ # this can fail if, for some reason, our user doesn't have perms to send
+ # m.room.aliases events in the old room (note that we've already checked that
+ # they have perms to send a tombstone event, so that's not terribly likely).
+ #
+ # If that happens, it's regrettable, but we should carry on: it's the same
+ # as when you remove an alias from the directory normally - it just means that
+ # the aliases event gets out of sync with the directory
+ # (cf https://github.com/vector-im/riot-web/issues/2369)
+ yield directory_handler.send_room_alias_update_event(
+ requester, old_room_id,
+ )
+ except AuthError as e:
+ logger.warning(
+ "Failed to send updated alias event on old room: %s", e,
+ )
+
+ # we can now add any aliases we successfully removed to the new room.
+ for alias in removed_aliases:
+ try:
+ yield directory_handler.create_association(
+ requester, RoomAlias.from_string(alias),
+ new_room_id, servers=(self.hs.hostname, ),
+ send_event=False,
+ )
+ logger.info("Moved alias %s to new room", alias)
+ except SynapseError as e:
+ # I'm not really expecting this to happen, but it could if the spam
+ # checking module decides it shouldn't, or similar.
+ logger.error(
+ "Error adding alias %s to new room: %s",
+ alias, e,
+ )
+
+ try:
+ if canonical_alias and (canonical_alias in removed_aliases):
+ yield self.event_creation_handler.create_and_send_nonmember_event(
+ requester,
+ {
+ "type": EventTypes.CanonicalAlias,
+ "state_key": "",
+ "room_id": new_room_id,
+ "sender": requester.user.to_string(),
+ "content": {"alias": canonical_alias, },
+ },
+ ratelimit=False
+ )
+
+ yield directory_handler.send_room_alias_update_event(
+ requester, new_room_id,
+ )
+ except SynapseError as e:
+ # again I'm not really expecting this to fail, but if it does, I'd rather
+ # we returned the new room to the client at this point.
+ logger.error(
+ "Unable to send updated alias events in new room: %s", e,
+ )
@defer.inlineCallbacks
def create_room(self, requester, config, ratelimit=True,
@@ -164,36 +494,16 @@ class RoomCreationHandler(BaseHandler):
visibility = config.get("visibility", None)
is_public = visibility == "public"
- # autogen room IDs and try to create it. We may clash, so just
- # try a few times till one goes through, giving up eventually.
- attempts = 0
- room_id = None
- while attempts < 5:
- try:
- random_string = stringutils.random_string(18)
- gen_room_id = RoomID(
- random_string,
- self.hs.hostname,
- )
- yield self.store.store_room(
- room_id=gen_room_id.to_string(),
- room_creator_user_id=user_id,
- is_public=is_public
- )
- room_id = gen_room_id.to_string()
- break
- except StoreError:
- attempts += 1
- if not room_id:
- raise StoreError(500, "Couldn't generate a room ID.")
+ room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
if room_alias:
directory_handler = self.hs.get_handlers().directory_handler
yield directory_handler.create_association(
- user_id=user_id,
+ requester=requester,
room_id=room_id,
room_alias=room_alias,
servers=[self.hs.hostname],
+ send_event=False,
)
preset_config = config.get(
@@ -214,18 +524,15 @@ class RoomCreationHandler(BaseHandler):
# override any attempt to set room versions via the creation_content
creation_content["room_version"] = room_version
- room_member_handler = self.hs.get_room_member_handler()
-
yield self._send_events_for_new_room(
requester,
room_id,
- room_member_handler,
preset_config=preset_config,
invite_list=invite_list,
initial_state=initial_state,
creation_content=creation_content,
room_alias=room_alias,
- power_level_content_override=config.get("power_level_content_override", {}),
+ power_level_content_override=config.get("power_level_content_override"),
creator_join_profile=creator_join_profile,
)
@@ -261,7 +568,7 @@ class RoomCreationHandler(BaseHandler):
if is_direct:
content["is_direct"] = is_direct
- yield room_member_handler.update_membership(
+ yield self.room_member_handler.update_membership(
requester,
UserID.from_string(invitee),
room_id,
@@ -289,7 +596,7 @@ class RoomCreationHandler(BaseHandler):
if room_alias:
result["room_alias"] = room_alias.to_string()
yield directory_handler.send_room_alias_update_event(
- requester, user_id, room_id
+ requester, room_id
)
defer.returnValue(result)
@@ -299,14 +606,13 @@ class RoomCreationHandler(BaseHandler):
self,
creator, # A Requester object.
room_id,
- room_member_handler,
preset_config,
invite_list,
initial_state,
creation_content,
- room_alias,
- power_level_content_override,
- creator_join_profile,
+ room_alias=None,
+ power_level_content_override=None,
+ creator_join_profile=None,
):
def create(etype, content, **kwargs):
e = {
@@ -322,6 +628,7 @@ class RoomCreationHandler(BaseHandler):
@defer.inlineCallbacks
def send(etype, content, **kwargs):
event = create(etype, content, **kwargs)
+ logger.info("Sending %s in new room", etype)
yield self.event_creation_handler.create_and_send_nonmember_event(
creator,
event,
@@ -344,7 +651,8 @@ class RoomCreationHandler(BaseHandler):
content=creation_content,
)
- yield room_member_handler.update_membership(
+ logger.info("Sending %s in new room", EventTypes.Member)
+ yield self.room_member_handler.update_membership(
creator,
creator.user,
room_id,
@@ -386,7 +694,8 @@ class RoomCreationHandler(BaseHandler):
for invitee in invite_list:
power_level_content["users"][invitee] = 100
- power_level_content.update(power_level_content_override)
+ if power_level_content_override:
+ power_level_content.update(power_level_content_override)
yield send(
etype=EventTypes.PowerLevels,
@@ -425,6 +734,30 @@ class RoomCreationHandler(BaseHandler):
content=content,
)
+ @defer.inlineCallbacks
+ def _generate_room_id(self, creator_id, is_public):
+ # autogen a room ID and try to store it. We may clash, so just
+ # try a few times till one goes through, giving up eventually.
+ attempts = 0
+ while attempts < 5:
+ try:
+ random_string = stringutils.random_string(18)
+ gen_room_id = RoomID(
+ random_string,
+ self.hs.hostname,
+ ).to_string()
+ if isinstance(gen_room_id, bytes):
+ gen_room_id = gen_room_id.decode('utf-8')
+ yield self.store.store_room(
+ room_id=gen_room_id,
+ room_creator_user_id=creator_id,
+ is_public=is_public,
+ )
+ defer.returnValue(gen_room_id)
+ except StoreError:
+ attempts += 1
+ raise StoreError(500, "Couldn't generate a room ID.")
+
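A hedged sketch of the generate-and-retry pattern used by _generate_room_id() above, with a stand-in store_room callable rather than the real datastore:

import random
import string

class StoreError(Exception):
    pass

def generate_room_id(store_room, server_name, attempts=5):
    # Pick random localparts until one stores without clashing; the store
    # signals a clash by raising StoreError, and we give up after a few tries.
    for _ in range(attempts):
        localpart = "".join(random.choice(string.ascii_letters) for _ in range(18))
        room_id = "!%s:%s" % (localpart, server_name)
        try:
            store_room(room_id)
            return room_id
        except StoreError:
            continue
    raise StoreError(500, "Couldn't generate a room ID.")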
class RoomContextHandler(object):
def __init__(self, hs):
@@ -488,23 +821,24 @@ class RoomContextHandler(object):
else:
last_event_id = event_id
- types = None
- filtered_types = None
if event_filter and event_filter.lazy_load_members():
- members = set(ev.sender for ev in itertools.chain(
- results["events_before"],
- (results["event"],),
- results["events_after"],
- ))
- filtered_types = [EventTypes.Member]
- types = [(EventTypes.Member, member) for member in members]
+ state_filter = StateFilter.from_lazy_load_member_list(
+ ev.sender
+ for ev in itertools.chain(
+ results["events_before"],
+ (results["event"],),
+ results["events_after"],
+ )
+ )
+ else:
+ state_filter = StateFilter.all()
# XXX: why do we return the state as of the last event rather than the
# first? Shouldn't we be consistent with /sync?
# https://github.com/matrix-org/matrix-doc/issues/687
state = yield self.store.get_state_for_events(
- [last_event_id], types, filtered_types=filtered_types,
+ [last_event_id], state_filter=state_filter,
)
results["state"] = list(state[last_event_id].values())
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 37e41afd..dc886208 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -16,7 +16,7 @@
import logging
from collections import namedtuple
-from six import iteritems
+from six import PY3, iteritems
from six.moves import range
import msgpack
@@ -162,7 +162,7 @@ class RoomListHandler(BaseHandler):
# Filter out rooms that we don't want to return
rooms_to_scan = [
r for r in sorted_rooms
- if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0
+ if r not in newly_unpublished and rooms_to_num_joined[r] > 0
]
total_room_count = len(rooms_to_scan)
@@ -444,9 +444,16 @@ class RoomListNextBatch(namedtuple("RoomListNextBatch", (
@classmethod
def from_token(cls, token):
+ if PY3:
+ # The argument raw=False is only available on new versions of
+ # msgpack, and only really needed on Python 3. Gate it behind
+ # a PY3 check to avoid causing issues on Debian-packaged versions.
+ decoded = msgpack.loads(decode_base64(token), raw=False)
+ else:
+ decoded = msgpack.loads(decode_base64(token))
return RoomListNextBatch(**{
cls.REVERSE_KEY_DICT[key]: val
- for key, val in msgpack.loads(decode_base64(token)).items()
+ for key, val in decoded.items()
})
def to_token(self):
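For illustration, a round trip of the room-list pagination token handled above (standard padded base64 is used here so the example is self-contained; synapse itself uses unpadded base64 helpers):

import base64
import msgpack

def to_token(batch):
    return base64.b64encode(msgpack.dumps(batch)).decode("ascii")

def from_token(token):
    # raw=False makes msgpack return str rather than bytes on Python 3,
    # which is why the change above gates it behind a PY3 check.
    return msgpack.loads(base64.b64decode(token), raw=False)

print(from_token(to_token({"last_room_id": "!abc:example.org", "direction_is_forward": True})))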
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index f6436190..07fd3e82 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -583,6 +583,11 @@ class RoomMemberHandler(object):
room_id = mapping["room_id"]
servers = mapping["servers"]
+ # put the server which owns the alias at the front of the server list.
+ if room_alias.domain in servers:
+ servers.remove(room_alias.domain)
+ servers.insert(0, room_alias.domain)
+
defer.returnValue((RoomID.from_string(room_id), servers))
@defer.inlineCallbacks
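A small standalone illustration of the server-ordering tweak above (the helper name is hypothetical):

def prioritise_alias_domain(servers, alias_domain):
    # Try the server that owns the alias first when joining via that alias.
    servers = list(servers)
    if alias_domain in servers:
        servers.remove(alias_domain)
        servers.insert(0, alias_domain)
    return servers

print(prioritise_alias_domain(["a.example", "b.example", "matrix.org"], "matrix.org"))
# ['matrix.org', 'a.example', 'b.example']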
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index c464adbd..80e7b15d 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -24,6 +24,7 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.events.utils import serialize_event
+from synapse.storage.state import StateFilter
from synapse.visibility import filter_events_for_client
from ._base import BaseHandler
@@ -54,7 +55,7 @@ class SearchHandler(BaseHandler):
batch_token = None
if batch:
try:
- b = decode_base64(batch)
+ b = decode_base64(batch).decode('ascii')
batch_group, batch_group_key, batch_token = b.split("\n")
assert batch_group is not None
@@ -258,18 +259,18 @@ class SearchHandler(BaseHandler):
# it returns more from the same group (if applicable) rather
# than reverting to searching all results again.
if batch_group and batch_group_key:
- global_next_batch = encode_base64("%s\n%s\n%s" % (
+ global_next_batch = encode_base64(("%s\n%s\n%s" % (
batch_group, batch_group_key, pagination_token
- ))
+ )).encode('ascii'))
else:
- global_next_batch = encode_base64("%s\n%s\n%s" % (
+ global_next_batch = encode_base64(("%s\n%s\n%s" % (
"all", "", pagination_token
- ))
+ )).encode('ascii'))
for room_id, group in room_groups.items():
- group["next_batch"] = encode_base64("%s\n%s\n%s" % (
+ group["next_batch"] = encode_base64(("%s\n%s\n%s" % (
"room_id", room_id, pagination_token
- ))
+ )).encode('ascii'))
allowed_events.extend(room_events)
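A self-contained sketch of the search batch token shown above: on Python 3 the base64 helpers work on bytes, hence the extra .encode('ascii')/.decode('ascii'). Standard padded base64 is used here rather than synapse's unpadded helpers.

import base64

def make_batch_token(group, key, pagination_token):
    # the token is "<group>\n<key>\n<pagination_token>", base64-encoded so
    # clients can treat it as an opaque string.
    raw = ("%s\n%s\n%s" % (group, key, pagination_token)).encode("ascii")
    return base64.b64encode(raw).decode("ascii")

def parse_batch_token(token):
    group, key, pagination_token = base64.b64decode(token).decode("ascii").split("\n")
    return group, key, pagination_token

print(parse_batch_token(make_batch_token("room_id", "!abc:example.org", "t47~123")))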
@@ -324,9 +325,12 @@ class SearchHandler(BaseHandler):
else:
last_event_id = event.event_id
+ state_filter = StateFilter.from_types(
+ [(EventTypes.Member, sender) for sender in senders]
+ )
+
state = yield self.store.get_state_for_event(
- last_event_id,
- types=[(EventTypes.Member, sender) for sender in senders]
+ last_event_id, state_filter
)
res["profile_info"] = {
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index ef20c229..09739f28 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -20,10 +20,14 @@ import logging
from six import iteritems, itervalues
+from prometheus_client import Counter
+
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.push.clientformat import format_push_rules_for_user
+from synapse.storage.roommember import MemberSummary
+from synapse.storage.state import StateFilter
from synapse.types import RoomStreamToken
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
@@ -35,6 +39,19 @@ from synapse.visibility import filter_events_for_client
logger = logging.getLogger(__name__)
+
+# Counts the number of times we returned a non-empty sync. `type` is one of
+# "initial_sync", "full_state_sync" or "incremental_sync", `lazy_loaded` is
+# "true" or "false" depending on if the request asked for lazy loaded members or
+# not.
+non_empty_sync_counter = Counter(
+ "synapse_handlers_sync_nonempty_total",
+ "Count of non empty sync responses. type is initial_sync/full_state_sync"
+ "/incremental_sync. lazy_loaded indicates if lazy loaded members were "
+ "enabled for that request.",
+ ["type", "lazy_loaded"],
+)
+
# Store the cache that tracks which lazy-loaded members have been sent to a given
# client for no more than 30 minutes.
LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000
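A hedged usage sketch of the new sync counter: a prometheus_client Counter with labels is incremented once per (type, lazy_loaded) combination. The metric name below is made up to avoid clashing with the real one.

from prometheus_client import Counter

example_sync_counter = Counter(
    "example_nonempty_sync_total",
    "Example counter with the same label scheme as non_empty_sync_counter.",
    ["type", "lazy_loaded"],
)

# Incremented once per non-empty response, as _wait_for_sync_for_user now does:
example_sync_counter.labels("initial_sync", "true").inc()
example_sync_counter.labels("incremental_sync", "false").inc()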
@@ -226,14 +243,16 @@ class SyncHandler(object):
@defer.inlineCallbacks
def _wait_for_sync_for_user(self, sync_config, since_token, timeout,
full_state):
+ if since_token is None:
+ sync_type = "initial_sync"
+ elif full_state:
+ sync_type = "full_state_sync"
+ else:
+ sync_type = "incremental_sync"
+
context = LoggingContext.current_context()
if context:
- if since_token is None:
- context.tag = "initial_sync"
- elif full_state:
- context.tag = "full_state_sync"
- else:
- context.tag = "incremental_sync"
+ context.tag = sync_type
if timeout == 0 or since_token is None or full_state:
# we are going to return immediately, so don't bother calling
@@ -241,7 +260,6 @@ class SyncHandler(object):
result = yield self.current_sync_for_user(
sync_config, since_token, full_state=full_state,
)
- defer.returnValue(result)
else:
def current_sync_callback(before_token, after_token):
return self.current_sync_for_user(sync_config, since_token)
@@ -250,7 +268,15 @@ class SyncHandler(object):
sync_config.user.to_string(), timeout, current_sync_callback,
from_token=since_token,
)
- defer.returnValue(result)
+
+ if result:
+ if sync_config.filter_collection.lazy_load_members():
+ lazy_loaded = "true"
+ else:
+ lazy_loaded = "false"
+ non_empty_sync_counter.labels(sync_type, lazy_loaded).inc()
+
+ defer.returnValue(result)
def current_sync_for_user(self, sync_config, since_token=None,
full_state=False):
@@ -444,25 +470,20 @@ class SyncHandler(object):
))
@defer.inlineCallbacks
- def get_state_after_event(self, event, types=None, filtered_types=None):
+ def get_state_after_event(self, event, state_filter=StateFilter.all()):
"""
Get the room state after the given event
Args:
event(synapse.events.EventBase): event of interest
- types(list[(str, str|None)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. If `state_key` is None,
- all events are returned of the given type.
- May be None, which matches any key.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns:
A Deferred map from ((type, state_key)->Event)
"""
state_ids = yield self.store.get_state_ids_for_event(
- event.event_id, types, filtered_types=filtered_types,
+ event.event_id, state_filter=state_filter,
)
if event.is_state():
state_ids = state_ids.copy()
@@ -470,18 +491,14 @@ class SyncHandler(object):
defer.returnValue(state_ids)
@defer.inlineCallbacks
- def get_state_at(self, room_id, stream_position, types=None, filtered_types=None):
+ def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()):
""" Get the room state at a particular stream position
Args:
room_id(str): room for which to get state
stream_position(StreamToken): point at which to get state
- types(list[(str, str|None)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. If `state_key` is None,
- all events are returned of the given type.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns:
A Deferred map from ((type, state_key)->Event)
@@ -497,7 +514,7 @@ class SyncHandler(object):
if last_events:
last_event = last_events[-1]
state = yield self.get_state_after_event(
- last_event, types, filtered_types=filtered_types,
+ last_event, state_filter=state_filter,
)
else:
@@ -525,6 +542,8 @@ class SyncHandler(object):
A deferred dict describing the room summary
"""
+ # FIXME: we could/should get this from room_stats when matthew/stats lands
+
# FIXME: this promulgates https://github.com/matrix-org/synapse/issues/3305
last_events, _ = yield self.store.get_recent_event_ids_for_room(
room_id, end_token=now_token.room_key, limit=1,
@@ -536,45 +555,69 @@ class SyncHandler(object):
last_event = last_events[-1]
state_ids = yield self.store.get_state_ids_for_event(
- last_event.event_id, [
- (EventTypes.Member, None),
+ last_event.event_id,
+ state_filter=StateFilter.from_types([
(EventTypes.Name, ''),
(EventTypes.CanonicalAlias, ''),
- ]
+ ]),
)
- member_ids = {
- state_key: event_id
- for (t, state_key), event_id in state_ids.iteritems()
- if t == EventTypes.Member
- }
+ # this is heavily cached, thus: fast.
+ details = yield self.store.get_room_summary(room_id)
+
name_id = state_ids.get((EventTypes.Name, ''))
canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, ''))
summary = {}
-
- # FIXME: it feels very heavy to load up every single membership event
- # just to calculate the counts.
- member_events = yield self.store.get_events(member_ids.values())
-
- joined_user_ids = []
- invited_user_ids = []
-
- for ev in member_events.values():
- if ev.content.get("membership") == Membership.JOIN:
- joined_user_ids.append(ev.state_key)
- elif ev.content.get("membership") == Membership.INVITE:
- invited_user_ids.append(ev.state_key)
+ empty_ms = MemberSummary([], 0)
# TODO: only send these when they change.
- summary["m.joined_member_count"] = len(joined_user_ids)
- summary["m.invited_member_count"] = len(invited_user_ids)
+ summary["m.joined_member_count"] = (
+ details.get(Membership.JOIN, empty_ms).count
+ )
+ summary["m.invited_member_count"] = (
+ details.get(Membership.INVITE, empty_ms).count
+ )
- if name_id or canonical_alias_id:
- defer.returnValue(summary)
+ # if the room has a name or canonical_alias set, we can skip
+ # calculating heroes. we assume that if the event has contents, it'll
+ # be a valid name or canonical_alias - i.e. we're checking that they
+ # haven't been "deleted" by blatting {} over the top.
+ if name_id:
+ name = yield self.store.get_event(name_id, allow_none=True)
+ if name and name.content:
+ defer.returnValue(summary)
+
+ if canonical_alias_id:
+ canonical_alias = yield self.store.get_event(
+ canonical_alias_id, allow_none=True,
+ )
+ if canonical_alias and canonical_alias.content:
+ defer.returnValue(summary)
+
+ joined_user_ids = [
+ r[0] for r in details.get(Membership.JOIN, empty_ms).members
+ ]
+ invited_user_ids = [
+ r[0] for r in details.get(Membership.INVITE, empty_ms).members
+ ]
+ gone_user_ids = (
+ [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] +
+ [r[0] for r in details.get(Membership.BAN, empty_ms).members]
+ )
- # FIXME: order by stream ordering, not alphabetic
+ # FIXME: only build up a member_ids list for our heroes
+ member_ids = {}
+ for membership in (
+ Membership.JOIN,
+ Membership.INVITE,
+ Membership.LEAVE,
+ Membership.BAN
+ ):
+ for user_id, event_id in details.get(membership, empty_ms).members:
+ member_ids[user_id] = event_id
+ # FIXME: order by stream ordering rather than as returned by SQL
me = sync_config.user.to_string()
if (joined_user_ids or invited_user_ids):
summary['m.heroes'] = sorted(
@@ -586,7 +629,11 @@ class SyncHandler(object):
)[0:5]
else:
summary['m.heroes'] = sorted(
- [user_id for user_id in member_ids.keys() if user_id != me]
+ [
+ user_id
+ for user_id in gone_user_ids
+ if user_id != me
+ ]
)[0:5]
if not sync_config.filter_collection.lazy_load_members():
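Putting the summary pieces above together, a simplified standalone sketch (plain dicts stand in for MemberSummary, and the counts here only cover the listed members rather than the cached totals used above):

def room_summary(details, me):
    # details maps membership ("join"/"invite"/"leave"/"ban") to a list of
    # (user_id, member_event_id) pairs.
    joined = [u for u, _ in details.get("join", [])]
    invited = [u for u, _ in details.get("invite", [])]
    gone = ([u for u, _ in details.get("leave", [])] +
            [u for u, _ in details.get("ban", [])])

    summary = {
        "m.joined_member_count": len(joined),
        "m.invited_member_count": len(invited),
    }
    # heroes: other members, preferring joined/invited ones, capped at five.
    candidates = (joined + invited) if (joined or invited) else gone
    summary["m.heroes"] = sorted(u for u in candidates if u != me)[0:5]
    return summary

print(room_summary(
    {"join": [("@alice:x", "$1"), ("@me:x", "$2")], "invite": [("@bob:x", "$3")]},
    "@me:x",
))
# {'m.joined_member_count': 2, 'm.invited_member_count': 1, 'm.heroes': ['@alice:x', '@bob:x']}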
@@ -663,8 +710,7 @@ class SyncHandler(object):
with Measure(self.clock, "compute_state_delta"):
- types = None
- filtered_types = None
+ members_to_fetch = None
lazy_load_members = sync_config.filter_collection.lazy_load_members()
include_redundant_members = (
@@ -675,16 +721,21 @@ class SyncHandler(object):
# We only request state for the members needed to display the
# timeline:
- types = [
- (EventTypes.Member, state_key)
- for state_key in set(
- event.sender # FIXME: we also care about invite targets etc.
- for event in batch.events
- )
- ]
+ members_to_fetch = set(
+ event.sender # FIXME: we also care about invite targets etc.
+ for event in batch.events
+ )
+
+ if full_state:
+ # always make sure we LL ourselves so we know we're in the room
+ # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
+ # We only need to apply this on full-state syncs given we disabled
+ # LL for incr syncs in #3840.
+ members_to_fetch.add(sync_config.user.to_string())
- # only apply the filtering to room members
- filtered_types = [EventTypes.Member]
+ state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
+ else:
+ state_filter = StateFilter.all()
timeline_state = {
(event.type, event.state_key): event.event_id
@@ -694,19 +745,17 @@ class SyncHandler(object):
if full_state:
if batch:
current_state_ids = yield self.store.get_state_ids_for_event(
- batch.events[-1].event_id, types=types,
- filtered_types=filtered_types,
+ batch.events[-1].event_id, state_filter=state_filter,
)
state_ids = yield self.store.get_state_ids_for_event(
- batch.events[0].event_id, types=types,
- filtered_types=filtered_types,
+ batch.events[0].event_id, state_filter=state_filter,
)
else:
current_state_ids = yield self.get_state_at(
- room_id, stream_position=now_token, types=types,
- filtered_types=filtered_types,
+ room_id, stream_position=now_token,
+ state_filter=state_filter,
)
state_ids = current_state_ids
@@ -719,19 +768,31 @@ class SyncHandler(object):
lazy_load_members=lazy_load_members,
)
elif batch.limited:
- state_at_previous_sync = yield self.get_state_at(
- room_id, stream_position=since_token, types=types,
- filtered_types=filtered_types,
+ state_at_timeline_start = yield self.store.get_state_ids_for_event(
+ batch.events[0].event_id, state_filter=state_filter,
)
- current_state_ids = yield self.store.get_state_ids_for_event(
- batch.events[-1].event_id, types=types,
- filtered_types=filtered_types,
+ # for now, we disable LL for gappy syncs - see
+ # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
+ # N.B. this slows down incr syncs as we are now processing way
+ # more state in the server than if we were LLing.
+ #
+ # We still have to filter timeline_start to LL entries (above) in order
+ # for _calculate_state's LL logic to work, as we have to include LL
+ # members for timeline senders in case they weren't loaded in the initial
+ # sync. We do this (counterintuitively) by filtering timeline_start
+ # members to just be ones which were timeline senders, which then ensures
+ # all of the rest get included in the state block (if we need to know
+ # about them).
+ state_filter = StateFilter.all()
+
+ state_at_previous_sync = yield self.get_state_at(
+ room_id, stream_position=since_token,
+ state_filter=state_filter,
)
- state_at_timeline_start = yield self.store.get_state_ids_for_event(
- batch.events[0].event_id, types=types,
- filtered_types=filtered_types,
+ current_state_ids = yield self.store.get_state_ids_for_event(
+ batch.events[-1].event_id, state_filter=state_filter,
)
state_ids = _calculate_state(
@@ -739,22 +800,28 @@ class SyncHandler(object):
timeline_start=state_at_timeline_start,
previous=state_at_previous_sync,
current=current_state_ids,
+ # we have to include LL members in case LL initial sync missed them
lazy_load_members=lazy_load_members,
)
else:
state_ids = {}
if lazy_load_members:
- if types:
- # We're returning an incremental sync, with no "gap" since
- # the previous sync, so normally there would be no state to return
+ if members_to_fetch and batch.events:
+ # We're returning an incremental sync, with no
+ # "gap" since the previous sync, so normally there would be
+ # no state to return.
# But we're lazy-loading, so the client might need some more
# member events to understand the events in this timeline.
# So we fish out all the member events corresponding to the
# timeline here, and then dedupe any redundant ones below.
state_ids = yield self.store.get_state_ids_for_event(
- batch.events[0].event_id, types=types,
- filtered_types=None, # we only want members!
+ batch.events[0].event_id,
+ # we only want members!
+ state_filter=StateFilter.from_types(
+ (EventTypes.Member, member)
+ for member in members_to_fetch
+ ),
)
if lazy_load_members and not include_redundant_members:
@@ -774,7 +841,7 @@ class SyncHandler(object):
logger.debug("filtering state from %r...", state_ids)
state_ids = {
t: event_id
- for t, event_id in state_ids.iteritems()
+ for t, event_id in iteritems(state_ids)
if cache.get(t[1]) != event_id
}
logger.debug("...to %r", state_ids)
@@ -1575,6 +1642,19 @@ class SyncHandler(object):
newly_joined_room=newly_joined,
)
+ # When we join the room (or the client requests full_state), we should
+ # send down any existing tags. Usually the user won't have tags in a
+ # newly joined room, unless either a) they've joined before or b) the
+ # tag was added by synapse e.g. for server notice rooms.
+ if full_state:
+ user_id = sync_result_builder.sync_config.user.to_string()
+ tags = yield self.store.get_tags_for_room(user_id, room_id)
+
+ # If there aren't any tags, don't send the empty tags list down
+ # sync
+ if not tags:
+ tags = None
+
account_data_events = []
if tags is not None:
account_data_events.append({
@@ -1603,10 +1683,24 @@ class SyncHandler(object):
)
summary = {}
+
+ # we include a summary in room responses when we're lazy loading
+ # members (as the client otherwise doesn't have enough info to form
+ # the name itself).
if (
sync_config.filter_collection.lazy_load_members() and
(
+ # we recalculate the summary:
+ # if there are membership changes in the timeline, or
+ # if membership has changed during a gappy sync, or
+ # if this is an initial sync.
any(ev.type == EventTypes.Member for ev in batch.events) or
+ (
+ # XXX: this may include false positives in the form of LL
+ # members which have snuck into state
+ batch.limited and
+ any(t == EventTypes.Member for (t, k) in state)
+ ) or
since_token is None
)
):
@@ -1636,6 +1730,16 @@ class SyncHandler(object):
unread_notifications["highlight_count"] = notifs["highlight_count"]
sync_result_builder.joined.append(room_sync)
+
+ if batch.limited and since_token:
+ user_id = sync_result_builder.sync_config.user.to_string()
+ logger.info(
+ "Incremental gappy sync of %s for user %s with %d state events",
+ room_id,
+ user_id,
+ len(state),
+ )
elif room_builder.rtype == "archived":
room_sync = ArchivedSyncResult(
room_id=room_id,
@@ -1729,17 +1833,17 @@ def _calculate_state(
event_id_to_key = {
e: key
for key, e in itertools.chain(
- timeline_contains.items(),
- previous.items(),
- timeline_start.items(),
- current.items(),
+ iteritems(timeline_contains),
+ iteritems(previous),
+ iteritems(timeline_start),
+ iteritems(current),
)
}
- c_ids = set(e for e in current.values())
- ts_ids = set(e for e in timeline_start.values())
- p_ids = set(e for e in previous.values())
- tc_ids = set(e for e in timeline_contains.values())
+ c_ids = set(e for e in itervalues(current))
+ ts_ids = set(e for e in itervalues(timeline_start))
+ p_ids = set(e for e in itervalues(previous))
+ tc_ids = set(e for e in itervalues(timeline_contains))
# If we are lazyloading room members, we explicitly add the membership events
# for the senders in the timeline into the state block returned by /sync,
@@ -1753,7 +1857,7 @@ def _calculate_state(
if lazy_load_members:
p_ids.difference_update(
- e for t, e in timeline_start.iteritems()
+ e for t, e in iteritems(timeline_start)
if t[0] == EventTypes.Member
)
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 2d2d3d5a..a61bbf93 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -20,6 +20,7 @@ from twisted.internet import defer
from synapse.api.errors import AuthError, SynapseError
from synapse.types import UserID, get_domain_from_id
+from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.logcontext import run_in_background
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer
@@ -62,17 +63,28 @@ class TypingHandler(object):
self._member_typing_until = {} # clock time we expect to stop
self._member_last_federation_poke = {}
- # map room IDs to serial numbers
- self._room_serials = {}
self._latest_room_serial = 0
- # map room IDs to sets of users currently typing
- self._room_typing = {}
+ self._reset()
+
+ # caches which room_ids changed at which serials
+ self._typing_stream_change_cache = StreamChangeCache(
+ "TypingStreamChangeCache", self._latest_room_serial,
+ )
self.clock.looping_call(
self._handle_timeouts,
5000,
)
+ def _reset(self):
+ """
+ Reset the typing handler's data caches.
+ """
+ # map room IDs to serial numbers
+ self._room_serials = {}
+ # map room IDs to sets of users currently typing
+ self._room_typing = {}
+
def _handle_timeouts(self):
logger.info("Checking for typing timeouts")
@@ -218,6 +230,7 @@ class TypingHandler(object):
for domain in set(get_domain_from_id(u) for u in users):
if domain != self.server_name:
+ logger.debug("sending typing update to %s", domain)
self.federation.send_edu(
destination=domain,
edu_type="m.typing",
@@ -274,19 +287,29 @@ class TypingHandler(object):
self._latest_room_serial += 1
self._room_serials[member.room_id] = self._latest_room_serial
+ self._typing_stream_change_cache.entity_has_changed(
+ member.room_id, self._latest_room_serial,
+ )
self.notifier.on_new_event(
"typing_key", self._latest_room_serial, rooms=[member.room_id]
)
def get_all_typing_updates(self, last_id, current_id):
- # TODO: Work out a way to do this without scanning the entire state.
if last_id == current_id:
return []
+ changed_rooms = self._typing_stream_change_cache.get_all_entities_changed(
+ last_id,
+ )
+
+ if changed_rooms is None:
+ changed_rooms = self._room_serials
+
rows = []
- for room_id, serial in self._room_serials.items():
- if last_id < serial and serial <= current_id:
+ for room_id in changed_rooms:
+ serial = self._room_serials[room_id]
+ if last_id < serial <= current_id:
typing = self._room_typing[room_id]
rows.append((serial, room_id, list(typing)))
rows.sort()
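Restated as a self-contained function (plain dicts stand in for the handler's attributes and for what the StreamChangeCache returns):

def get_all_typing_updates(room_serials, room_typing, changed_rooms, last_id, current_id):
    # changed_rooms is what the stream-change cache gave us: the rooms known
    # to have changed since last_id, or None if the cache doesn't reach back
    # that far, in which case we fall back to scanning every known room.
    if last_id == current_id:
        return []
    if changed_rooms is None:
        changed_rooms = room_serials
    rows = []
    for room_id in changed_rooms:
        serial = room_serials[room_id]
        if last_id < serial <= current_id:
            rows.append((serial, room_id, list(room_typing[room_id])))
    rows.sort()
    return rows

print(get_all_typing_updates(
    {"!a:x": 3, "!b:x": 5}, {"!a:x": {"@u:x"}, "!b:x": set()},
    None, 2, 5,
))
# [(3, '!a:x', ['@u:x']), (5, '!b:x', [])]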
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index d8413d6a..f11b4301 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -20,6 +20,7 @@ from six import iteritems
from twisted.internet import defer
from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.roommember import ProfileInfo
from synapse.types import get_localpart_from_id
from synapse.util.metrics import Measure
@@ -98,7 +99,6 @@ class UserDirectoryHandler(object):
"""
return self.store.search_user_dir(user_id, search_term, limit)
- @defer.inlineCallbacks
def notify_new_event(self):
"""Called when there may be more deltas to process
"""
@@ -108,11 +108,15 @@ class UserDirectoryHandler(object):
if self._is_processing:
return
+ @defer.inlineCallbacks
+ def process():
+ try:
+ yield self._unsafe_process()
+ finally:
+ self._is_processing = False
+
self._is_processing = True
- try:
- yield self._unsafe_process()
- finally:
- self._is_processing = False
+ run_as_background_process("user_directory.notify_new_event", process)
@defer.inlineCallbacks
def handle_local_profile_change(self, user_id, profile):
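The guard pattern introduced above, sketched standalone (run_in_background stands in for run_as_background_process; the class name is illustrative): notify_new_event returns immediately, and at most one processing loop runs at a time.

class DeltaProcessor(object):
    def __init__(self, run_in_background, unsafe_process):
        self._run_in_background = run_in_background
        self._unsafe_process = unsafe_process
        self._is_processing = False

    def notify_new_event(self):
        # Return straight away if a loop is already running; otherwise kick
        # one off in the background and clear the flag when it finishes.
        if self._is_processing:
            return

        def process():
            try:
                self._unsafe_process()
            finally:
                self._is_processing = False

        self._is_processing = True
        self._run_in_background(process)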
diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py
index 58ef8d3c..a3f9e4f6 100644
--- a/synapse/http/__init__.py
+++ b/synapse/http/__init__.py
@@ -38,12 +38,12 @@ def cancelled_to_request_timed_out_error(value, timeout):
return value
-ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
+ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
def redact_uri(uri):
"""Strips access tokens from the uri replaces with <redacted>"""
return ACCESS_TOKEN_RE.sub(
- br'\1<redacted>\3',
+ r'\1<redacted>\3',
uri
)
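The regex now operates on text rather than bytes; for example, using the pattern as changed above:

import re

ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')

def redact_uri(uri):
    return ACCESS_TOKEN_RE.sub(r'\1<redacted>\3', uri)

print(redact_uri("/_matrix/client/r0/sync?access_token=secret&since=s72_1"))
# /_matrix/client/r0/sync?access_token=<redacted>&since=s72_1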
diff --git a/synapse/http/client.py b/synapse/http/client.py
index ab4fbf59..3d05f83b 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -13,24 +13,25 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
import logging
-import urllib
-from six import StringIO
+from six import text_type
+from six.moves import urllib
+import treq
from canonicaljson import encode_canonical_json, json
from prometheus_client import Counter
from OpenSSL import SSL
from OpenSSL.SSL import VERIFY_NONE
-from twisted.internet import defer, protocol, reactor, ssl, task
+from twisted.internet import defer, protocol, reactor, ssl
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.web._newclient import ResponseDone
from twisted.web.client import (
Agent,
BrowserLikeRedirectAgent,
ContentDecoderAgent,
- FileBodyProducer as TwistedFileBodyProducer,
GzipDecoder,
HTTPConnectionPool,
PartialDownloadError,
@@ -42,7 +43,7 @@ from twisted.web.http_headers import Headers
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.http import cancelled_to_request_timed_out_error, redact_uri
from synapse.http.endpoint import SpiderEndpoint
-from synapse.util.async_helpers import add_timeout_to_deferred
+from synapse.util.async_helpers import timeout_deferred
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.logcontext import make_deferred_yieldable
@@ -83,8 +84,10 @@ class SimpleHttpClient(object):
if hs.config.user_agent_suffix:
self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,)
+ self.user_agent = self.user_agent.encode('ascii')
+
@defer.inlineCallbacks
- def request(self, method, uri, *args, **kwargs):
+ def request(self, method, uri, data=b'', headers=None):
# A small wrapper around self.agent.request() so we can easily attach
# counters to it
outgoing_requests_counter.labels(method).inc()
@@ -93,10 +96,10 @@ class SimpleHttpClient(object):
logger.info("Sending request %s %s", method, redact_uri(uri))
try:
- request_deferred = self.agent.request(
- method, uri, *args, **kwargs
+ request_deferred = treq.request(
+ method, uri, agent=self.agent, data=data, headers=headers
)
- add_timeout_to_deferred(
+ request_deferred = timeout_deferred(
request_deferred, 60, self.hs.get_reactor(),
cancelled_to_request_timed_out_error,
)
@@ -112,7 +115,7 @@ class SimpleHttpClient(object):
incoming_responses_counter.labels(method, "ERR").inc()
logger.info(
"Error sending request to %s %s: %s %s",
- method, redact_uri(uri), type(e).__name__, e.message
+ method, redact_uri(uri), type(e).__name__, e.args[0]
)
raise
@@ -137,7 +140,8 @@ class SimpleHttpClient(object):
# TODO: Do we ever want to log message contents?
logger.debug("post_urlencoded_get_json args: %s", args)
- query_bytes = urllib.urlencode(encode_urlencode_args(args), True)
+ query_bytes = urllib.parse.urlencode(
+ encode_urlencode_args(args), True).encode("utf8")
actual_headers = {
b"Content-Type": [b"application/x-www-form-urlencoded"],
@@ -148,15 +152,14 @@ class SimpleHttpClient(object):
response = yield self.request(
"POST",
- uri.encode("ascii"),
+ uri,
headers=Headers(actual_headers),
- bodyProducer=FileBodyProducer(StringIO(query_bytes))
+ data=query_bytes
)
- body = yield make_deferred_yieldable(readBody(response))
-
if 200 <= response.code < 300:
- defer.returnValue(json.loads(body))
+ body = yield make_deferred_yieldable(treq.json_content(response))
+ defer.returnValue(body)
else:
raise HttpResponseException(response.code, response.phrase, body)
@@ -191,9 +194,9 @@ class SimpleHttpClient(object):
response = yield self.request(
"POST",
- uri.encode("ascii"),
+ uri,
headers=Headers(actual_headers),
- bodyProducer=FileBodyProducer(StringIO(json_str))
+ data=json_str
)
body = yield make_deferred_yieldable(readBody(response))
@@ -248,7 +251,7 @@ class SimpleHttpClient(object):
ValueError: if the response was not JSON
"""
if len(args):
- query_bytes = urllib.urlencode(args, True)
+ query_bytes = urllib.parse.urlencode(args, True)
uri = "%s?%s" % (uri, query_bytes)
json_str = encode_canonical_json(json_body)
@@ -262,9 +265,9 @@ class SimpleHttpClient(object):
response = yield self.request(
"PUT",
- uri.encode("ascii"),
+ uri,
headers=Headers(actual_headers),
- bodyProducer=FileBodyProducer(StringIO(json_str))
+ data=json_str
)
body = yield make_deferred_yieldable(readBody(response))
@@ -293,7 +296,7 @@ class SimpleHttpClient(object):
HttpResponseException on a non-2xx HTTP response.
"""
if len(args):
- query_bytes = urllib.urlencode(args, True)
+ query_bytes = urllib.parse.urlencode(args, True)
uri = "%s?%s" % (uri, query_bytes)
actual_headers = {
@@ -304,7 +307,7 @@ class SimpleHttpClient(object):
response = yield self.request(
"GET",
- uri.encode("ascii"),
+ uri,
headers=Headers(actual_headers),
)
@@ -339,13 +342,14 @@ class SimpleHttpClient(object):
response = yield self.request(
"GET",
- url.encode("ascii"),
+ url,
headers=Headers(actual_headers),
)
resp_headers = dict(response.headers.getAllRawHeaders())
- if 'Content-Length' in resp_headers and resp_headers['Content-Length'] > max_size:
+ if (b'Content-Length' in resp_headers and
+ int(resp_headers[b'Content-Length']) > max_size):
logger.warn("Requested URL is too large > %r bytes" % (self.max_size,))
raise SynapseError(
502,
@@ -378,7 +382,12 @@ class SimpleHttpClient(object):
)
defer.returnValue(
- (length, resp_headers, response.request.absoluteURI, response.code),
+ (
+ length,
+ resp_headers,
+ response.request.absoluteURI.decode('ascii'),
+ response.code,
+ ),
)
@@ -434,12 +443,12 @@ class CaptchaServerHttpClient(SimpleHttpClient):
@defer.inlineCallbacks
def post_urlencoded_get_raw(self, url, args={}):
- query_bytes = urllib.urlencode(encode_urlencode_args(args), True)
+ query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True)
response = yield self.request(
"POST",
- url.encode("ascii"),
- bodyProducer=FileBodyProducer(StringIO(query_bytes)),
+ url,
+ data=query_bytes,
headers=Headers({
b"Content-Type": [b"application/x-www-form-urlencoded"],
b"User-Agent": [self.user_agent],
@@ -463,9 +472,9 @@ class SpiderEndpointFactory(object):
def endpointForURI(self, uri):
logger.info("Getting endpoint for %s", uri.toBytes())
- if uri.scheme == "http":
+ if uri.scheme == b"http":
endpoint_factory = HostnameEndpoint
- elif uri.scheme == "https":
+ elif uri.scheme == b"https":
tlsCreator = self.policyForHTTPS.creatorForNetloc(uri.host, uri.port)
def endpoint_factory(reactor, host, port, **kw):
@@ -510,7 +519,7 @@ def encode_urlencode_args(args):
def encode_urlencode_arg(arg):
- if isinstance(arg, unicode):
+ if isinstance(arg, text_type):
return arg.encode('utf-8')
elif isinstance(arg, list):
return [encode_urlencode_arg(i) for i in arg]
@@ -542,26 +551,3 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):
def creatorForNetloc(self, hostname, port):
return self
-
-
-class FileBodyProducer(TwistedFileBodyProducer):
- """Workaround for https://twistedmatrix.com/trac/ticket/8473
-
- We override the pauseProducing and resumeProducing methods in twisted's
- FileBodyProducer so that they do not raise exceptions if the task has
- already completed.
- """
-
- def pauseProducing(self):
- try:
- super(FileBodyProducer, self).pauseProducing()
- except task.TaskDone:
- # task has already completed
- pass
-
- def resumeProducing(self):
- try:
- super(FileBodyProducer, self).resumeProducing()
- except task.NotPaused:
- # task was not paused (probably because it had already completed)
- pass
diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py
index b0c93695..91025037 100644
--- a/synapse/http/endpoint.py
+++ b/synapse/http/endpoint.py
@@ -108,7 +108,7 @@ def matrix_federation_endpoint(reactor, destination, tls_client_options_factory=
Args:
reactor: Twisted reactor.
- destination (bytes): The name of the server to connect to.
+ destination (unicode): The name of the server to connect to.
tls_client_options_factory
(synapse.crypto.context_factory.ClientTLSOptionsFactory):
Factory which generates TLS options for client connections.
@@ -126,10 +126,17 @@ def matrix_federation_endpoint(reactor, destination, tls_client_options_factory=
transport_endpoint = HostnameEndpoint
default_port = 8008
else:
+ # the SNI string should be the same as the Host header, minus the port.
+ # as per https://github.com/matrix-org/synapse/issues/2525#issuecomment-336896777,
+ # the Host header and SNI should therefore be the server_name of the remote
+ # server.
+ tls_options = tls_client_options_factory.get_options(domain)
+
def transport_endpoint(reactor, host, port, timeout):
return wrapClientTLS(
- tls_client_options_factory.get_options(host),
- HostnameEndpoint(reactor, host, port, timeout=timeout))
+ tls_options,
+ HostnameEndpoint(reactor, host, port, timeout=timeout),
+ )
default_port = 8448
if port is None:
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index b34bb8e3..24b6110c 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -17,19 +17,22 @@ import cgi
import logging
import random
import sys
-import urllib
+from io import BytesIO
-from six import string_types
-from six.moves.urllib import parse as urlparse
+from six import PY3, string_types
+from six.moves import urllib
-from canonicaljson import encode_canonical_json, json
+import attr
+import treq
+from canonicaljson import encode_canonical_json
from prometheus_client import Counter
from signedjson.sign import sign_json
-from twisted.internet import defer, protocol, reactor
+from twisted.internet import defer, protocol
from twisted.internet.error import DNSLookupError
+from twisted.internet.task import _EPSILON, Cooperator
from twisted.web._newclient import ResponseDone
-from twisted.web.client import Agent, HTTPConnectionPool, readBody
+from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool
from twisted.web.http_headers import Headers
import synapse.metrics
@@ -40,14 +43,12 @@ from synapse.api.errors import (
HttpResponseException,
SynapseError,
)
-from synapse.http import cancelled_to_request_timed_out_error
from synapse.http.endpoint import matrix_federation_endpoint
-from synapse.util import logcontext
-from synapse.util.async_helpers import add_timeout_to_deferred
+from synapse.util.async_helpers import timeout_deferred
from synapse.util.logcontext import make_deferred_yieldable
+from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
-outbound_logger = logging.getLogger("synapse.http.outbound")
outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests",
"", ["method"])
@@ -58,20 +59,119 @@ incoming_responses_counter = Counter("synapse_http_matrixfederationclient_respon
MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3
+if PY3:
+ MAXINT = sys.maxsize
+else:
+ MAXINT = sys.maxint
+
class MatrixFederationEndpointFactory(object):
def __init__(self, hs):
+ self.reactor = hs.get_reactor()
self.tls_client_options_factory = hs.tls_client_options_factory
def endpointForURI(self, uri):
- destination = uri.netloc
+ destination = uri.netloc.decode('ascii')
return matrix_federation_endpoint(
- reactor, destination, timeout=10,
+ self.reactor, destination, timeout=10,
tls_client_options_factory=self.tls_client_options_factory
)
+_next_id = 1
+
+
+@attr.s
+class MatrixFederationRequest(object):
+ method = attr.ib()
+ """HTTP method
+ :type: str
+ """
+
+ path = attr.ib()
+ """HTTP path
+ :type: str
+ """
+
+ destination = attr.ib()
+ """The remote server to send the HTTP request to.
+ :type: str"""
+
+ json = attr.ib(default=None)
+ """JSON to send in the body.
+ :type: dict|None
+ """
+
+ json_callback = attr.ib(default=None)
+ """A callback to generate the JSON.
+ :type: func|None
+ """
+
+ query = attr.ib(default=None)
+ """Query arguments.
+ :type: dict|None
+ """
+
+ txn_id = attr.ib(default=None)
+ """Unique ID for this request (for logging)
+ :type: str|None
+ """
+
+ def __attrs_post_init__(self):
+ global _next_id
+ self.txn_id = "%s-O-%s" % (self.method, _next_id)
+ _next_id = (_next_id + 1) % (MAXINT - 1)
+
+ def get_json(self):
+ if self.json_callback:
+ return self.json_callback()
+ return self.json
+
+
+@defer.inlineCallbacks
+def _handle_json_response(reactor, timeout_sec, request, response):
+ """
+ Reads the JSON body of a response, with a timeout
+
+ Args:
+ reactor (IReactor): twisted reactor, for the timeout
+ timeout_sec (float): number of seconds to wait for response to complete
+ request (MatrixFederationRequest): the request that triggered the response
+ response (IResponse): response to the request
+
+ Returns:
+ dict: parsed JSON response
+ """
+ try:
+ check_content_type_is_json(response.headers)
+
+ d = treq.json_content(response)
+ d = timeout_deferred(
+ d,
+ timeout=timeout_sec,
+ reactor=reactor,
+ )
+
+ body = yield make_deferred_yieldable(d)
+ except Exception as e:
+ logger.warn(
+ "{%s} [%s] Error reading response: %s",
+ request.txn_id,
+ request.destination,
+ e,
+ )
+ raise
+ logger.info(
+ "{%s} [%s] Completed: %d %s",
+ request.txn_id,
+ request.destination,
+ response.code,
+ response.phrase.decode('ascii', errors='replace'),
+ )
+ defer.returnValue(body)
+
+
class MatrixFederationHttpClient(object):
"""HTTP client used to talk to other homeservers over the federation
protocol. Send client certificates and signs requests.
@@ -85,7 +185,9 @@ class MatrixFederationHttpClient(object):
self.hs = hs
self.signing_key = hs.config.signing_key[0]
self.server_name = hs.hostname
+ reactor = hs.get_reactor()
pool = HTTPConnectionPool(reactor)
+ pool.retryAutomatically = False
pool.maxPersistentPerHost = 5
pool.cachedConnectionTimeout = 2 * 60
self.agent = Agent.usingEndpointFactory(
@@ -93,34 +195,42 @@ class MatrixFederationHttpClient(object):
)
self.clock = hs.get_clock()
self._store = hs.get_datastore()
- self.version_string = hs.version_string
- self._next_id = 1
+ self.version_string_bytes = hs.version_string.encode('ascii')
+ self.default_timeout = 60
- def _create_url(self, destination, path_bytes, param_bytes, query_bytes):
- return urlparse.urlunparse(
- ("matrix", destination, path_bytes, param_bytes, query_bytes, "")
- )
+ def schedule(x):
+ reactor.callLater(_EPSILON, x)
+
+ self._cooperator = Cooperator(scheduler=schedule)
@defer.inlineCallbacks
- def _request(self, destination, method, path,
- body_callback, headers_dict={}, param_bytes=b"",
- query_bytes=b"", retry_on_dns_fail=True,
- timeout=None, long_retries=False,
- ignore_backoff=False,
- backoff_on_404=False):
- """ Creates and sends a request to the given server
+ def _send_request(
+ self,
+ request,
+ retry_on_dns_fail=True,
+ timeout=None,
+ long_retries=False,
+ ignore_backoff=False,
+ backoff_on_404=False
+ ):
+ """
+ Sends a request to the given server.
+
Args:
- destination (str): The remote server to send the HTTP request to.
- method (str): HTTP method
- path (str): The HTTP path
+ request (MatrixFederationRequest): details of request to be sent
+
+ timeout (int|None): number of milliseconds to wait for the response headers
+ (including connecting to the server). 60s by default.
+
ignore_backoff (bool): true to ignore the historical backoff data
and try the request anyway.
+
backoff_on_404 (bool): Back off if we get a 404
Returns:
Deferred: resolves with the http response object on success.
- Fails with ``HTTPRequestException``: if we get an HTTP response
+ Fails with ``HttpResponseException``: if we get an HTTP response
code >= 300.
Fails with ``NotRetryingDestination`` if we are not yet ready
@@ -132,38 +242,39 @@ class MatrixFederationHttpClient(object):
(May also fail with plenty of other Exceptions for things like DNS
failures, connection failures, SSL failures.)
"""
+ if timeout:
+ _sec_timeout = timeout / 1000
+ else:
+ _sec_timeout = self.default_timeout
+
if (
self.hs.config.federation_domain_whitelist is not None and
- destination not in self.hs.config.federation_domain_whitelist
+ request.destination not in self.hs.config.federation_domain_whitelist
):
- raise FederationDeniedError(destination)
+ raise FederationDeniedError(request.destination)
limiter = yield synapse.util.retryutils.get_retry_limiter(
- destination,
+ request.destination,
self.clock,
self._store,
backoff_on_404=backoff_on_404,
ignore_backoff=ignore_backoff,
)
- destination = destination.encode("ascii")
- path_bytes = path.encode("ascii")
- with limiter:
- headers_dict[b"User-Agent"] = [self.version_string]
- headers_dict[b"Host"] = [destination]
-
- url_bytes = self._create_url(
- destination, path_bytes, param_bytes, query_bytes
- )
-
- txn_id = "%s-O-%s" % (method, self._next_id)
- self._next_id = (self._next_id + 1) % (sys.maxint - 1)
+ method_bytes = request.method.encode("ascii")
+ destination_bytes = request.destination.encode("ascii")
+ path_bytes = request.path.encode("ascii")
+ if request.query:
+ query_bytes = encode_query_args(request.query)
+ else:
+ query_bytes = b""
- outbound_logger.info(
- "{%s} [%s] Sending request: %s %s",
- txn_id, destination, method, url_bytes
- )
+ headers_dict = {
+ b"User-Agent": [self.version_string_bytes],
+ b"Host": [destination_bytes],
+ }
+ with limiter:
# XXX: Would be much nicer to retry only at the transaction-layer
# (once we have reliable transactions in place)
if long_retries:
@@ -171,88 +282,120 @@ class MatrixFederationHttpClient(object):
else:
retries_left = MAX_SHORT_RETRIES
- http_url_bytes = urlparse.urlunparse(
- ("", "", path_bytes, param_bytes, query_bytes, "")
- )
+ url_bytes = urllib.parse.urlunparse((
+ b"matrix", destination_bytes,
+ path_bytes, None, query_bytes, b"",
+ ))
+ url_str = url_bytes.decode('ascii')
+
+ url_to_sign_bytes = urllib.parse.urlunparse((
+ b"", b"",
+ path_bytes, None, query_bytes, b"",
+ ))
- log_result = None
- try:
- while True:
- producer = None
- if body_callback:
- producer = body_callback(method, http_url_bytes, headers_dict)
-
- try:
- request_deferred = self.agent.request(
- method,
- url_bytes,
- Headers(headers_dict),
- producer
+ while True:
+ try:
+ json = request.get_json()
+ if json:
+ headers_dict[b"Content-Type"] = [b"application/json"]
+ self.sign_request(
+ destination_bytes, method_bytes, url_to_sign_bytes,
+ headers_dict, json,
)
- add_timeout_to_deferred(
- request_deferred,
- timeout / 1000. if timeout else 60,
- self.hs.get_reactor(),
- cancelled_to_request_timed_out_error,
+ data = encode_canonical_json(json)
+ producer = FileBodyProducer(
+ BytesIO(data),
+ cooperator=self._cooperator,
+ )
+ else:
+ producer = None
+ self.sign_request(
+ destination_bytes, method_bytes, url_to_sign_bytes,
+ headers_dict,
)
+
+ logger.info(
+ "{%s} [%s] Sending request: %s %s",
+ request.txn_id, request.destination, request.method,
+ url_str,
+ )
+
+ # we don't want all the fancy cookie and redirect handling that
+ # treq.request gives: just use the raw Agent.
+ request_deferred = self.agent.request(
+ method_bytes,
+ url_bytes,
+ headers=Headers(headers_dict),
+ bodyProducer=producer,
+ )
+
+ request_deferred = timeout_deferred(
+ request_deferred,
+ timeout=_sec_timeout,
+ reactor=self.hs.get_reactor(),
+ )
+
+ with Measure(self.clock, "outbound_request"):
response = yield make_deferred_yieldable(
request_deferred,
)
- log_result = "%d %s" % (response.code, response.phrase,)
- break
- except Exception as e:
- if not retry_on_dns_fail and isinstance(e, DNSLookupError):
- logger.warn(
- "DNS Lookup failed to %s with %s",
- destination,
- e
- )
- log_result = "DNS Lookup failed to %s with %s" % (
- destination, e
- )
- raise
-
- logger.warn(
- "{%s} Sending request failed to %s: %s %s: %s",
- txn_id,
- destination,
- method,
- url_bytes,
- _flatten_response_never_received(e),
+ break
+ except Exception as e:
+ logger.warn(
+ "{%s} [%s] Request failed: %s %s: %s",
+ request.txn_id,
+ request.destination,
+ request.method,
+ url_str,
+ _flatten_response_never_received(e),
+ )
+
+ if not retry_on_dns_fail and isinstance(e, DNSLookupError):
+ raise
+
+ if retries_left and not timeout:
+ if long_retries:
+ delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
+ delay = min(delay, 60)
+ delay *= random.uniform(0.8, 1.4)
+ else:
+ delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
+ delay = min(delay, 2)
+ delay *= random.uniform(0.8, 1.4)
+
+ logger.debug(
+ "{%s} [%s] Waiting %ss before re-sending...",
+ request.txn_id,
+ request.destination,
+ delay,
)
- log_result = _flatten_response_never_received(e)
-
- if retries_left and not timeout:
- if long_retries:
- delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
- delay = min(delay, 60)
- delay *= random.uniform(0.8, 1.4)
- else:
- delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
- delay = min(delay, 2)
- delay *= random.uniform(0.8, 1.4)
-
- yield self.clock.sleep(delay)
- retries_left -= 1
- else:
- raise
- finally:
- outbound_logger.info(
- "{%s} [%s] Result: %s",
- txn_id,
- destination,
- log_result,
- )
+ yield self.clock.sleep(delay)
+ retries_left -= 1
+ else:
+ raise
+
+ logger.info(
+ "{%s} [%s] Got response headers: %d %s",
+ request.txn_id,
+ request.destination,
+ response.code,
+ response.phrase.decode('ascii', errors='replace'),
+ )
if 200 <= response.code < 300:
pass
else:
# :'(
# Update transactions table?
- with logcontext.PreserveLoggingContext():
- body = yield readBody(response)
+ d = treq.content(response)
+ d = timeout_deferred(
+ d,
+ timeout=_sec_timeout,
+ reactor=self.hs.get_reactor(),
+ )
+ body = yield make_deferred_yieldable(d)
raise HttpResponseException(
response.code, response.phrase, body
)
@@ -269,8 +412,9 @@ class MatrixFederationHttpClient(object):
destination_is must be non-None.
method (bytes): The HTTP method of the request
url_bytes (bytes): The URI path of the request
- headers_dict (dict): Dictionary of request headers to append to
- content (bytes): The body of the request
+ headers_dict (dict[bytes, list[bytes]]): Dictionary of request headers to
+ append to
+ content (object): The body of the request
destination_is (bytes): As 'destination', but if the destination is an
identity server
@@ -297,11 +441,11 @@ class MatrixFederationHttpClient(object):
auth_headers = []
for key, sig in request["signatures"][self.server_name].items():
- auth_headers.append(bytes(
+ auth_headers.append((
"X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
self.server_name, key, sig,
- )
- ))
+ )).encode('ascii')
+ )
headers_dict[b"Authorization"] = auth_headers
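For reference, the signing hunk above builds the federation Authorization header values; a standalone sketch of just the string construction follows (the origin, key id and signature are made-up illustrative values):

def build_auth_headers(origin, signatures):
    # Mirrors the loop in sign_request above: one header value per signing key.
    auth_headers = []
    for key, sig in signatures.items():
        auth_headers.append((
            "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin, key, sig)
        ).encode("ascii"))
    return auth_headers

headers_dict = {}
headers_dict[b"Authorization"] = build_auth_headers(
    "example.org", {"ed25519:key1": "someBase64Signature"},
)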
@@ -336,7 +480,7 @@ class MatrixFederationHttpClient(object):
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
- Fails with ``HTTPRequestException`` if we get an HTTP response
+ Fails with ``HttpResponseException`` if we get an HTTP response
code >= 300.
Fails with ``NotRetryingDestination`` if we are not yet ready
@@ -346,38 +490,27 @@ class MatrixFederationHttpClient(object):
is not on our federation whitelist
"""
- if not json_data_callback:
- def json_data_callback():
- return data
+ request = MatrixFederationRequest(
+ method="PUT",
+ destination=destination,
+ path=path,
+ query=args,
+ json_callback=json_data_callback,
+ json=data,
+ )
- def body_callback(method, url_bytes, headers_dict):
- json_data = json_data_callback()
- self.sign_request(
- destination, method, url_bytes, headers_dict, json_data
- )
- producer = _JsonProducer(json_data)
- return producer
-
- response = yield self._request(
- destination,
- "PUT",
- path,
- body_callback=body_callback,
- headers_dict={"Content-Type": ["application/json"]},
- query_bytes=encode_query_args(args),
+ response = yield self._send_request(
+ request,
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
backoff_on_404=backoff_on_404,
)
- if 200 <= response.code < 300:
- # We need to update the transactions table to say it was sent?
- check_content_type_is_json(response.headers)
-
- with logcontext.PreserveLoggingContext():
- body = yield readBody(response)
- defer.returnValue(json.loads(body))
+ body = yield _handle_json_response(
+ self.hs.get_reactor(), self.default_timeout, request, response,
+ )
+ defer.returnValue(body)
@defer.inlineCallbacks
def post_json(self, destination, path, data={}, long_retries=False,
@@ -401,7 +534,7 @@ class MatrixFederationHttpClient(object):
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
- Fails with ``HTTPRequestException`` if we get an HTTP response
+ Fails with ``HttpResponseException`` if we get an HTTP response
code >= 300.
Fails with ``NotRetryingDestination`` if we are not yet ready
@@ -411,32 +544,30 @@ class MatrixFederationHttpClient(object):
is not on our federation whitelist
"""
- def body_callback(method, url_bytes, headers_dict):
- self.sign_request(
- destination, method, url_bytes, headers_dict, data
- )
- return _JsonProducer(data)
-
- response = yield self._request(
- destination,
- "POST",
- path,
- query_bytes=encode_query_args(args),
- body_callback=body_callback,
- headers_dict={"Content-Type": ["application/json"]},
+ request = MatrixFederationRequest(
+ method="POST",
+ destination=destination,
+ path=path,
+ query=args,
+ json=data,
+ )
+
+ response = yield self._send_request(
+ request,
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
)
- if 200 <= response.code < 300:
- # We need to update the transactions table to say it was sent?
- check_content_type_is_json(response.headers)
-
- with logcontext.PreserveLoggingContext():
- body = yield readBody(response)
+ if timeout:
+ _sec_timeout = timeout / 1000
+ else:
+ _sec_timeout = self.default_timeout
- defer.returnValue(json.loads(body))
+ body = yield _handle_json_response(
+ self.hs.get_reactor(), _sec_timeout, request, response,
+ )
+ defer.returnValue(body)
@defer.inlineCallbacks
def get_json(self, destination, path, args=None, retry_on_dns_fail=True,
@@ -458,7 +589,7 @@ class MatrixFederationHttpClient(object):
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
- Fails with ``HTTPRequestException`` if we get an HTTP response
+ Fails with ``HttpResponseException`` if we get an HTTP response
code >= 300.
Fails with ``NotRetryingDestination`` if we are not yet ready
@@ -471,29 +602,24 @@ class MatrixFederationHttpClient(object):
logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail)
- def body_callback(method, url_bytes, headers_dict):
- self.sign_request(destination, method, url_bytes, headers_dict)
- return None
+ request = MatrixFederationRequest(
+ method="GET",
+ destination=destination,
+ path=path,
+ query=args,
+ )
- response = yield self._request(
- destination,
- "GET",
- path,
- query_bytes=encode_query_args(args),
- body_callback=body_callback,
+ response = yield self._send_request(
+ request,
retry_on_dns_fail=retry_on_dns_fail,
timeout=timeout,
ignore_backoff=ignore_backoff,
)
- if 200 <= response.code < 300:
- # We need to update the transactions table to say it was sent?
- check_content_type_is_json(response.headers)
-
- with logcontext.PreserveLoggingContext():
- body = yield readBody(response)
-
- defer.returnValue(json.loads(body))
+ body = yield _handle_json_response(
+ self.hs.get_reactor(), self.default_timeout, request, response,
+ )
+ defer.returnValue(body)
@defer.inlineCallbacks
def delete_json(self, destination, path, long_retries=False,
@@ -514,7 +640,7 @@ class MatrixFederationHttpClient(object):
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
- Fails with ``HTTPRequestException`` if we get an HTTP response
+ Fails with ``HttpResponseException`` if we get an HTTP response
code >= 300.
Fails with ``NotRetryingDestination`` if we are not yet ready
@@ -523,26 +649,24 @@ class MatrixFederationHttpClient(object):
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
"""
+ request = MatrixFederationRequest(
+ method="DELETE",
+ destination=destination,
+ path=path,
+ query=args,
+ )
- response = yield self._request(
- destination,
- "DELETE",
- path,
- query_bytes=encode_query_args(args),
- headers_dict={"Content-Type": ["application/json"]},
+ response = yield self._send_request(
+ request,
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
)
- if 200 <= response.code < 300:
- # We need to update the transactions table to say it was sent?
- check_content_type_is_json(response.headers)
-
- with logcontext.PreserveLoggingContext():
- body = yield readBody(response)
-
- defer.returnValue(json.loads(body))
+ body = yield _handle_json_response(
+ self.hs.get_reactor(), self.default_timeout, request, response,
+ )
+ defer.returnValue(body)
@defer.inlineCallbacks
def get_file(self, destination, path, output_stream, args={},
@@ -560,7 +684,7 @@ class MatrixFederationHttpClient(object):
Deferred: resolves with an (int,dict) tuple of the file length and
a dict of the response headers.
- Fails with ``HTTPRequestException`` if we get an HTTP response code
+ Fails with ``HttpResponseException`` if we get an HTTP response code
>= 300
Fails with ``NotRetryingDestination`` if we are not yet ready
@@ -569,26 +693,15 @@ class MatrixFederationHttpClient(object):
Fails with ``FederationDeniedError`` if this destination
is not on our federation whitelist
"""
+ request = MatrixFederationRequest(
+ method="GET",
+ destination=destination,
+ path=path,
+ query=args,
+ )
- encoded_args = {}
- for k, vs in args.items():
- if isinstance(vs, string_types):
- vs = [vs]
- encoded_args[k] = [v.encode("UTF-8") for v in vs]
-
- query_bytes = urllib.urlencode(encoded_args, True)
- logger.debug("Query bytes: %s Retry DNS: %s", query_bytes, retry_on_dns_fail)
-
- def body_callback(method, url_bytes, headers_dict):
- self.sign_request(destination, method, url_bytes, headers_dict)
- return None
-
- response = yield self._request(
- destination,
- "GET",
- path,
- query_bytes=query_bytes,
- body_callback=body_callback,
+ response = yield self._send_request(
+ request,
retry_on_dns_fail=retry_on_dns_fail,
ignore_backoff=ignore_backoff,
)
@@ -596,14 +709,25 @@ class MatrixFederationHttpClient(object):
headers = dict(response.headers.getAllRawHeaders())
try:
- with logcontext.PreserveLoggingContext():
- length = yield _readBodyToFile(
- response, output_stream, max_size
- )
- except Exception:
- logger.exception("Failed to download body")
+ d = _readBodyToFile(response, output_stream, max_size)
+ d.addTimeout(self.default_timeout, self.hs.get_reactor())
+ length = yield make_deferred_yieldable(d)
+ except Exception as e:
+ logger.warn(
+ "{%s} [%s] Error reading response: %s",
+ request.txn_id,
+ request.destination,
+ e,
+ )
raise
-
+ logger.info(
+ "{%s} [%s] Completed: %d %s [%d bytes]",
+ request.txn_id,
+ request.destination,
+ response.code,
+ response.phrase.decode('ascii', errors='replace'),
+ length,
+ )
defer.returnValue((length, headers))
@@ -639,30 +763,6 @@ def _readBodyToFile(response, stream, max_size):
return d
-class _JsonProducer(object):
- """ Used by the twisted http client to create the HTTP body from json
- """
- def __init__(self, jsn):
- self.reset(jsn)
-
- def reset(self, jsn):
- self.body = encode_canonical_json(jsn)
- self.length = len(self.body)
-
- def startProducing(self, consumer):
- consumer.write(self.body)
- return defer.succeed(None)
-
- def pauseProducing(self):
- pass
-
- def stopProducing(self):
- pass
-
- def resumeProducing(self):
- pass
-
-
def _flatten_response_never_received(e):
if hasattr(e, "reasons"):
reasons = ", ".join(
@@ -693,7 +793,7 @@ def check_content_type_is_json(headers):
"No Content-Type header"
)
- c_type = c_type[0] # only the first header
+ c_type = c_type[0].decode('ascii') # only the first header
val, options = cgi.parse_header(c_type)
if val != "application/json":
raise RuntimeError(
@@ -711,6 +811,6 @@ def encode_query_args(args):
vs = [vs]
encoded_args[k] = [v.encode("UTF-8") for v in vs]
- query_bytes = urllib.urlencode(encoded_args, True)
+ query_bytes = urllib.parse.urlencode(encoded_args, True)
- return query_bytes
+ return query_bytes.encode('utf8')
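A minimal, self-contained sketch of the retry/backoff schedule used by `_send_request` above. The `MAX_LONG_RETRIES` and `MAX_SHORT_RETRIES` values below are placeholders for illustration; the real constants are defined elsewhere in this module.

import random

MAX_LONG_RETRIES = 10   # assumed value, for illustration only
MAX_SHORT_RETRIES = 3   # assumed value, for illustration only

def retry_delay(retries_left, long_retries):
    """Compute the sleep before the next retry, as in _send_request above."""
    if long_retries:
        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
        delay = min(delay, 60)
    else:
        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
        delay = min(delay, 2)
    # jitter, so that failing requests do not all retry in lockstep
    return delay * random.uniform(0.8, 1.4)

if __name__ == "__main__":
    for retries_left in range(MAX_SHORT_RETRIES, 0, -1):
        print("short retries, %d left: ~%.2fs" % (
            retries_left, retry_delay(retries_left, False),
        ))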
diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py
index 72c26546..62045a91 100644
--- a/synapse/http/request_metrics.py
+++ b/synapse/http/request_metrics.py
@@ -39,7 +39,8 @@ outgoing_responses_counter = Counter(
)
response_timer = Histogram(
- "synapse_http_server_response_time_seconds", "sec",
+ "synapse_http_server_response_time_seconds",
+ "sec",
["method", "servlet", "tag", "code"],
)
@@ -79,15 +80,11 @@ response_size = Counter(
# than when the response was written.
in_flight_requests_ru_utime = Counter(
- "synapse_http_server_in_flight_requests_ru_utime_seconds",
- "",
- ["method", "servlet"],
+ "synapse_http_server_in_flight_requests_ru_utime_seconds", "", ["method", "servlet"]
)
in_flight_requests_ru_stime = Counter(
- "synapse_http_server_in_flight_requests_ru_stime_seconds",
- "",
- ["method", "servlet"],
+ "synapse_http_server_in_flight_requests_ru_stime_seconds", "", ["method", "servlet"]
)
in_flight_requests_db_txn_count = Counter(
@@ -134,7 +131,7 @@ def _get_in_flight_counts():
# type
counts = {}
for rm in reqs:
- key = (rm.method, rm.name,)
+ key = (rm.method, rm.name)
counts[key] = counts.get(key, 0) + 1
return counts
@@ -162,7 +159,7 @@ class RequestMetrics(object):
with _in_flight_requests_lock:
_in_flight_requests.add(self)
- def stop(self, time_sec, request):
+ def stop(self, time_sec, response_code, sent_bytes):
with _in_flight_requests_lock:
_in_flight_requests.discard(self)
@@ -175,39 +172,40 @@ class RequestMetrics(object):
if context != self.start_context:
logger.warn(
"Context have unexpectedly changed %r, %r",
- context, self.start_context
+ context,
+ self.start_context,
)
return
- response_code = str(request.code)
+ response_code = str(response_code)
- outgoing_responses_counter.labels(request.method, response_code).inc()
+ outgoing_responses_counter.labels(self.method, response_code).inc()
- response_count.labels(request.method, self.name, tag).inc()
+ response_count.labels(self.method, self.name, tag).inc()
- response_timer.labels(request.method, self.name, tag, response_code).observe(
+ response_timer.labels(self.method, self.name, tag, response_code).observe(
time_sec - self.start
)
resource_usage = context.get_resource_usage()
- response_ru_utime.labels(request.method, self.name, tag).inc(
- resource_usage.ru_utime,
+ response_ru_utime.labels(self.method, self.name, tag).inc(
+ resource_usage.ru_utime
)
- response_ru_stime.labels(request.method, self.name, tag).inc(
- resource_usage.ru_stime,
+ response_ru_stime.labels(self.method, self.name, tag).inc(
+ resource_usage.ru_stime
)
- response_db_txn_count.labels(request.method, self.name, tag).inc(
+ response_db_txn_count.labels(self.method, self.name, tag).inc(
resource_usage.db_txn_count
)
- response_db_txn_duration.labels(request.method, self.name, tag).inc(
+ response_db_txn_duration.labels(self.method, self.name, tag).inc(
resource_usage.db_txn_duration_sec
)
- response_db_sched_duration.labels(request.method, self.name, tag).inc(
+ response_db_sched_duration.labels(self.method, self.name, tag).inc(
resource_usage.db_sched_duration_sec
)
- response_size.labels(request.method, self.name, tag).inc(request.sentLength)
+ response_size.labels(self.method, self.name, tag).inc(sent_bytes)
# We always call this at the end to ensure that we update the metrics
# regardless of whether a call to /metrics while the request was in
@@ -222,8 +220,15 @@ class RequestMetrics(object):
diff = new_stats - self._request_stats
self._request_stats = new_stats
- in_flight_requests_ru_utime.labels(self.method, self.name).inc(diff.ru_utime)
- in_flight_requests_ru_stime.labels(self.method, self.name).inc(diff.ru_stime)
+ # max() is used since rapid use of ru_stime/ru_utime can end up with the
+ # count going backwards due to NTP, time smearing, fine-grained
+ # correction, or floating points. Who knows, really?
+ in_flight_requests_ru_utime.labels(self.method, self.name).inc(
+ max(diff.ru_utime, 0)
+ )
+ in_flight_requests_ru_stime.labels(self.method, self.name).inc(
+ max(diff.ru_stime, 0)
+ )
in_flight_requests_db_txn_count.labels(self.method, self.name).inc(
diff.db_txn_count
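The max(..., 0) clamp above exists because a counter must never be incremented by a negative amount; if successive resource-usage snapshots go backwards, the delta is simply dropped. A small sketch of that idea, independent of the Prometheus classes:

def clamped_increment(inc, previous, current):
    """Increment by the delta between two cumulative readings, clamping at
    zero so that a reading which goes backwards contributes nothing.

    `inc` is any callable taking a non-negative float, e.g. the .inc method
    of a labelled prometheus_client Counter."""
    inc(max(current - previous, 0))
    return current

increments = []
last = 0.0
for reading in [0.10, 0.25, 0.24, 0.40]:   # the third reading goes backwards
    last = clamped_increment(increments.append, last, reading)
print(sum(increments))   # ~0.41: the backwards step added nothing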
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 2d5c23e6..6a427d96 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -84,10 +84,21 @@ def wrap_json_request_handler(h):
logger.info(
"%s SynapseError: %s - %s", request, code, e.msg
)
- respond_with_json(
- request, code, e.error_dict(), send_cors=True,
- pretty_print=_request_user_agent_is_curl(request),
- )
+
+ # Only respond with an error response if we haven't already started
+            # writing, otherwise let's just kill the connection
+ if request.startedWriting:
+ if request.transport:
+ try:
+ request.transport.abortConnection()
+ except Exception:
+ # abortConnection throws if the connection is already closed
+ pass
+ else:
+ respond_with_json(
+ request, code, e.error_dict(), send_cors=True,
+ pretty_print=_request_user_agent_is_curl(request),
+ )
except Exception:
# failure.Failure() fishes the original Failure out
@@ -100,16 +111,26 @@ def wrap_json_request_handler(h):
request,
f.getTraceback().rstrip(),
)
- respond_with_json(
- request,
- 500,
- {
- "error": "Internal server error",
- "errcode": Codes.UNKNOWN,
- },
- send_cors=True,
- pretty_print=_request_user_agent_is_curl(request),
- )
+ # Only respond with an error response if we haven't already started
+            # writing, otherwise let's just kill the connection
+ if request.startedWriting:
+ if request.transport:
+ try:
+ request.transport.abortConnection()
+ except Exception:
+ # abortConnection throws if the connection is already closed
+ pass
+ else:
+ respond_with_json(
+ request,
+ 500,
+ {
+ "error": "Internal server error",
+ "errcode": Codes.UNKNOWN,
+ },
+ send_cors=True,
+ pretty_print=_request_user_agent_is_curl(request),
+ )
return wrap_async_request_handler(wrapped_request_handler)
@@ -447,13 +468,13 @@ def set_cors_headers(request):
Args:
request (twisted.web.http.Request): The http request to add CORs to.
"""
- request.setHeader("Access-Control-Allow-Origin", "*")
+ request.setHeader(b"Access-Control-Allow-Origin", b"*")
request.setHeader(
- "Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS"
+ b"Access-Control-Allow-Methods", b"GET, POST, PUT, DELETE, OPTIONS"
)
request.setHeader(
- "Access-Control-Allow-Headers",
- "Origin, X-Requested-With, Content-Type, Accept, Authorization"
+ b"Access-Control-Allow-Headers",
+ b"Origin, X-Requested-With, Content-Type, Accept, Authorization"
)
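Both hunks above repeat the same guard; a generic sketch of the pattern follows. `respond_or_abort` and `send_error` are hypothetical names used only for this illustration, and `request` is assumed to behave like a twisted.web request (it exposes `startedWriting` and `transport`).

def respond_or_abort(request, send_error):
    """If the response has already started going out we can no longer send a
    clean JSON error, so drop the connection; otherwise send the error."""
    if request.startedWriting:
        if request.transport:
            try:
                request.transport.abortConnection()
            except Exception:
                # abortConnection throws if the connection is already closed
                pass
    else:
        send_error(request)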
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index a1e4b88e..528125e7 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -121,16 +121,15 @@ def parse_string(request, name, default=None, required=False,
Args:
request: the twisted HTTP request.
- name (bytes/unicode): the name of the query parameter.
- default (bytes/unicode|None): value to use if the parameter is absent,
+ name (bytes|unicode): the name of the query parameter.
+ default (bytes|unicode|None): value to use if the parameter is absent,
defaults to None. Must be bytes if encoding is None.
required (bool): whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
- allowed_values (list[bytes/unicode]): List of allowed values for the
+ allowed_values (list[bytes|unicode]): List of allowed values for the
string, or None if any value is allowed, defaults to None. Must be
the same type as name, if given.
- encoding: The encoding to decode the name to, and decode the string
- content with.
+ encoding (str|None): The encoding to decode the string content with.
Returns:
bytes/unicode|None: A string value or the default. Unicode if encoding
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 88ed3714..e508c0bd 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -75,17 +75,35 @@ class SynapseRequest(Request):
return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % (
self.__class__.__name__,
id(self),
- self.method,
+ self.get_method(),
self.get_redacted_uri(),
- self.clientproto,
+ self.clientproto.decode('ascii', errors='replace'),
self.site.site_tag,
)
def get_request_id(self):
- return "%s-%i" % (self.method, self.request_seq)
+ return "%s-%i" % (self.get_method(), self.request_seq)
def get_redacted_uri(self):
- return redact_uri(self.uri)
+ uri = self.uri
+ if isinstance(uri, bytes):
+ uri = self.uri.decode('ascii')
+ return redact_uri(uri)
+
+ def get_method(self):
+        """Gets the method associated with the request (or placeholder if no
+        method has yet been received).
+
+ Note: This is necessary as the placeholder value in twisted is str
+ rather than bytes, so we need to sanitise `self.method`.
+
+ Returns:
+ str
+ """
+ method = self.method
+ if isinstance(method, bytes):
+ method = self.method.decode('ascii')
+ return method
def get_user_agent(self):
return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1]
@@ -116,7 +134,7 @@ class SynapseRequest(Request):
# dispatching to the handler, so that the handler
# can update the servlet name in the request
# metrics
- requests_counter.labels(self.method,
+ requests_counter.labels(self.get_method(),
self.request_metrics.name).inc()
@contextlib.contextmanager
@@ -204,14 +222,14 @@ class SynapseRequest(Request):
self.start_time = time.time()
self.request_metrics = RequestMetrics()
self.request_metrics.start(
- self.start_time, name=servlet_name, method=self.method,
+ self.start_time, name=servlet_name, method=self.get_method(),
)
self.site.access_logger.info(
"%s - %s - Received request: %s %s",
self.getClientIP(),
self.site.site_tag,
- self.method,
+ self.get_method(),
self.get_redacted_uri()
)
@@ -277,15 +295,15 @@ class SynapseRequest(Request):
int(usage.db_txn_count),
self.sentLength,
code,
- self.method,
+ self.get_method(),
self.get_redacted_uri(),
- self.clientproto,
+ self.clientproto.decode('ascii', errors='replace'),
user_agent,
usage.evt_db_fetch_count,
)
try:
- self.request_metrics.stop(self.finish_time, self)
+ self.request_metrics.stop(self.finish_time, self.code, self.sentLength)
except Exception as e:
logger.warn("Failed to stop metrics: %r", e)
@@ -305,7 +323,7 @@ class XForwardedForRequest(SynapseRequest):
C{b"-"}.
"""
return self.requestHeaders.getRawHeaders(
- b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip()
+ b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip().decode('ascii')
class SynapseRequestFactory(object):
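The get_method/decode changes above matter on Python 3, where interpolating bytes into a str gives the repr rather than the text. A tiny standalone illustration:

def sanitise_method(method):
    """As SynapseRequest.get_method above: twisted's placeholder method is a
    str, real methods arrive as bytes, so normalise everything to str."""
    if isinstance(method, bytes):
        method = method.decode("ascii")
    return method

print("%s-%i" % (b"GET", 1))                    # "b'GET'-1" on Python 3
print("%s-%i" % (sanitise_method(b"GET"), 1))   # "GET-1"
print("%s-%i" % (sanitise_method("-"), 1))      # the placeholder str passes through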
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 550f8443..59900aa5 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -18,8 +18,11 @@ import gc
import logging
import os
import platform
+import threading
import time
+import six
+
import attr
from prometheus_client import Counter, Gauge, Histogram
from prometheus_client.core import REGISTRY, GaugeMetricFamily
@@ -68,7 +71,7 @@ class LaterGauge(object):
return
if isinstance(calls, dict):
- for k, v in calls.items():
+ for k, v in six.iteritems(calls):
g.add_metric(k, v)
else:
g.add_metric([], calls)
@@ -87,6 +90,109 @@ class LaterGauge(object):
all_gauges[self.name] = self
+class InFlightGauge(object):
+ """Tracks number of things (e.g. requests, Measure blocks, etc) in flight
+ at any given time.
+
+ Each InFlightGauge will create a metric called `<name>_total` that counts
+    the number of in flight blocks, as well as a metric for each item in the
+ given `sub_metrics` as `<name>_<sub_metric>` which will get updated by the
+ callbacks.
+
+ Args:
+ name (str)
+ desc (str)
+ labels (list[str])
+ sub_metrics (list[str]): A list of sub metrics that the callbacks
+ will update.
+ """
+
+ def __init__(self, name, desc, labels, sub_metrics):
+ self.name = name
+ self.desc = desc
+ self.labels = labels
+ self.sub_metrics = sub_metrics
+
+        # Create a class which has the sub_metrics values as attributes, which
+ # default to 0 on initialization. Used to pass to registered callbacks.
+ self._metrics_class = attr.make_class(
+ "_MetricsEntry",
+ attrs={x: attr.ib(0) for x in sub_metrics},
+ slots=True,
+ )
+
+ # Counts number of in flight blocks for a given set of label values
+ self._registrations = {}
+
+ # Protects access to _registrations
+ self._lock = threading.Lock()
+
+ self._register_with_collector()
+
+ def register(self, key, callback):
+ """Registers that we've entered a new block with labels `key`.
+
+ `callback` gets called each time the metrics are collected. The same
+ value must also be given to `unregister`.
+
+ `callback` gets called with an object that has an attribute per
+ sub_metric, which should be updated with the necessary values. Note that
+ the metrics object is shared between all callbacks registered with the
+ same key.
+
+ Note that `callback` may be called on a separate thread.
+ """
+ with self._lock:
+ self._registrations.setdefault(key, set()).add(callback)
+
+ def unregister(self, key, callback):
+ """Registers that we've exited a block with labels `key`.
+ """
+
+ with self._lock:
+ self._registrations.setdefault(key, set()).discard(callback)
+
+ def collect(self):
+ """Called by prometheus client when it reads metrics.
+
+ Note: may be called by a separate thread.
+ """
+ in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels)
+
+ metrics_by_key = {}
+
+ # We copy so that we don't mutate the list while iterating
+ with self._lock:
+ keys = list(self._registrations)
+
+ for key in keys:
+ with self._lock:
+ callbacks = set(self._registrations[key])
+
+ in_flight.add_metric(key, len(callbacks))
+
+ metrics = self._metrics_class()
+ metrics_by_key[key] = metrics
+ for callback in callbacks:
+ callback(metrics)
+
+ yield in_flight
+
+ for name in self.sub_metrics:
+ gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels)
+ for key, metrics in six.iteritems(metrics_by_key):
+ gauge.add_metric(key, getattr(metrics, name))
+ yield gauge
+
+ def _register_with_collector(self):
+ if self.name in all_gauges.keys():
+ logger.warning("%s already registered, reregistering" % (self.name,))
+ REGISTRY.unregister(all_gauges.pop(self.name))
+
+ REGISTRY.register(self)
+ all_gauges[self.name] = self
+
+
#
# Detailed CPU metrics
#
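A sketch of how the new InFlightGauge is meant to be used, assuming the patched synapse.metrics module is importable; the gauge name, label and sub-metric below are invented for illustration.

from time import time

from synapse.metrics import InFlightGauge

frobnication_gauge = InFlightGauge(
    "synapse_frobnications", "in-flight frobnications",
    labels=["method"],
    sub_metrics=["elapsed_seconds"],
)

def frobnicate(method):
    key = (method,)          # one entry per label
    started = time()

    def on_collect(metrics):
        # Called at scrape time (possibly on another thread); the metrics
        # object has one attribute per sub_metric, starting at 0.
        metrics.elapsed_seconds += time() - started

    frobnication_gauge.register(key, on_collect)
    try:
        pass                 # ... the actual work would go here ...
    finally:
        frobnication_gauge.unregister(key, on_collect)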
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 167167be..037f1c49 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
import threading
import six
@@ -23,6 +24,9 @@ from twisted.internet import defer
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+logger = logging.getLogger(__name__)
+
+
_background_process_start_count = Counter(
"synapse_background_process_start_count",
"Number of background processes started",
@@ -97,9 +101,13 @@ class _Collector(object):
labels=["name"],
)
- # We copy the dict so that it doesn't change from underneath us
+ # We copy the dict so that it doesn't change from underneath us.
+ # We also copy the process lists as that can also change
with _bg_metrics_lock:
- _background_processes_copy = dict(_background_processes)
+ _background_processes_copy = {
+ k: list(v)
+ for k, v in six.iteritems(_background_processes)
+ }
for desc, processes in six.iteritems(_background_processes_copy):
background_process_in_flight_count.add_metric(
@@ -191,6 +199,8 @@ def run_as_background_process(desc, func, *args, **kwargs):
try:
yield func(*args, **kwargs)
+ except Exception:
+ logger.exception("Background process '%s' threw an exception", desc)
finally:
proc.update_metrics()
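For context, later hunks in this patch move fire-and-forget work onto run_as_background_process; a sketch of the calling convention (the store method below is hypothetical):

from twisted.internet import defer

from synapse.metrics.background_process_metrics import run_as_background_process

@defer.inlineCallbacks
def _prune_old_entries(store, before_ts):
    # hypothetical work function, for illustration only
    yield store.delete_entries_before(before_ts)

def start_pruning(store, before_ts):
    # Fire and forget: the background process gets its own logcontext and
    # metrics, and (with the change above) any exception is now logged
    # rather than vanishing.
    run_as_background_process("prune_old_entries", _prune_old_entries, store, before_ts)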
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 82f39148..de02b101 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -24,13 +24,10 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError
from synapse.handlers.presence import format_user_presence_state
from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import StreamToken
-from synapse.util.async_helpers import (
- DeferredTimeoutError,
- ObservableDeferred,
- add_timeout_to_deferred,
-)
-from synapse.util.logcontext import PreserveLoggingContext, run_in_background
+from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
+from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
from synapse.visibility import filter_events_for_client
@@ -189,9 +186,9 @@ class Notifier(object):
def count_listeners():
all_user_streams = set()
- for x in self.room_to_user_streams.values():
+ for x in list(self.room_to_user_streams.values()):
all_user_streams |= x
- for x in self.user_to_user_stream.values():
+ for x in list(self.user_to_user_stream.values()):
all_user_streams.add(x)
return sum(stream.count_listeners() for stream in all_user_streams)
@@ -199,7 +196,7 @@ class Notifier(object):
LaterGauge(
"synapse_notifier_rooms", "", [],
- lambda: count(bool, self.room_to_user_streams.values()),
+ lambda: count(bool, list(self.room_to_user_streams.values())),
)
LaterGauge(
"synapse_notifier_users", "", [],
@@ -252,7 +249,10 @@ class Notifier(object):
def _on_new_room_event(self, event, room_stream_id, extra_users=[]):
"""Notify any user streams that are interested in this room event"""
# poke any interested application service.
- run_in_background(self._notify_app_services, room_stream_id)
+ run_as_background_process(
+ "notify_app_services",
+ self._notify_app_services, room_stream_id,
+ )
if self.federation_sender:
self.federation_sender.notify_new_events(room_stream_id)
@@ -337,7 +337,7 @@ class Notifier(object):
# Now we wait for the _NotifierUserStream to be told there
# is a new token.
listener = user_stream.new_listener(prev_token)
- add_timeout_to_deferred(
+ listener.deferred = timeout_deferred(
listener.deferred,
(end_time - now) / 1000.,
self.hs.get_reactor(),
@@ -354,7 +354,7 @@ class Notifier(object):
# Update the prev_token to the current_token since nothing
# has happened between the old prev_token and the current_token
prev_token = current_token
- except DeferredTimeoutError:
+ except defer.TimeoutError:
break
except defer.CancelledError:
break
@@ -559,15 +559,16 @@ class Notifier(object):
if end_time <= now:
break
- add_timeout_to_deferred(
- listener.deferred.addTimeout,
- (end_time - now) / 1000.,
- self.hs.get_reactor(),
+ listener.deferred = timeout_deferred(
+ listener.deferred,
+ timeout=(end_time - now) / 1000.,
+ reactor=self.hs.get_reactor(),
)
+
try:
with PreserveLoggingContext():
yield listener.deferred
- except DeferredTimeoutError:
+ except defer.TimeoutError:
break
except defer.CancelledError:
break
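A sketch of the timeout_deferred pattern the notifier now uses: wrap the listener's deferred so it fails with defer.TimeoutError after the given number of milliseconds (the function name wait_for_event is invented for this example).

from twisted.internet import defer

from synapse.util.async_helpers import timeout_deferred

@defer.inlineCallbacks
def wait_for_event(listener_deferred, timeout_ms, reactor):
    wrapped = timeout_deferred(
        listener_deferred,
        timeout=timeout_ms / 1000.,
        reactor=reactor,
    )
    try:
        result = yield wrapped
    except defer.TimeoutError:
        result = None   # timed out: treat as "nothing new happened"
    defer.returnValue(result)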
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index d7463714..50e1007d 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -18,8 +18,7 @@ import logging
from twisted.internet import defer
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
-from synapse.util.logcontext import LoggingContext
-from synapse.util.metrics import Measure
+from synapse.metrics.background_process_metrics import run_as_background_process
logger = logging.getLogger(__name__)
@@ -71,18 +70,11 @@ class EmailPusher(object):
# See httppusher
self.max_stream_ordering = None
- self.processing = False
+ self._is_processing = False
- @defer.inlineCallbacks
def on_started(self):
if self.mailer is not None:
- try:
- self.throttle_params = yield self.store.get_throttle_params_by_room(
- self.pusher_id
- )
- yield self._process()
- except Exception:
- logger.exception("Error starting email pusher")
+ self._start_processing()
def on_stop(self):
if self.timed_call:
@@ -92,43 +84,55 @@ class EmailPusher(object):
pass
self.timed_call = None
- @defer.inlineCallbacks
def on_new_notifications(self, min_stream_ordering, max_stream_ordering):
- self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
- yield self._process()
+ if self.max_stream_ordering:
+ self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
+ else:
+ self.max_stream_ordering = max_stream_ordering
+ self._start_processing()
def on_new_receipts(self, min_stream_id, max_stream_id):
# We could wake up and cancel the timer but there tend to be quite a
# lot of read receipts so it's probably less work to just let the
# timer fire
- return defer.succeed(None)
+ pass
- @defer.inlineCallbacks
def on_timer(self):
self.timed_call = None
- yield self._process()
+ self._start_processing()
+
+ def _start_processing(self):
+ if self._is_processing:
+ return
+
+ run_as_background_process("emailpush.process", self._process)
@defer.inlineCallbacks
def _process(self):
- if self.processing:
- return
+ # we should never get here if we are already processing
+ assert not self._is_processing
+
+ try:
+ self._is_processing = True
+
+ if self.throttle_params is None:
+ # this is our first loop: load up the throttle params
+ self.throttle_params = yield self.store.get_throttle_params_by_room(
+ self.pusher_id
+ )
- with LoggingContext("emailpush._process"):
- with Measure(self.clock, "emailpush._process"):
+ # if the max ordering changes while we're running _unsafe_process,
+ # call it again, and so on until we've caught up.
+ while True:
+ starting_max_ordering = self.max_stream_ordering
try:
- self.processing = True
- # if the max ordering changes while we're running _unsafe_process,
- # call it again, and so on until we've caught up.
- while True:
- starting_max_ordering = self.max_stream_ordering
- try:
- yield self._unsafe_process()
- except Exception:
- logger.exception("Exception processing notifs")
- if self.max_stream_ordering == starting_max_ordering:
- break
- finally:
- self.processing = False
+ yield self._unsafe_process()
+ except Exception:
+ logger.exception("Exception processing notifs")
+ if self.max_stream_ordering == starting_max_ordering:
+ break
+ finally:
+ self._is_processing = False
@defer.inlineCallbacks
def _unsafe_process(self):
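The email pusher above (and the http pusher below) now share the same shape: a guard that refuses to start a second processing pass, plus a catch-up loop that re-runs until the high-water mark stops moving. A synchronous, self-contained sketch of that shape:

class CatchUpLoop(object):
    def __init__(self):
        self.max_stream_ordering = 0
        self._is_processing = False

    def on_new_notifications(self, max_stream_ordering):
        self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
        self._start_processing()

    def _start_processing(self):
        if self._is_processing:
            return
        # In synapse this is handed to run_as_background_process; calling it
        # inline keeps the sketch self-contained.
        self._process()

    def _process(self):
        assert not self._is_processing
        try:
            self._is_processing = True
            while True:
                starting_max_ordering = self.max_stream_ordering
                self._unsafe_process()
                if self.max_stream_ordering == starting_max_ordering:
                    break
        finally:
            self._is_processing = False

    def _unsafe_process(self):
        print("processing up to", self.max_stream_ordering)

loop = CatchUpLoop()
loop.on_new_notifications(3)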
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 81e18bcf..87fa7f00 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -15,17 +15,21 @@
# limitations under the License.
import logging
+import six
+
from prometheus_client import Counter
from twisted.internet import defer
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import PusherConfigException
-from synapse.util.logcontext import LoggingContext
-from synapse.util.metrics import Measure
from . import push_rule_evaluator, push_tools
+if six.PY3:
+ long = int
+
logger = logging.getLogger(__name__)
http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "")
@@ -56,7 +60,7 @@ class HttpPusher(object):
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
self.failing_since = pusherdict['failing_since']
self.timed_call = None
- self.processing = False
+ self._is_processing = False
# This is the highest stream ordering we know it's safe to process.
# When new events arrive, we'll be given a window of new events: we
@@ -87,34 +91,27 @@ class HttpPusher(object):
self.data_minus_url.update(self.data)
del self.data_minus_url['url']
- @defer.inlineCallbacks
def on_started(self):
- try:
- yield self._process()
- except Exception:
- logger.exception("Error starting http pusher")
+ self._start_processing()
- @defer.inlineCallbacks
def on_new_notifications(self, min_stream_ordering, max_stream_ordering):
- self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
- yield self._process()
+ self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0)
+ self._start_processing()
- @defer.inlineCallbacks
def on_new_receipts(self, min_stream_id, max_stream_id):
# Note that the min here shouldn't be relied upon to be accurate.
# We could check the receipts are actually m.read receipts here,
# but currently that's the only type of receipt anyway...
- with LoggingContext("push.on_new_receipts"):
- with Measure(self.clock, "push.on_new_receipts"):
- badge = yield push_tools.get_badge_count(
- self.hs.get_datastore(), self.user_id
- )
- yield self._send_badge(badge)
+ run_as_background_process("http_pusher.on_new_receipts", self._update_badge)
@defer.inlineCallbacks
+ def _update_badge(self):
+ badge = yield push_tools.get_badge_count(self.hs.get_datastore(), self.user_id)
+ yield self._send_badge(badge)
+
def on_timer(self):
- yield self._process()
+ self._start_processing()
def on_stop(self):
if self.timed_call:
@@ -124,27 +121,31 @@ class HttpPusher(object):
pass
self.timed_call = None
+ def _start_processing(self):
+ if self._is_processing:
+ return
+
+ run_as_background_process("httppush.process", self._process)
+
@defer.inlineCallbacks
def _process(self):
- if self.processing:
- return
+ # we should never get here if we are already processing
+ assert not self._is_processing
- with LoggingContext("push._process"):
- with Measure(self.clock, "push._process"):
+ try:
+ self._is_processing = True
+ # if the max ordering changes while we're running _unsafe_process,
+ # call it again, and so on until we've caught up.
+ while True:
+ starting_max_ordering = self.max_stream_ordering
try:
- self.processing = True
- # if the max ordering changes while we're running _unsafe_process,
- # call it again, and so on until we've caught up.
- while True:
- starting_max_ordering = self.max_stream_ordering
- try:
- yield self._unsafe_process()
- except Exception:
- logger.exception("Exception processing notifs")
- if self.max_stream_ordering == starting_max_ordering:
- break
- finally:
- self.processing = False
+ yield self._unsafe_process()
+ except Exception:
+ logger.exception("Exception processing notifs")
+ if self.max_stream_ordering == starting_max_ordering:
+ break
+ finally:
+ self._is_processing = False
@defer.inlineCallbacks
def _unsafe_process(self):
@@ -310,10 +311,10 @@ class HttpPusher(object):
]
}
}
- if event.type == 'm.room.member':
+ if event.type == 'm.room.member' and event.is_state():
d['notification']['membership'] = event.content['membership']
d['notification']['user_is_target'] = event.state_key == self.user_id
- if self.hs.config.push_include_content and 'content' in event:
+ if self.hs.config.push_include_content and event.content:
d['notification']['content'] = event.content
# We no longer send aliases separately, instead, we send the human
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index bfa6df7b..ebcb93bf 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -17,15 +17,15 @@ import email.mime.multipart
import email.utils
import logging
import time
-import urllib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
+from six.moves import urllib
+
import bleach
import jinja2
from twisted.internet import defer
-from twisted.mail.smtp import sendmail
from synapse.api.constants import EventTypes
from synapse.api.errors import StoreError
@@ -84,6 +84,7 @@ class Mailer(object):
self.notif_template_html = notif_template_html
self.notif_template_text = notif_template_text
+ self.sendmail = self.hs.get_sendmail()
self.store = self.hs.get_datastore()
self.macaroon_gen = self.hs.get_macaroon_generator()
self.state_handler = self.hs.get_state_handler()
@@ -190,11 +191,11 @@ class Mailer(object):
multipart_msg.attach(html_part)
logger.info("Sending email push notification to %s" % email_address)
- # logger.debug(html_text)
- yield sendmail(
+ yield self.sendmail(
self.hs.config.email_smtp_host,
- raw_from, raw_to, multipart_msg.as_string(),
+ raw_from, raw_to, multipart_msg.as_string().encode('utf8'),
+ reactor=self.hs.get_reactor(),
port=self.hs.config.email_smtp_port,
requireAuthentication=self.hs.config.email_smtp_user is not None,
username=self.hs.config.email_smtp_user,
@@ -332,7 +333,7 @@ class Mailer(object):
notif_events, user_id, reason):
if len(notifs_by_room) == 1:
# Only one room has new stuff
- room_id = notifs_by_room.keys()[0]
+ room_id = list(notifs_by_room.keys())[0]
# If the room has some kind of name, use it, but we don't
# want the generated-from-names one here otherwise we'll
@@ -440,7 +441,7 @@ class Mailer(object):
def make_room_link(self, room_id):
if self.hs.config.email_riot_base_url:
- base_url = self.hs.config.email_riot_base_url
+ base_url = "%s/#/room" % (self.hs.config.email_riot_base_url)
elif self.app_name == "Vector":
# need /beta for Universal Links to work on iOS
base_url = "https://vector.im/beta/#/room"
@@ -474,7 +475,7 @@ class Mailer(object):
# XXX: make r0 once API is stable
return "%s_matrix/client/unstable/pushers/remove?%s" % (
self.hs.config.public_baseurl,
- urllib.urlencode(params),
+ urllib.parse.urlencode(params),
)
@@ -525,8 +526,7 @@ def load_jinja2_templates(config):
Returns:
(notif_template_html, notif_template_text)
"""
- logger.info("loading jinja2")
-
+ logger.info("loading email templates from '%s'", config.email_template_dir)
loader = jinja2.FileSystemLoader(config.email_template_dir)
env = jinja2.Environment(loader=loader)
env.filters["format_ts"] = format_ts_filter
@@ -561,7 +561,7 @@ def _create_mxc_to_http_filter(config):
return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
config.public_baseurl,
serverAndMediaId,
- urllib.urlencode(params),
+ urllib.parse.urlencode(params),
fragment or "",
)
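The mailer hunks above swap the Python-2-only urllib import for six.moves so the same urlencode call works on both interpreters; a minimal sketch with made-up parameter values:

from six.moves import urllib

params = {"app_id": "m.email", "pushkey": "alice@example.com"}   # illustrative only
query = urllib.parse.urlencode(params)
print("%s_matrix/client/unstable/pushers/remove?%s" % ("https://hs.example.com/", query))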
diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
index 2bd321d5..cf6c8b87 100644
--- a/synapse/push/push_rule_evaluator.py
+++ b/synapse/push/push_rule_evaluator.py
@@ -124,7 +124,7 @@ class PushRuleEvaluatorForEvent(object):
# XXX: optimisation: cache our pattern regexps
if condition['key'] == 'content.body':
- body = self._event["content"].get("body", None)
+ body = self._event.content.get("body", None)
if not body:
return False
@@ -140,7 +140,7 @@ class PushRuleEvaluatorForEvent(object):
if not display_name:
return False
- body = self._event["content"].get("body", None)
+ body = self._event.content.get("body", None)
if not body:
return False
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 9f7d5ef2..5a4e73cc 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -20,24 +20,39 @@ from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push.pusher import PusherFactory
-from synapse.util.logcontext import make_deferred_yieldable, run_in_background
logger = logging.getLogger(__name__)
class PusherPool:
+ """
+ The pusher pool. This is responsible for dispatching notifications of new events to
+ the http and email pushers.
+
+ It provides three methods which are designed to be called by the rest of the
+ application: `start`, `on_new_notifications`, and `on_new_receipts`: each of these
+ delegates to each of the relevant pushers.
+
+ Note that it is expected that each pusher will have its own 'processing' loop which
+ will send out the notifications in the background, rather than blocking until the
+ notifications are sent; accordingly Pusher.on_started, Pusher.on_new_notifications and
+ Pusher.on_new_receipts are not expected to return deferreds.
+ """
def __init__(self, _hs):
self.hs = _hs
self.pusher_factory = PusherFactory(_hs)
- self.start_pushers = _hs.config.start_pushers
+ self._should_start_pushers = _hs.config.start_pushers
self.store = self.hs.get_datastore()
self.clock = self.hs.get_clock()
self.pushers = {}
- @defer.inlineCallbacks
def start(self):
- pushers = yield self.store.get_all_pushers()
- self._start_pushers(pushers)
+ """Starts the pushers off in a background process.
+ """
+ if not self._should_start_pushers:
+ logger.info("Not starting pushers because they are disabled in the config")
+ return
+ run_as_background_process("start_pushers", self._start_pushers)
@defer.inlineCallbacks
def add_pusher(self, user_id, access_token, kind, app_id,
@@ -86,7 +101,7 @@ class PusherPool:
last_stream_ordering=last_stream_ordering,
profile_tag=profile_tag,
)
- yield self._refresh_pusher(app_id, pushkey, user_id)
+ yield self.start_pusher_by_id(app_id, pushkey, user_id)
@defer.inlineCallbacks
def remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey,
@@ -123,45 +138,23 @@ class PusherPool:
p['app_id'], p['pushkey'], p['user_name'],
)
- def on_new_notifications(self, min_stream_id, max_stream_id):
- run_as_background_process(
- "on_new_notifications",
- self._on_new_notifications, min_stream_id, max_stream_id,
- )
-
@defer.inlineCallbacks
- def _on_new_notifications(self, min_stream_id, max_stream_id):
+ def on_new_notifications(self, min_stream_id, max_stream_id):
try:
users_affected = yield self.store.get_push_action_users_in_range(
min_stream_id, max_stream_id
)
- deferreds = []
-
for u in users_affected:
if u in self.pushers:
for p in self.pushers[u].values():
- deferreds.append(
- run_in_background(
- p.on_new_notifications,
- min_stream_id, max_stream_id,
- )
- )
-
- yield make_deferred_yieldable(
- defer.gatherResults(deferreds, consumeErrors=True),
- )
+ p.on_new_notifications(min_stream_id, max_stream_id)
+
except Exception:
logger.exception("Exception in pusher on_new_notifications")
- def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
- run_as_background_process(
- "on_new_receipts",
- self._on_new_receipts, min_stream_id, max_stream_id, affected_room_ids,
- )
-
@defer.inlineCallbacks
- def _on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
+ def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
try:
# Need to subtract 1 from the minimum because the lower bound here
# is not inclusive
@@ -171,26 +164,20 @@ class PusherPool:
# This returns a tuple, user_id is at index 3
users_affected = set([r[3] for r in updated_receipts])
- deferreds = []
-
for u in users_affected:
if u in self.pushers:
for p in self.pushers[u].values():
- deferreds.append(
- run_in_background(
- p.on_new_receipts,
- min_stream_id, max_stream_id,
- )
- )
-
- yield make_deferred_yieldable(
- defer.gatherResults(deferreds, consumeErrors=True),
- )
+ p.on_new_receipts(min_stream_id, max_stream_id)
+
except Exception:
logger.exception("Exception in pusher on_new_receipts")
@defer.inlineCallbacks
- def _refresh_pusher(self, app_id, pushkey, user_id):
+ def start_pusher_by_id(self, app_id, pushkey, user_id):
+ """Look up the details for the given pusher, and start it"""
+ if not self._should_start_pushers:
+ return
+
resultlist = yield self.store.get_pushers_by_app_id_and_pushkey(
app_id, pushkey
)
@@ -201,33 +188,49 @@ class PusherPool:
p = r
if p:
+ self._start_pusher(p)
- self._start_pushers([p])
+ @defer.inlineCallbacks
+ def _start_pushers(self):
+ """Start all the pushers
- def _start_pushers(self, pushers):
- if not self.start_pushers:
- logger.info("Not starting pushers because they are disabled in the config")
- return
+ Returns:
+ Deferred
+ """
+ pushers = yield self.store.get_all_pushers()
logger.info("Starting %d pushers", len(pushers))
for pusherdict in pushers:
- try:
- p = self.pusher_factory.create_pusher(pusherdict)
- except Exception:
- logger.exception("Couldn't start a pusher: caught Exception")
- continue
- if p:
- appid_pushkey = "%s:%s" % (
- pusherdict['app_id'],
- pusherdict['pushkey'],
- )
- byuser = self.pushers.setdefault(pusherdict['user_name'], {})
+ self._start_pusher(pusherdict)
+ logger.info("Started pushers")
- if appid_pushkey in byuser:
- byuser[appid_pushkey].on_stop()
- byuser[appid_pushkey] = p
- run_in_background(p.on_started)
+ def _start_pusher(self, pusherdict):
+ """Start the given pusher
- logger.info("Started pushers")
+ Args:
+ pusherdict (dict):
+
+ Returns:
+ None
+ """
+ try:
+ p = self.pusher_factory.create_pusher(pusherdict)
+ except Exception:
+ logger.exception("Couldn't start a pusher: caught Exception")
+ return
+
+ if not p:
+ return
+
+ appid_pushkey = "%s:%s" % (
+ pusherdict['app_id'],
+ pusherdict['pushkey'],
+ )
+ byuser = self.pushers.setdefault(pusherdict['user_name'], {})
+
+ if appid_pushkey in byuser:
+ byuser[appid_pushkey].on_stop()
+ byuser[appid_pushkey] = p
+ p.on_started()
@defer.inlineCallbacks
def remove_pusher(self, app_id, pushkey, user_id):
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 942d7c72..ca62ee76 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -33,31 +33,38 @@ logger = logging.getLogger(__name__)
# [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies
REQUIREMENTS = {
"jsonschema>=2.5.1": ["jsonschema>=2.5.1"],
- "frozendict>=0.4": ["frozendict"],
+ "frozendict>=1": ["frozendict"],
"unpaddedbase64>=1.1.0": ["unpaddedbase64>=1.1.0"],
"canonicaljson>=1.1.3": ["canonicaljson>=1.1.3"],
"signedjson>=1.0.0": ["signedjson>=1.0.0"],
"pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
- "service_identity>=1.0.0": ["service_identity>=1.0.0"],
+ "service_identity>=16.0.0": ["service_identity>=16.0.0"],
"Twisted>=17.1.0": ["twisted>=17.1.0"],
-
- # We use crypto.get_elliptic_curve which is only supported in >=0.15
- "pyopenssl>=0.15": ["OpenSSL>=0.15"],
-
- "pyyaml": ["yaml"],
- "pyasn1": ["pyasn1"],
- "daemonize": ["daemonize"],
- "bcrypt": ["bcrypt>=3.1.0"],
- "pillow": ["PIL"],
- "pydenticon": ["pydenticon"],
- "sortedcontainers": ["sortedcontainers"],
- "pysaml2>=3.0.0": ["saml2>=3.0.0"],
- "pymacaroons-pynacl": ["pymacaroons"],
- "msgpack-python>=0.3.0": ["msgpack"],
+ "treq>=15.1": ["treq>=15.1"],
+
+ # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
+ "pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"],
+
+ "pyyaml>=3.11": ["yaml"],
+ "pyasn1>=0.1.9": ["pyasn1"],
+ "pyasn1-modules>=0.0.7": ["pyasn1_modules"],
+ "daemonize>=2.3.1": ["daemonize"],
+ "bcrypt>=3.1.0": ["bcrypt>=3.1.0"],
+ "pillow>=3.1.2": ["PIL"],
+ "sortedcontainers>=1.4.4": ["sortedcontainers"],
+ "psutil>=2.0.0": ["psutil>=2.0.0"],
+ "pysaml2>=3.0.0": ["saml2"],
+ "pymacaroons-pynacl>=0.9.3": ["pymacaroons"],
+ "msgpack-python>=0.4.2": ["msgpack"],
"phonenumbers>=8.2.0": ["phonenumbers"],
- "six": ["six"],
- "prometheus_client": ["prometheus_client"],
- "attrs": ["attr"],
+ "six>=1.10": ["six"],
+
+ # prometheus_client 0.4.0 changed the format of counter metrics
+ # (cf https://github.com/matrix-org/synapse/issues/4001)
+ "prometheus_client>=0.0.18,<0.4.0": ["prometheus_client"],
+
+ # we use attr.s(slots), which arrived in 16.0.0
+ "attrs>=16.0.0": ["attr>=16.0.0"],
"netaddr>=0.7.18": ["netaddr"],
}
@@ -72,12 +79,6 @@ CONDITIONAL_REQUIREMENTS = {
"matrix-synapse-ldap3": {
"matrix-synapse-ldap3>=0.1": ["ldap_auth_provider"],
},
- "psutil": {
- "psutil>=2.0.0": ["psutil>=2.0.0"],
- },
- "affinity": {
- "affinity": ["affinity"],
- },
"postgres": {
"psycopg2>=2.6": ["psycopg2"]
}
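REQUIREMENTS maps a pip requirement string to the module names that should be importable once it is installed. The sketch below is not Synapse's actual dependency checker, just an illustration of how such a mapping can be exercised:

import importlib

REQUIREMENTS_SUBSET = {
    "six>=1.10": ["six"],
    "prometheus_client>=0.0.18,<0.4.0": ["prometheus_client"],
}

def missing_modules(requirements):
    missing = []
    for requirement, modules in requirements.items():
        for module in modules:
            try:
                importlib.import_module(module)
            except ImportError:
                missing.append((requirement, module))
    return missing

print(missing_modules(REQUIREMENTS_SUBSET))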
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index 3f7be74e..2d81d49e 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -15,6 +15,8 @@
import logging
+import six
+
from synapse.storage._base import SQLBaseStore
from synapse.storage.engines import PostgresEngine
@@ -23,6 +25,13 @@ from ._slaved_id_tracker import SlavedIdTracker
logger = logging.getLogger(__name__)
+def __func__(inp):
+ if six.PY3:
+ return inp
+ else:
+ return inp.__func__
+
+
class BaseSlavedStore(SQLBaseStore):
def __init__(self, db_conn, hs):
super(BaseSlavedStore, self).__init__(db_conn, hs)
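The __func__ helper above exists because on Python 2 `SomeClass.method` is an unbound-method object whose plain function lives in `.__func__`, while on Python 3 it is already a plain function. A small self-contained illustration of why the slaved stores need it:

import six

def __func__(inp):
    if six.PY3:
        return inp
    else:
        return inp.__func__

class Source(object):
    def greet(self):
        return "hello from %s" % (type(self).__name__,)

class Borrower(object):
    # re-use Source.greet without inheriting from Source, on both Pythons
    greet = __func__(Source.greet)

print(Borrower().greet())   # "hello from Borrower"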
diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py
index 87eaa530..4f19fd35 100644
--- a/synapse/replication/slave/storage/deviceinbox.py
+++ b/synapse/replication/slave/storage/deviceinbox.py
@@ -17,7 +17,7 @@ from synapse.storage import DataStore
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
from ._slaved_id_tracker import SlavedIdTracker
@@ -43,11 +43,11 @@ class SlavedDeviceInboxStore(BaseSlavedStore):
expiry_ms=30 * 60 * 1000,
)
- get_to_device_stream_token = DataStore.get_to_device_stream_token.__func__
- get_new_messages_for_device = DataStore.get_new_messages_for_device.__func__
- get_new_device_msgs_for_remote = DataStore.get_new_device_msgs_for_remote.__func__
- delete_messages_for_device = DataStore.delete_messages_for_device.__func__
- delete_device_msgs_for_remote = DataStore.delete_device_msgs_for_remote.__func__
+ get_to_device_stream_token = __func__(DataStore.get_to_device_stream_token)
+ get_new_messages_for_device = __func__(DataStore.get_new_messages_for_device)
+ get_new_device_msgs_for_remote = __func__(DataStore.get_new_device_msgs_for_remote)
+ delete_messages_for_device = __func__(DataStore.delete_messages_for_device)
+ delete_device_msgs_for_remote = __func__(DataStore.delete_device_msgs_for_remote)
def stream_positions(self):
result = super(SlavedDeviceInboxStore, self).stream_positions()
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
index 8206a988..ec2fd561 100644
--- a/synapse/replication/slave/storage/devices.py
+++ b/synapse/replication/slave/storage/devices.py
@@ -17,7 +17,7 @@ from synapse.storage import DataStore
from synapse.storage.end_to_end_keys import EndToEndKeyStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
from ._slaved_id_tracker import SlavedIdTracker
@@ -38,14 +38,14 @@ class SlavedDeviceStore(BaseSlavedStore):
"DeviceListFederationStreamChangeCache", device_list_max,
)
- get_device_stream_token = DataStore.get_device_stream_token.__func__
- get_user_whose_devices_changed = DataStore.get_user_whose_devices_changed.__func__
- get_devices_by_remote = DataStore.get_devices_by_remote.__func__
- _get_devices_by_remote_txn = DataStore._get_devices_by_remote_txn.__func__
- _get_e2e_device_keys_txn = DataStore._get_e2e_device_keys_txn.__func__
- mark_as_sent_devices_by_remote = DataStore.mark_as_sent_devices_by_remote.__func__
+ get_device_stream_token = __func__(DataStore.get_device_stream_token)
+ get_user_whose_devices_changed = __func__(DataStore.get_user_whose_devices_changed)
+ get_devices_by_remote = __func__(DataStore.get_devices_by_remote)
+ _get_devices_by_remote_txn = __func__(DataStore._get_devices_by_remote_txn)
+ _get_e2e_device_keys_txn = __func__(DataStore._get_e2e_device_keys_txn)
+ mark_as_sent_devices_by_remote = __func__(DataStore.mark_as_sent_devices_by_remote)
_mark_as_sent_devices_by_remote_txn = (
- DataStore._mark_as_sent_devices_by_remote_txn.__func__
+ __func__(DataStore._mark_as_sent_devices_by_remote_txn)
)
count_e2e_one_time_keys = EndToEndKeyStore.__dict__["count_e2e_one_time_keys"]
diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py
index 5777f07c..e933b170 100644
--- a/synapse/replication/slave/storage/groups.py
+++ b/synapse/replication/slave/storage/groups.py
@@ -16,7 +16,7 @@
from synapse.storage import DataStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
from ._slaved_id_tracker import SlavedIdTracker
@@ -33,9 +33,9 @@ class SlavedGroupServerStore(BaseSlavedStore):
"_group_updates_stream_cache", self._group_updates_id_gen.get_current_token(),
)
- get_groups_changes_for_user = DataStore.get_groups_changes_for_user.__func__
- get_group_stream_token = DataStore.get_group_stream_token.__func__
- get_all_groups_for_user = DataStore.get_all_groups_for_user.__func__
+ get_groups_changes_for_user = __func__(DataStore.get_groups_changes_for_user)
+ get_group_stream_token = __func__(DataStore.get_group_stream_token)
+ get_all_groups_for_user = __func__(DataStore.get_all_groups_for_user)
def stream_positions(self):
result = super(SlavedGroupServerStore, self).stream_positions()
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
index 05ed1684..8032f53f 100644
--- a/synapse/replication/slave/storage/keys.py
+++ b/synapse/replication/slave/storage/keys.py
@@ -16,7 +16,7 @@
from synapse.storage import DataStore
from synapse.storage.keys import KeyStore
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
class SlavedKeyStore(BaseSlavedStore):
@@ -24,11 +24,11 @@ class SlavedKeyStore(BaseSlavedStore):
"_get_server_verify_key"
]
- get_server_verify_keys = DataStore.get_server_verify_keys.__func__
- store_server_verify_key = DataStore.store_server_verify_key.__func__
+ get_server_verify_keys = __func__(DataStore.get_server_verify_keys)
+ store_server_verify_key = __func__(DataStore.store_server_verify_key)
- get_server_certificate = DataStore.get_server_certificate.__func__
- store_server_certificate = DataStore.store_server_certificate.__func__
+ get_server_certificate = __func__(DataStore.get_server_certificate)
+ store_server_certificate = __func__(DataStore.store_server_certificate)
- get_server_keys_json = DataStore.get_server_keys_json.__func__
- store_server_keys_json = DataStore.store_server_keys_json.__func__
+ get_server_keys_json = __func__(DataStore.get_server_keys_json)
+ store_server_keys_json = __func__(DataStore.store_server_keys_json)
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
index 80b74408..92447b00 100644
--- a/synapse/replication/slave/storage/presence.py
+++ b/synapse/replication/slave/storage/presence.py
@@ -17,7 +17,7 @@ from synapse.storage import DataStore
from synapse.storage.presence import PresenceStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
from ._slaved_id_tracker import SlavedIdTracker
@@ -34,8 +34,8 @@ class SlavedPresenceStore(BaseSlavedStore):
"PresenceStreamChangeCache", self._presence_id_gen.get_current_token()
)
- _get_active_presence = DataStore._get_active_presence.__func__
- take_presence_startup_info = DataStore.take_presence_startup_info.__func__
+ _get_active_presence = __func__(DataStore._get_active_presence)
+ take_presence_startup_info = __func__(DataStore.take_presence_startup_info)
_get_presence_for_user = PresenceStore.__dict__["_get_presence_for_user"]
get_presence_for_users = PresenceStore.__dict__["get_presence_for_users"]
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index cbe96458..586dddb4 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -106,7 +106,7 @@ class ReplicationClientHandler(object):
Can be overridden in subclasses to handle more.
"""
- logger.info("Received rdata %s -> %s", stream_name, token)
+ logger.debug("Received rdata %s -> %s", stream_name, token)
return self.store.process_replication_rows(stream_name, token, rows)
def on_position(self, stream_name, token):
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 74e892c1..0b3fe6cb 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -590,9 +590,9 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
pending_commands = LaterGauge(
"synapse_replication_tcp_protocol_pending_commands",
"",
- ["name", "conn_id"],
+ ["name"],
lambda: {
- (p.name, p.conn_id): len(p.pending_commands) for p in connected_connections
+ (p.name,): len(p.pending_commands) for p in connected_connections
},
)
@@ -607,9 +607,9 @@ def transport_buffer_size(protocol):
transport_send_buffer = LaterGauge(
"synapse_replication_tcp_protocol_transport_send_buffer",
"",
- ["name", "conn_id"],
+ ["name"],
lambda: {
- (p.name, p.conn_id): transport_buffer_size(p) for p in connected_connections
+ (p.name,): transport_buffer_size(p) for p in connected_connections
},
)
@@ -632,9 +632,9 @@ def transport_kernel_read_buffer_size(protocol, read=True):
tcp_transport_kernel_send_buffer = LaterGauge(
"synapse_replication_tcp_protocol_transport_kernel_send_buffer",
"",
- ["name", "conn_id"],
+ ["name"],
lambda: {
- (p.name, p.conn_id): transport_kernel_read_buffer_size(p, False)
+ (p.name,): transport_kernel_read_buffer_size(p, False)
for p in connected_connections
},
)
@@ -643,9 +643,9 @@ tcp_transport_kernel_send_buffer = LaterGauge(
tcp_transport_kernel_read_buffer = LaterGauge(
"synapse_replication_tcp_protocol_transport_kernel_read_buffer",
"",
- ["name", "conn_id"],
+ ["name"],
lambda: {
- (p.name, p.conn_id): transport_kernel_read_buffer_size(p, True)
+ (p.name,): transport_kernel_read_buffer_size(p, True)
for p in connected_connections
},
)
@@ -654,9 +654,9 @@ tcp_transport_kernel_read_buffer = LaterGauge(
tcp_inbound_commands = LaterGauge(
"synapse_replication_tcp_protocol_inbound_commands",
"",
- ["command", "name", "conn_id"],
+ ["command", "name"],
lambda: {
- (k[0], p.name, p.conn_id): count
+ (k, p.name,): count
for p in connected_connections
for k, count in iteritems(p.inbound_commands_counter)
},
@@ -665,9 +665,9 @@ tcp_inbound_commands = LaterGauge(
tcp_outbound_commands = LaterGauge(
"synapse_replication_tcp_protocol_outbound_commands",
"",
- ["command", "name", "conn_id"],
+ ["command", "name"],
lambda: {
- (k[0], p.name, p.conn_id): count
+ (k, p.name,): count
for p in connected_connections
for k, count in iteritems(p.outbound_commands_counter)
},
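These hunks drop the per-connection conn_id label from the replication TCP gauges, so each metric is keyed by the connection name alone and the dict returned by the gauge's callable must use 1-tuples matching the single remaining label. A short restatement of the pattern, taking the LaterGauge signature (name, description, labels, callable) as shown above:

from synapse.metrics import LaterGauge

connected_connections = []  # live protocol instances, maintained elsewhere

pending_commands = LaterGauge(
    "synapse_replication_tcp_protocol_pending_commands",
    "",         # description
    ["name"],   # one label: the connection name
    # Evaluated lazily at scrape time; keys are tuples matching the labels.
    lambda: {
        (p.name,): len(p.pending_commands) for p in connected_connections
    },
)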
diff --git a/synapse/replication/tcp/streams.py b/synapse/replication/tcp/streams.py
index 55fe701c..c1e626be 100644
--- a/synapse/replication/tcp/streams.py
+++ b/synapse/replication/tcp/streams.py
@@ -196,7 +196,7 @@ class Stream(object):
)
if len(rows) >= MAX_EVENTS_BEHIND:
- raise Exception("stream %s has fallen behined" % (self.NAME))
+ raise Exception("stream %s has fallen behind" % (self.NAME))
else:
rows = yield self.update_function(
from_token, current_token,
diff --git a/res/templates/mail-Vector.css b/synapse/res/templates/mail-Vector.css
index 6a3e36ed..6a3e36ed 100644
--- a/res/templates/mail-Vector.css
+++ b/synapse/res/templates/mail-Vector.css
diff --git a/res/templates/mail.css b/synapse/res/templates/mail.css
index 5ab3e1b0..5ab3e1b0 100644
--- a/res/templates/mail.css
+++ b/synapse/res/templates/mail.css
diff --git a/res/templates/notif.html b/synapse/res/templates/notif.html
index 88b921ca..88b921ca 100644
--- a/res/templates/notif.html
+++ b/synapse/res/templates/notif.html
diff --git a/res/templates/notif.txt b/synapse/res/templates/notif.txt
index a37bee98..a37bee98 100644
--- a/res/templates/notif.txt
+++ b/synapse/res/templates/notif.txt
diff --git a/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html
index fcdb3109..fcdb3109 100644
--- a/res/templates/notif_mail.html
+++ b/synapse/res/templates/notif_mail.html
diff --git a/res/templates/notif_mail.txt b/synapse/res/templates/notif_mail.txt
index 24843042..24843042 100644
--- a/res/templates/notif_mail.txt
+++ b/synapse/res/templates/notif_mail.txt
diff --git a/res/templates/room.html b/synapse/res/templates/room.html
index 723c222d..723c222d 100644
--- a/res/templates/room.html
+++ b/synapse/res/templates/room.html
diff --git a/res/templates/room.txt b/synapse/res/templates/room.txt
index 84648c71..84648c71 100644
--- a/res/templates/room.txt
+++ b/synapse/res/templates/room.txt
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 3418f06f..5f35c2d1 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -46,6 +46,8 @@ from synapse.rest.client.v2_alpha import (
receipts,
register,
report_event,
+ room_keys,
+ room_upgrade_rest_servlet,
sendtodevice,
sync,
tags,
@@ -102,6 +104,7 @@ class ClientRestResource(JsonResource):
auth.register_servlets(hs, client_resource)
receipts.register_servlets(hs, client_resource)
read_marker.register_servlets(hs, client_resource)
+ room_keys.register_servlets(hs, client_resource)
keys.register_servlets(hs, client_resource)
tokenrefresh.register_servlets(hs, client_resource)
tags.register_servlets(hs, client_resource)
@@ -114,3 +117,4 @@ class ClientRestResource(JsonResource):
sendtodevice.register_servlets(hs, client_resource)
user_directory.register_servlets(hs, client_resource)
groups.register_servlets(hs, client_resource)
+ room_upgrade_rest_servlet.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index ad536ab5..41534b8c 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -101,7 +101,7 @@ class UserRegisterServlet(ClientV1RestServlet):
nonce = self.hs.get_secrets().token_hex(64)
self.nonces[nonce] = int(self.reactor.seconds())
- return (200, {"nonce": nonce.encode('ascii')})
+ return (200, {"nonce": nonce})
@defer.inlineCallbacks
def on_POST(self, request):
@@ -164,7 +164,7 @@ class UserRegisterServlet(ClientV1RestServlet):
key=self.hs.config.registration_shared_secret.encode(),
digestmod=hashlib.sha1,
)
- want_mac.update(nonce)
+ want_mac.update(nonce.encode('utf8'))
want_mac.update(b"\x00")
want_mac.update(username)
want_mac.update(b"\x00")
@@ -173,7 +173,10 @@ class UserRegisterServlet(ClientV1RestServlet):
want_mac.update(b"admin" if admin else b"notadmin")
want_mac = want_mac.hexdigest()
- if not hmac.compare_digest(want_mac, got_mac.encode('ascii')):
+ if not hmac.compare_digest(
+ want_mac.encode('ascii'),
+ got_mac.encode('ascii')
+ ):
raise SynapseError(403, "HMAC incorrect")
# Reuse the parts of RegisterRestServlet to reduce code duplication
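The admin registration hunks above build want_mac as an HMAC-SHA1, keyed with the registration shared secret, over NUL-separated request fields, and now compare two byte strings of the same type. A client-side sketch of the same computation; the password field sits in the part of the function elided between the two hunks and is included here on the assumption that the standard shared-secret registration scheme applies:

import hashlib
import hmac

def registration_mac(shared_secret, nonce, username, password, admin=False):
    # Mirror of the server-side want_mac: HMAC-SHA1 over the NUL-separated
    # nonce, username, password and admin flag, returned as a hex digest.
    mac = hmac.new(
        key=shared_secret.encode('utf8'),
        digestmod=hashlib.sha1,
    )
    mac.update(nonce.encode('utf8'))
    mac.update(b"\x00")
    mac.update(username.encode('utf8'))
    mac.update(b"\x00")
    mac.update(password.encode('utf8'))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    return mac.hexdigest()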
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index 97733f30..0220acf6 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -74,38 +74,11 @@ class ClientDirectoryServer(ClientV1RestServlet):
if room is None:
raise SynapseError(400, "Room does not exist")
- dir_handler = self.handlers.directory_handler
+ requester = yield self.auth.get_user_by_req(request)
- try:
- # try to auth as a user
- requester = yield self.auth.get_user_by_req(request)
- try:
- user_id = requester.user.to_string()
- yield dir_handler.create_association(
- user_id, room_alias, room_id, servers
- )
- yield dir_handler.send_room_alias_update_event(
- requester,
- user_id,
- room_id
- )
- except SynapseError as e:
- raise e
- except Exception:
- logger.exception("Failed to create association")
- raise
- except AuthError:
- # try to auth as an application service
- service = yield self.auth.get_appservice_by_req(request)
- yield dir_handler.create_appservice_association(
- service, room_alias, room_id, servers
- )
- logger.info(
- "Application service at %s created alias %s pointing to %s",
- service.url,
- room_alias.to_string(),
- room_id
- )
+ yield self.handlers.directory_handler.create_association(
+ requester, room_alias, room_id, servers
+ )
defer.returnValue((200, {}))
@@ -135,7 +108,7 @@ class ClientDirectoryServer(ClientV1RestServlet):
room_alias = RoomAlias.from_string(room_alias)
yield dir_handler.delete_association(
- requester, user.to_string(), room_alias
+ requester, room_alias
)
logger.info(
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index 0f3a2e8b..cd9b3bdb 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -45,20 +45,20 @@ class EventStreamRestServlet(ClientV1RestServlet):
is_guest = requester.is_guest
room_id = None
if is_guest:
- if "room_id" not in request.args:
+ if b"room_id" not in request.args:
raise SynapseError(400, "Guest users must specify room_id param")
- if "room_id" in request.args:
- room_id = request.args["room_id"][0]
+ if b"room_id" in request.args:
+ room_id = request.args[b"room_id"][0].decode('ascii')
pagin_config = PaginationConfig.from_request(request)
timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
- if "timeout" in request.args:
+ if b"timeout" in request.args:
try:
- timeout = int(request.args["timeout"][0])
+ timeout = int(request.args[b"timeout"][0])
except ValueError:
raise SynapseError(400, "timeout must be in milliseconds.")
- as_client_event = "raw" not in request.args
+ as_client_event = b"raw" not in request.args
chunk = yield self.event_stream_handler.get_stream(
requester.user.to_string(),
diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py
index fd5f85b5..3ead75cb 100644
--- a/synapse/rest/client/v1/initial_sync.py
+++ b/synapse/rest/client/v1/initial_sync.py
@@ -32,7 +32,7 @@ class InitialSyncRestServlet(ClientV1RestServlet):
@defer.inlineCallbacks
def on_GET(self, request):
requester = yield self.auth.get_user_by_req(request)
- as_client_event = "raw" not in request.args
+ as_client_event = b"raw" not in request.args
pagination_config = PaginationConfig.from_request(request)
include_archived = parse_boolean(request, "archived", default=False)
content = yield self.initial_sync_handler.snapshot_all_rooms(
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index cb85fa14..0010699d 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -14,10 +14,9 @@
# limitations under the License.
import logging
-import urllib
import xml.etree.ElementTree as ET
-from six.moves.urllib import parse as urlparse
+from six.moves import urllib
from canonicaljson import json
from saml2 import BINDING_HTTP_POST, config
@@ -134,7 +133,7 @@ class LoginRestServlet(ClientV1RestServlet):
LoginRestServlet.SAML2_TYPE):
relay_state = ""
if "relay_state" in login_submission:
- relay_state = "&RelayState=" + urllib.quote(
+ relay_state = "&RelayState=" + urllib.parse.quote(
login_submission["relay_state"])
result = {
"uri": "%s%s" % (self.idp_redirect_url, relay_state)
@@ -366,7 +365,7 @@ class SAML2RestServlet(ClientV1RestServlet):
(user_id, token) = yield handler.register_saml2(username)
# Forward to the RelayState callback along with ava
if 'RelayState' in request.args:
- request.redirect(urllib.unquote(
+ request.redirect(urllib.parse.unquote(
request.args['RelayState'][0]) +
'?status=authenticated&access_token=' +
token + '&user_id=' + user_id + '&ava=' +
@@ -377,7 +376,7 @@ class SAML2RestServlet(ClientV1RestServlet):
"user_id": user_id, "token": token,
"ava": saml2_auth.ava}))
elif 'RelayState' in request.args:
- request.redirect(urllib.unquote(
+ request.redirect(urllib.parse.unquote(
request.args['RelayState'][0]) +
'?status=not_authenticated')
finish_request(request)
@@ -390,21 +389,22 @@ class CasRedirectServlet(ClientV1RestServlet):
def __init__(self, hs):
super(CasRedirectServlet, self).__init__(hs)
- self.cas_server_url = hs.config.cas_server_url
- self.cas_service_url = hs.config.cas_service_url
+ self.cas_server_url = hs.config.cas_server_url.encode('ascii')
+ self.cas_service_url = hs.config.cas_service_url.encode('ascii')
def on_GET(self, request):
args = request.args
- if "redirectUrl" not in args:
+ if b"redirectUrl" not in args:
return (400, "Redirect URL not specified for CAS auth")
- client_redirect_url_param = urllib.urlencode({
- "redirectUrl": args["redirectUrl"][0]
- })
- hs_redirect_url = self.cas_service_url + "/_matrix/client/api/v1/login/cas/ticket"
- service_param = urllib.urlencode({
- "service": "%s?%s" % (hs_redirect_url, client_redirect_url_param)
- })
- request.redirect("%s/login?%s" % (self.cas_server_url, service_param))
+ client_redirect_url_param = urllib.parse.urlencode({
+ b"redirectUrl": args[b"redirectUrl"][0]
+ }).encode('ascii')
+ hs_redirect_url = (self.cas_service_url +
+ b"/_matrix/client/api/v1/login/cas/ticket")
+ service_param = urllib.parse.urlencode({
+ b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param)
+ }).encode('ascii')
+ request.redirect(b"%s/login?%s" % (self.cas_server_url, service_param))
finish_request(request)
@@ -422,11 +422,11 @@ class CasTicketServlet(ClientV1RestServlet):
@defer.inlineCallbacks
def on_GET(self, request):
- client_redirect_url = request.args["redirectUrl"][0]
+ client_redirect_url = request.args[b"redirectUrl"][0]
http_client = self.hs.get_simple_http_client()
uri = self.cas_server_url + "/proxyValidate"
args = {
- "ticket": request.args["ticket"],
+ "ticket": request.args[b"ticket"][0].decode('ascii'),
"service": self.cas_service_url
}
try:
@@ -471,11 +471,11 @@ class CasTicketServlet(ClientV1RestServlet):
finish_request(request)
def add_login_token_to_redirect_url(self, url, token):
- url_parts = list(urlparse.urlparse(url))
- query = dict(urlparse.parse_qsl(url_parts[4]))
+ url_parts = list(urllib.parse.urlparse(url))
+ query = dict(urllib.parse.parse_qsl(url_parts[4]))
query.update({"loginToken": token})
- url_parts[4] = urllib.urlencode(query)
- return urlparse.urlunparse(url_parts)
+ url_parts[4] = urllib.parse.urlencode(query).encode('ascii')
+ return urllib.parse.urlunparse(url_parts)
def parse_cas_response(self, cas_response_body):
user = None
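The login servlet now takes urllib from six.moves so the same urllib.parse.* names resolve on both Python 2 and Python 3. A small self-contained sketch of the pattern used in add_login_token_to_redirect_url above (the function and parameter names here are illustrative):

from six.moves import urllib

def add_query_param(url, key, value):
    # Split the URL, merge the new parameter into its query string, and
    # reassemble it - the same approach used for loginToken above.
    parts = list(urllib.parse.urlparse(url))
    query = dict(urllib.parse.parse_qsl(parts[4]))
    query[key] = value
    parts[4] = urllib.parse.urlencode(query)
    return urllib.parse.urlunparse(parts)

# add_query_param("https://cas.example.com/cb?a=1", "loginToken", "abc")
# -> "https://cas.example.com/cb?a=1&loginToken=abc"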
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index 6e95d9be..9382b1f1 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -46,7 +46,7 @@ class PushRuleRestServlet(ClientV1RestServlet):
try:
priority_class = _priority_class_from_spec(spec)
except InvalidRuleException as e:
- raise SynapseError(400, e.message)
+ raise SynapseError(400, str(e))
requester = yield self.auth.get_user_by_req(request)
@@ -73,7 +73,7 @@ class PushRuleRestServlet(ClientV1RestServlet):
content,
)
except InvalidRuleException as e:
- raise SynapseError(400, e.message)
+ raise SynapseError(400, str(e))
before = parse_string(request, "before")
if before:
@@ -95,9 +95,9 @@ class PushRuleRestServlet(ClientV1RestServlet):
)
self.notify_user(user_id)
except InconsistentRuleException as e:
- raise SynapseError(400, e.message)
+ raise SynapseError(400, str(e))
except RuleNotFoundException as e:
- raise SynapseError(400, e.message)
+ raise SynapseError(400, str(e))
defer.returnValue((200, {}))
@@ -142,10 +142,10 @@ class PushRuleRestServlet(ClientV1RestServlet):
PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
)
- if path[0] == '':
+ if path[0] == b'':
defer.returnValue((200, rules))
- elif path[0] == 'global':
- path = path[1:]
+ elif path[0] == b'global':
+ path = [x.decode('ascii') for x in path[1:]]
result = _filter_ruleset_with_path(rules['global'], path)
defer.returnValue((200, result))
else:
@@ -192,10 +192,10 @@ class PushRuleRestServlet(ClientV1RestServlet):
def _rule_spec_from_path(path):
if len(path) < 2:
raise UnrecognizedRequestError()
- if path[0] != 'pushrules':
+ if path[0] != b'pushrules':
raise UnrecognizedRequestError()
- scope = path[1]
+ scope = path[1].decode('ascii')
path = path[2:]
if scope != 'global':
raise UnrecognizedRequestError()
@@ -203,13 +203,13 @@ def _rule_spec_from_path(path):
if len(path) == 0:
raise UnrecognizedRequestError()
- template = path[0]
+ template = path[0].decode('ascii')
path = path[1:]
if len(path) == 0 or len(path[0]) == 0:
raise UnrecognizedRequestError()
- rule_id = path[0]
+ rule_id = path[0].decode('ascii')
spec = {
'scope': scope,
@@ -220,7 +220,7 @@ def _rule_spec_from_path(path):
path = path[1:]
if len(path) > 0 and len(path[0]) > 0:
- spec['attr'] = path[0]
+ spec['attr'] = path[0].decode('ascii')
return spec
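Most of the b'...' changes in this file, and in the other servlets in this patch, follow from Twisted on Python 3 exposing URL path segments and query parameters as bytes, so they are decoded to text before being used as rule identifiers. A tiny illustration with a hypothetical path:

# Path segments arrive as bytes under Python 3, hence the explicit decoding.
path = [b'pushrules', b'global', b'override', b'.m.rule.contains_user_name']

scope = path[1].decode('ascii')      # 'global'
template = path[2].decode('ascii')   # 'override'
rule_id = path[3].decode('ascii')    # '.m.rule.contains_user_name'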
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index 182a68b1..b84f0260 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -59,7 +59,7 @@ class PushersRestServlet(ClientV1RestServlet):
]
for p in pushers:
- for k, v in p.items():
+ for k, v in list(p.items()):
if k not in allowed_keys:
del p[k]
@@ -126,7 +126,7 @@ class PushersSetRestServlet(ClientV1RestServlet):
profile_tag=content.get('profile_tag', ""),
)
except PusherConfigException as pce:
- raise SynapseError(400, "Config Error: " + pce.message,
+ raise SynapseError(400, "Config Error: " + str(pce),
errcode=Codes.MISSING_PARAM)
self.notifier.on_new_replication_data()
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 976d9838..fcfe7857 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -33,6 +33,7 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
+from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID
@@ -207,7 +208,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet):
"sender": requester.user.to_string(),
}
- if 'ts' in request.args and requester.app_service:
+ if b'ts' in request.args and requester.app_service:
event_dict['origin_server_ts'] = parse_integer(request, "ts", 0)
event = yield self.event_creation_hander.create_and_send_nonmember_event(
@@ -255,7 +256,9 @@ class JoinRoomAliasServlet(ClientV1RestServlet):
if RoomID.is_valid(room_identifier):
room_id = room_identifier
try:
- remote_room_hosts = request.args["server_name"]
+ remote_room_hosts = [
+ x.decode('ascii') for x in request.args[b"server_name"]
+ ]
except Exception:
remote_room_hosts = None
elif RoomAlias.is_valid(room_identifier):
@@ -407,7 +410,7 @@ class RoomMemberListRestServlet(ClientV1RestServlet):
room_id=room_id,
user_id=requester.user.to_string(),
at_token=at_token,
- types=[(EventTypes.Member, None)],
+ state_filter=StateFilter.from_types([(EventTypes.Member, None)]),
)
chunk = []
@@ -461,10 +464,10 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
pagination_config = PaginationConfig.from_request(
request, default_limit=10,
)
- as_client_event = "raw" not in request.args
- filter_bytes = parse_string(request, "filter")
+ as_client_event = b"raw" not in request.args
+ filter_bytes = parse_string(request, b"filter", encoding=None)
if filter_bytes:
- filter_json = urlparse.unquote(filter_bytes).decode("UTF-8")
+ filter_json = urlparse.unquote(filter_bytes.decode("UTF-8"))
event_filter = Filter(json.loads(filter_json))
else:
event_filter = None
@@ -560,7 +563,7 @@ class RoomEventContextServlet(ClientV1RestServlet):
# picking the API shape for symmetry with /messages
filter_bytes = parse_string(request, "filter")
if filter_bytes:
- filter_json = urlparse.unquote(filter_bytes).decode("UTF-8")
+ filter_json = urlparse.unquote(filter_bytes)
event_filter = Filter(json.loads(filter_json))
else:
event_filter = None
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py
index 62f4c3d9..53da905e 100644
--- a/synapse/rest/client/v1/voip.py
+++ b/synapse/rest/client/v1/voip.py
@@ -42,7 +42,11 @@ class VoipRestServlet(ClientV1RestServlet):
expiry = (self.hs.get_clock().time_msec() + userLifetime) / 1000
username = "%d:%s" % (expiry, requester.user.to_string())
- mac = hmac.new(turnSecret, msg=username, digestmod=hashlib.sha1)
+ mac = hmac.new(
+ turnSecret.encode(),
+ msg=username.encode(),
+ digestmod=hashlib.sha1
+ )
# We need to use standard padded base64 encoding here
# encode_base64 because we need to add the standard padding to get the
# same result as the TURN server.
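The username and HMAC above implement time-limited TURN credentials: the username embeds an expiry timestamp, and the password is the standard-padded base64 of an HMAC-SHA1 of that username, keyed with the shared TURN secret. A standalone sketch of the same derivation (the function name and default lifetime are illustrative):

import base64
import hashlib
import hmac
import time

def turn_credentials(turn_secret, user_id, lifetime_seconds=86400):
    # Username carries the expiry; password is the standard-padded base64
    # HMAC-SHA1 of the username, keyed with the shared secret (both as bytes).
    expiry = int(time.time()) + lifetime_seconds
    username = "%d:%s" % (expiry, user_id)
    mac = hmac.new(
        turn_secret.encode(),
        msg=username.encode(),
        digestmod=hashlib.sha1,
    )
    password = base64.b64encode(mac.digest()).decode('ascii')
    return username, password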
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 372648ca..37b32dd3 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -53,7 +53,9 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
if not check_3pid_allowed(self.hs, "email", body['email']):
raise SynapseError(
- 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED,
+ 403,
+ "Your email domain is not authorized on this server",
+ Codes.THREEPID_DENIED,
)
existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
@@ -89,7 +91,9 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
if not check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
- 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED,
+ 403,
+ "Account phone numbers are not authorized on this server",
+ Codes.THREEPID_DENIED,
)
existingUid = yield self.datastore.get_user_id_by_threepid(
@@ -241,7 +245,9 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
if not check_3pid_allowed(self.hs, "email", body['email']):
raise SynapseError(
- 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED,
+ 403,
+ "Your email domain is not authorized on this server",
+ Codes.THREEPID_DENIED,
)
existingUid = yield self.datastore.get_user_id_by_threepid(
@@ -276,7 +282,9 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
if not check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
- 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED,
+ 403,
+ "Account phone numbers are not authorized on this server",
+ Codes.THREEPID_DENIED,
)
existingUid = yield self.datastore.get_user_id_by_threepid(
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index bd8b5f4a..a8d8ed65 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -68,6 +68,29 @@ function captchaDone() {
</html>
"""
+TERMS_TEMPLATE = """
+<html>
+<head>
+<title>Authentication</title>
+<meta name='viewport' content='width=device-width, initial-scale=1,
+ user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
+</head>
+<body>
+<form id="registrationForm" method="post" action="%(myurl)s">
+ <div>
+ <p>
+ Please click the button below if you agree to the
+ <a href="%(terms_url)s">privacy policy of this homeserver.</a>
+ </p>
+ <input type="hidden" name="session" value="%(session)s" />
+ <input type="submit" value="Agree" />
+ </div>
+</form>
+</body>
+</html>
+"""
+
SUCCESS_TEMPLATE = """
<html>
<head>
@@ -99,7 +122,7 @@ class AuthRestServlet(RestServlet):
cannot be handled in the normal flow (with requests to the same endpoint).
Current use is for web fallback auth.
"""
- PATTERNS = client_v2_patterns("/auth/(?P<stagetype>[\w\.]*)/fallback/web")
+ PATTERNS = client_v2_patterns(r"/auth/(?P<stagetype>[\w\.]*)/fallback/web")
def __init__(self, hs):
super(AuthRestServlet, self).__init__()
@@ -133,13 +156,34 @@ class AuthRestServlet(RestServlet):
request.write(html_bytes)
finish_request(request)
defer.returnValue(None)
+ elif stagetype == LoginType.TERMS:
+ session = request.args['session'][0]
+
+ html = TERMS_TEMPLATE % {
+ 'session': session,
+ 'terms_url': "%s/_matrix/consent?v=%s" % (
+ self.hs.config.public_baseurl,
+ self.hs.config.user_consent_version,
+ ),
+ 'myurl': "%s/auth/%s/fallback/web" % (
+ CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
+ ),
+ }
+ html_bytes = html.encode("utf8")
+ request.setResponseCode(200)
+ request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+ request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
+
+ request.write(html_bytes)
+ finish_request(request)
+ defer.returnValue(None)
else:
raise SynapseError(404, "Unknown auth stage type")
@defer.inlineCallbacks
def on_POST(self, request, stagetype):
yield
- if stagetype == "m.login.recaptcha":
+ if stagetype == LoginType.RECAPTCHA:
if ('g-recaptcha-response' not in request.args or
len(request.args['g-recaptcha-response'])) == 0:
raise SynapseError(400, "No captcha response supplied")
@@ -179,6 +223,41 @@ class AuthRestServlet(RestServlet):
finish_request(request)
defer.returnValue(None)
+ elif stagetype == LoginType.TERMS:
+ if ('session' not in request.args or
+ len(request.args['session'])) == 0:
+ raise SynapseError(400, "No session supplied")
+
+ session = request.args['session'][0]
+ authdict = {'session': session}
+
+ success = yield self.auth_handler.add_oob_auth(
+ LoginType.TERMS,
+ authdict,
+ self.hs.get_ip_from_request(request)
+ )
+
+ if success:
+ html = SUCCESS_TEMPLATE
+ else:
+ html = TERMS_TEMPLATE % {
+ 'session': session,
+ 'terms_url': "%s/_matrix/consent?v=%s" % (
+ self.hs.config.public_baseurl,
+ self.hs.config.user_consent_version,
+ ),
+ 'myurl': "%s/auth/%s/fallback/web" % (
+ CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
+ ),
+ }
+ html_bytes = html.encode("utf8")
+ request.setResponseCode(200)
+ request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+ request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
+
+ request.write(html_bytes)
+ finish_request(request)
+ defer.returnValue(None)
else:
raise SynapseError(404, "Unknown auth stage type")
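The new m.login.terms fallback page is produced with plain %-style formatting of TERMS_TEMPLATE. A hedged sketch of filling it in outside the servlet; the base URL, consent version and URL prefix below are placeholders standing in for hs.config and CLIENT_V2_ALPHA_PREFIX:

from synapse.rest.client.v2_alpha.auth import TERMS_TEMPLATE

# Placeholder values standing in for the homeserver config used above.
public_baseurl = "https://matrix.example.com"
user_consent_version = "1.0"
session = "abcdef"

html = TERMS_TEMPLATE % {
    'session': session,
    'terms_url': "%s/_matrix/consent?v=%s" % (
        public_baseurl, user_consent_version,
    ),
    'myurl': "/_matrix/client/v2_alpha/auth/m.login.terms/fallback/web",
}
html_bytes = html.encode("utf8")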
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 2fb4d43c..0515715f 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -75,7 +75,9 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
if not check_3pid_allowed(self.hs, "email", body['email']):
raise SynapseError(
- 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED,
+ 403,
+ "Your email domain is not authorized to register on this server",
+ Codes.THREEPID_DENIED,
)
existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
@@ -115,7 +117,9 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
if not check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
- 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED,
+ 403,
+ "Phone numbers are not authorized to register on this server",
+ Codes.THREEPID_DENIED,
)
existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
@@ -355,6 +359,13 @@ class RegisterRestServlet(RestServlet):
[LoginType.MSISDN, LoginType.EMAIL_IDENTITY]
])
+ # Append m.login.terms to all flows if we're requiring consent
+ if self.hs.config.user_consent_at_registration:
+ new_flows = []
+ for flow in flows:
+ flow.append(LoginType.TERMS)
+ flows.extend(new_flows)
+
auth_result, params, session_id = yield self.auth_handler.check_auth(
flows, body, self.hs.get_ip_from_request(request)
)
@@ -373,7 +384,9 @@ class RegisterRestServlet(RestServlet):
if not check_3pid_allowed(self.hs, medium, address):
raise SynapseError(
- 403, "Third party identifier is not allowed",
+ 403,
+ "Third party identifiers (email/phone numbers)" +
+ " are not authorized on this server",
Codes.THREEPID_DENIED,
)
@@ -439,6 +452,12 @@ class RegisterRestServlet(RestServlet):
params.get("bind_msisdn")
)
+ if auth_result and LoginType.TERMS in auth_result:
+ logger.info("%s has consented to the privacy policy" % registered_user_id)
+ yield self.store.user_set_consent_version(
+ registered_user_id, self.hs.config.user_consent_version,
+ )
+
defer.returnValue((200, return_dict))
def on_OPTIONS(self, _):
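From the client's perspective, the m.login.terms stage appended to the registration flows above is completed like any other User-Interactive Authentication stage: the registration request is retried with an auth dict naming the stage and the session from the initial 401 response. A hedged sketch of such a request body (all values are illustrative):

# Second /register attempt, after the server returned a 401 listing
# m.login.terms among the remaining stages.
register_body = {
    "username": "alice",
    "password": "s3cret",
    "auth": {
        "type": "m.login.terms",
        "session": "xxxxxx",  # session id from the first 401 response
    },
}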
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
new file mode 100644
index 00000000..ab3f1bd2
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -0,0 +1,387 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017, 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.errors import Codes, NotFoundError, SynapseError
+from synapse.http.servlet import (
+ RestServlet,
+ parse_json_object_from_request,
+ parse_string,
+)
+
+from ._base import client_v2_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class RoomKeysServlet(RestServlet):
+ PATTERNS = client_v2_patterns(
+ "/room_keys/keys(/(?P<room_id>[^/]+))?(/(?P<session_id>[^/]+))?$"
+ )
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ super(RoomKeysServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, room_id, session_id):
+ """
+ Uploads one or more encrypted E2E room keys for backup purposes.
+ room_id: the ID of the room the keys are for (optional)
+ session_id: the ID for the E2E room keys for the room (optional)
+ version: the version of the user's backup which this data is for.
+ the version must already have been created via the /room_keys/version API.
+
+ Each session has:
+ * first_message_index: a numeric index indicating the oldest message
+ encrypted by this session.
+ * forwarded_count: how many times the uploading client claims this key
+ has been shared (forwarded)
+ * is_verified: whether the client that uploaded the keys claims they
+ were sent by a device which they've verified
+ * session_data: base64-encrypted data describing the session.
+
+ Returns 200 OK on success with body {}
+ Returns 403 Forbidden if the version in question is not the most recently
+ created version (i.e. if this is an old client trying to write to a stale backup)
+ Returns 404 Not Found if the version in question doesn't exist
+
+ The API is designed to be otherwise agnostic to the room_key encryption
+ algorithm being used. Sessions are merged with existing ones in the
+ backup using the heuristics:
+ * is_verified sessions always win over unverified sessions
+ * older first_message_index always wins over newer sessions
+ * lower forwarded_count always wins over higher forwarded_count
+
+ We trust the clients not to lie and corrupt their own backups.
+ It also means that if your access_token is stolen, the attacker could
+ delete your backup.
+
+ POST /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": false,
+ "session_data": "SSBBTSBBIEZJU0gK"
+ }
+
+ Or...
+
+ POST /room_keys/keys/!abc:matrix.org?version=1 HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "sessions": {
+ "c0ff33": {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": false,
+ "session_data": "SSBBTSBBIEZJU0gK"
+ }
+ }
+ }
+
+ Or...
+
+ POST /room_keys/keys?version=1 HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "rooms": {
+ "!abc:matrix.org": {
+ "sessions": {
+ "c0ff33": {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": false,
+ "session_data": "SSBBTSBBIEZJU0gK"
+ }
+ }
+ }
+ }
+ }
+ """
+ requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+ user_id = requester.user.to_string()
+ body = parse_json_object_from_request(request)
+ version = parse_string(request, "version")
+
+ if session_id:
+ body = {
+ "sessions": {
+ session_id: body
+ }
+ }
+
+ if room_id:
+ body = {
+ "rooms": {
+ room_id: body
+ }
+ }
+
+ yield self.e2e_room_keys_handler.upload_room_keys(
+ user_id, version, body
+ )
+ defer.returnValue((200, {}))
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_id, session_id):
+ """
+ Retrieves one or more encrypted E2E room keys for backup purposes.
+ Symmetric with the PUT version of the API.
+
+ room_id: the ID of the room to retrieve the keys for (optional)
+ session_id: the ID for the E2E room keys to retrieve the keys for (optional)
+ version: the version of the user's backup which this data is for.
+ the version must already have been created via the /change_secret API.
+
+ Returns as follows:
+
+ GET /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1
+ {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": false,
+ "session_data": "SSBBTSBBIEZJU0gK"
+ }
+
+ Or...
+
+ GET /room_keys/keys/!abc:matrix.org?version=1 HTTP/1.1
+ {
+ "sessions": {
+ "c0ff33": {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": false,
+ "session_data": "SSBBTSBBIEZJU0gK"
+ }
+ }
+ }
+
+ Or...
+
+ GET /room_keys/keys?version=1 HTTP/1.1
+ {
+ "rooms": {
+ "!abc:matrix.org": {
+ "sessions": {
+ "c0ff33": {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": false,
+ "session_data": "SSBBTSBBIEZJU0gK"
+ }
+ }
+ }
+ }
+ }
+ """
+ requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+ user_id = requester.user.to_string()
+ version = parse_string(request, "version")
+
+ room_keys = yield self.e2e_room_keys_handler.get_room_keys(
+ user_id, version, room_id, session_id
+ )
+
+ # Convert room_keys to the right format to return.
+ if session_id:
+ # If the client requests a specific session, but that session was
+ # not backed up, then return an M_NOT_FOUND.
+ if room_keys['rooms'] == {}:
+ raise NotFoundError("No room_keys found")
+ else:
+ room_keys = room_keys['rooms'][room_id]['sessions'][session_id]
+ elif room_id:
+ # If the client requests all sessions from a room, but no sessions
+ # are found, then return an empty result rather than an error, so
+ # that clients don't have to handle an error condition, and an
+ # empty result is valid. (Similarly if the client requests all
+ # sessions from the backup, but in that case, room_keys is already
+ # in the right format, so we don't need to do anything about it.)
+ if room_keys['rooms'] == {}:
+ room_keys = {'sessions': {}}
+ else:
+ room_keys = room_keys['rooms'][room_id]
+
+ defer.returnValue((200, room_keys))
+
+ @defer.inlineCallbacks
+ def on_DELETE(self, request, room_id, session_id):
+ """
+ Deletes one or more encrypted E2E room keys for a user for backup purposes.
+
+ DELETE /room_keys/keys/!abc:matrix.org/c0ff33?version=1
+ HTTP/1.1 200 OK
+ {}
+
+ room_id: the ID of the room whose keys to delete (optional)
+ session_id: the ID for the E2E session to delete (optional)
+ version: the version of the user's backup which this data is for.
+ the version must already have been created via the /change_secret API.
+ """
+
+ requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+ user_id = requester.user.to_string()
+ version = parse_string(request, "version")
+
+ yield self.e2e_room_keys_handler.delete_room_keys(
+ user_id, version, room_id, session_id
+ )
+ defer.returnValue((200, {}))
+
+
+class RoomKeysNewVersionServlet(RestServlet):
+ PATTERNS = client_v2_patterns(
+ "/room_keys/version$"
+ )
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ super(RoomKeysNewVersionServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ """
+ Create a new backup version for this user's room_keys with the given
+ info. The version is allocated by the server and returned to the user
+ in the response. This API is intended to be used whenever the user
+ changes the encryption key for their backups, ensuring that backups
+ encrypted with different keys don't collide.
+
+ It takes out an exclusive lock on this user's room_key backups, to ensure
+ clients only upload to the current backup.
+
+ The algorithm passed in the version info is a reverse-DNS namespaced
+ identifier to describe the format of the encrypted backed-up keys.
+
+ The auth_data is { user_id: "user_id", nonce: <random string> }
+ encrypted using the algorithm and current encryption key described above.
+
+ POST /room_keys/version
+ Content-Type: application/json
+ {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
+ }
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+ {
+ "version": 12345
+ }
+ """
+ requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+ user_id = requester.user.to_string()
+ info = parse_json_object_from_request(request)
+
+ new_version = yield self.e2e_room_keys_handler.create_version(
+ user_id, info
+ )
+ defer.returnValue((200, {"version": new_version}))
+
+ # we deliberately don't have a PUT /version, as these things really should
+ # be immutable to avoid people footgunning
+
+
+class RoomKeysVersionServlet(RestServlet):
+ PATTERNS = client_v2_patterns(
+ "/room_keys/version(/(?P<version>[^/]+))?$"
+ )
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ super(RoomKeysVersionServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, version):
+ """
+ Retrieve the version information about a given version of the user's
+ room_keys backup. If the version part is missing, returns info about the
+ most current backup version (if any)
+
+ It takes out an exclusive lock on this user's room_key backups, to ensure
+ clients only upload to the current backup.
+
+ Returns 404 if the given version does not exist.
+
+ GET /room_keys/version/12345 HTTP/1.1
+ {
+ "version": "12345",
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
+ }
+ """
+ requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+ user_id = requester.user.to_string()
+
+ try:
+ info = yield self.e2e_room_keys_handler.get_version_info(
+ user_id, version
+ )
+ except SynapseError as e:
+ if e.code == 404:
+ raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
+ defer.returnValue((200, info))
+
+ @defer.inlineCallbacks
+ def on_DELETE(self, request, version):
+ """
+ Delete the information about a given version of the user's
+ room_keys backup. If the version part is missing, deletes the most
+ current backup version (if any). Doesn't delete the actual room data.
+
+ DELETE /room_keys/version/12345 HTTP/1.1
+ HTTP/1.1 200 OK
+ {}
+ """
+ if version is None:
+ raise SynapseError(400, "No version specified to delete", Codes.NOT_FOUND)
+
+ requester = yield self.auth.get_user_by_req(request, allow_guest=False)
+ user_id = requester.user.to_string()
+
+ yield self.e2e_room_keys_handler.delete_version(
+ user_id, version
+ )
+ defer.returnValue((200, {}))
+
+
+def register_servlets(hs, http_server):
+ RoomKeysServlet(hs).register(http_server)
+ RoomKeysVersionServlet(hs).register(http_server)
+ RoomKeysNewVersionServlet(hs).register(http_server)
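A hedged sketch of exercising the new /room_keys endpoints from a client, following the request and response shapes documented in the docstrings above. The homeserver URL, the unstable client prefix and the use of an Authorization header are assumptions here:

import requests

BASE = "https://matrix.example.com/_matrix/client/unstable"
HEADERS = {"Authorization": "Bearer ACCESS_TOKEN"}

# 1. Create a backup version for this user's room keys.
resp = requests.post(
    BASE + "/room_keys/version",
    headers=HEADERS,
    json={
        "algorithm": "m.megolm_backup.v1",
        "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K",
    },
)
version = resp.json()["version"]

# 2. Upload one session's key for a room into that backup.
requests.put(
    BASE + "/room_keys/keys/!abc:matrix.org/c0ff33",
    headers=HEADERS,
    params={"version": version},
    json={
        "first_message_index": 1,
        "forwarded_count": 1,
        "is_verified": False,
        "session_data": "SSBBTSBBIEZJU0gK",
    },
)

# 3. Read the key back.
keys = requests.get(
    BASE + "/room_keys/keys/!abc:matrix.org/c0ff33",
    headers=HEADERS,
    params={"version": version},
).json()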
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
new file mode 100644
index 00000000..e6356101
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import KNOWN_ROOM_VERSIONS
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.servlet import (
+ RestServlet,
+ assert_params_in_dict,
+ parse_json_object_from_request,
+)
+
+from ._base import client_v2_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class RoomUpgradeRestServlet(RestServlet):
+ """Handler for room upgrade requests.
+
+ Handles requests of the form:
+
+ POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "new_version": "2",
+ }
+
+ Creates a new room and shuts down the old one. Returns the ID of the new room.
+
+ Args:
+ hs (synapse.server.HomeServer):
+ """
+ PATTERNS = client_v2_patterns(
+ # /rooms/$roomid/upgrade
+ "/rooms/(?P<room_id>[^/]*)/upgrade$",
+ v2_alpha=False,
+ )
+
+ def __init__(self, hs):
+ super(RoomUpgradeRestServlet, self).__init__()
+ self._hs = hs
+ self._room_creation_handler = hs.get_room_creation_handler()
+ self._auth = hs.get_auth()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, room_id):
+ requester = yield self._auth.get_user_by_req(request)
+
+ content = parse_json_object_from_request(request)
+ assert_params_in_dict(content, ("new_version", ))
+ new_version = content["new_version"]
+
+ if new_version not in KNOWN_ROOM_VERSIONS:
+ raise SynapseError(
+ 400,
+ "Your homeserver does not support this room version",
+ Codes.UNSUPPORTED_ROOM_VERSION,
+ )
+
+ new_room_id = yield self._room_creation_handler.upgrade_room(
+ requester, room_id, new_version
+ )
+
+ ret = {
+ "replacement_room": new_room_id,
+ }
+
+ defer.returnValue((200, ret))
+
+
+def register_servlets(hs, http_server):
+ RoomUpgradeRestServlet(hs).register(http_server)
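A hedged sketch of calling the new upgrade endpoint from a client; the homeserver URL, access token and room id are placeholders, and the r0 prefix is an assumption:

import requests

resp = requests.post(
    "https://matrix.example.com/_matrix/client/r0"
    "/rooms/!old:example.com/upgrade",
    headers={"Authorization": "Bearer ACCESS_TOKEN"},
    json={"new_version": "2"},
)
new_room_id = resp.json()["replacement_room"]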
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 1275baa1..02511467 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -25,6 +25,7 @@ from synapse.api.errors import SynapseError
from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection
from synapse.events.utils import (
format_event_for_client_v2_without_room_id,
+ format_event_raw,
serialize_event,
)
from synapse.handlers.presence import format_user_presence_state
@@ -88,7 +89,7 @@ class SyncRestServlet(RestServlet):
@defer.inlineCallbacks
def on_GET(self, request):
- if "from" in request.args:
+ if b"from" in request.args:
# /events used to use 'from', but /sync uses 'since'.
# Lets be helpful and whine if we see a 'from'.
raise SynapseError(
@@ -175,17 +176,28 @@ class SyncRestServlet(RestServlet):
@staticmethod
def encode_response(time_now, sync_result, access_token_id, filter):
+ if filter.event_format == 'client':
+ event_formatter = format_event_for_client_v2_without_room_id
+ elif filter.event_format == 'federation':
+ event_formatter = format_event_raw
+ else:
+ raise Exception("Unknown event format %s" % (filter.event_format, ))
+
joined = SyncRestServlet.encode_joined(
- sync_result.joined, time_now, access_token_id, filter.event_fields
+ sync_result.joined, time_now, access_token_id,
+ filter.event_fields,
+ event_formatter,
)
invited = SyncRestServlet.encode_invited(
sync_result.invited, time_now, access_token_id,
+ event_formatter,
)
archived = SyncRestServlet.encode_archived(
sync_result.archived, time_now, access_token_id,
filter.event_fields,
+ event_formatter,
)
return {
@@ -228,7 +240,7 @@ class SyncRestServlet(RestServlet):
}
@staticmethod
- def encode_joined(rooms, time_now, token_id, event_fields):
+ def encode_joined(rooms, time_now, token_id, event_fields, event_formatter):
"""
Encode the joined rooms in a sync result
@@ -240,7 +252,9 @@ class SyncRestServlet(RestServlet):
token_id(int): ID of the user's auth token - used for namespacing
of transaction IDs
event_fields(list<str>): List of event fields to include. If empty,
- all fields will be returned.
+ all fields will be returned.
+ event_formatter (func[dict]): function to convert from federation format
+ to client format
Returns:
dict[str, dict[str, object]]: the joined rooms list, in our
response format
@@ -248,13 +262,14 @@ class SyncRestServlet(RestServlet):
joined = {}
for room in rooms:
joined[room.room_id] = SyncRestServlet.encode_room(
- room, time_now, token_id, only_fields=event_fields
+ room, time_now, token_id, joined=True, only_fields=event_fields,
+ event_formatter=event_formatter,
)
return joined
@staticmethod
- def encode_invited(rooms, time_now, token_id):
+ def encode_invited(rooms, time_now, token_id, event_formatter):
"""
Encode the invited rooms in a sync result
@@ -264,7 +279,9 @@ class SyncRestServlet(RestServlet):
time_now(int): current time - used as a baseline for age
calculations
token_id(int): ID of the user's auth token - used for namespacing
- of transaction IDs
+ of transaction IDs
+ event_formatter (func[dict]): function to convert from federation format
+ to client format
Returns:
dict[str, dict[str, object]]: the invited rooms list, in our
@@ -274,7 +291,7 @@ class SyncRestServlet(RestServlet):
for room in rooms:
invite = serialize_event(
room.invite, time_now, token_id=token_id,
- event_format=format_event_for_client_v2_without_room_id,
+ event_format=event_formatter,
is_invite=True,
)
unsigned = dict(invite.get("unsigned", {}))
@@ -288,7 +305,7 @@ class SyncRestServlet(RestServlet):
return invited
@staticmethod
- def encode_archived(rooms, time_now, token_id, event_fields):
+ def encode_archived(rooms, time_now, token_id, event_fields, event_formatter):
"""
Encode the archived rooms in a sync result
@@ -300,7 +317,9 @@ class SyncRestServlet(RestServlet):
token_id(int): ID of the user's auth token - used for namespacing
of transaction IDs
event_fields(list<str>): List of event fields to include. If empty,
- all fields will be returned.
+ all fields will be returned.
+ event_formatter (func[dict]): function to convert from federation format
+ to client format
Returns:
dict[str, dict[str, object]]: The invited rooms list, in our
response format
@@ -308,13 +327,18 @@ class SyncRestServlet(RestServlet):
joined = {}
for room in rooms:
joined[room.room_id] = SyncRestServlet.encode_room(
- room, time_now, token_id, joined=False, only_fields=event_fields
+ room, time_now, token_id, joined=False,
+ only_fields=event_fields,
+ event_formatter=event_formatter,
)
return joined
@staticmethod
- def encode_room(room, time_now, token_id, joined=True, only_fields=None):
+ def encode_room(
+ room, time_now, token_id, joined,
+ only_fields, event_formatter,
+ ):
"""
Args:
room (JoinedSyncResult|ArchivedSyncResult): sync result for a
@@ -326,14 +350,15 @@ class SyncRestServlet(RestServlet):
joined (bool): True if the user is joined to this room - will mean
we handle ephemeral events
only_fields(list<str>): Optional. The list of event fields to include.
+ event_formatter (func[dict]): function to convert from federation format
+ to client format
Returns:
dict[str, object]: the room, encoded in our response format
"""
def serialize(event):
- # TODO(mjark): Respect formatting requirements in the filter.
return serialize_event(
event, time_now, token_id=token_id,
- event_format=format_event_for_client_v2_without_room_id,
+ event_format=event_formatter,
only_event_fields=only_fields,
)
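The event_formatter threaded through encode_joined, encode_invited and encode_archived above is chosen from the sync filter's event_format field, so a client can ask for raw federation-format events instead of the default client format. A hedged example of such a filter (field names follow the hunk above; the field list is illustrative):

# Sync filter requesting raw federation-format events, limited to a few fields.
sync_filter = {
    "event_format": "federation",   # default is "client"
    "event_fields": ["type", "content", "sender"],
}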
diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py
index d9d37918..b9b5d076 100644
--- a/synapse/rest/client/v2_alpha/thirdparty.py
+++ b/synapse/rest/client/v2_alpha/thirdparty.py
@@ -79,7 +79,7 @@ class ThirdPartyUserServlet(RestServlet):
yield self.auth.get_user_by_req(request, allow_guest=True)
fields = request.args
- fields.pop("access_token", None)
+ fields.pop(b"access_token", None)
results = yield self.appservice_handler.query_3pe(
ThirdPartyEntityKind.USER, protocol, fields
@@ -102,7 +102,7 @@ class ThirdPartyLocationServlet(RestServlet):
yield self.auth.get_user_by_req(request, allow_guest=True)
fields = request.args
- fields.pop("access_token", None)
+ fields.pop(b"access_token", None)
results = yield self.appservice_handler.query_3pe(
ThirdPartyEntityKind.LOCATION, protocol, fields
diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py
index 7362e185..ad525b22 100644
--- a/synapse/rest/consent/consent_resource.py
+++ b/synapse/rest/consent/consent_resource.py
@@ -137,27 +137,36 @@ class ConsentResource(Resource):
request (twisted.web.http.Request):
"""
- version = parse_string(request, "v",
- default=self._default_consent_version)
- username = parse_string(request, "u", required=True)
- userhmac = parse_string(request, "h", required=True, encoding=None)
+ version = parse_string(request, "v", default=self._default_consent_version)
+ username = parse_string(request, "u", required=False, default="")
+ userhmac = None
+ has_consented = False
+ public_version = username == ""
+ if not public_version:
+ userhmac_bytes = parse_string(request, "h", required=True, encoding=None)
- self._check_hash(username, userhmac)
+ self._check_hash(username, userhmac_bytes)
- if username.startswith('@'):
- qualified_user_id = username
- else:
- qualified_user_id = UserID(username, self.hs.hostname).to_string()
+ if username.startswith('@'):
+ qualified_user_id = username
+ else:
+ qualified_user_id = UserID(username, self.hs.hostname).to_string()
- u = yield self.store.get_user_by_id(qualified_user_id)
- if u is None:
- raise NotFoundError("Unknown user")
+ u = yield self.store.get_user_by_id(qualified_user_id)
+ if u is None:
+ raise NotFoundError("Unknown user")
+
+ has_consented = u["consent_version"] == version
+ userhmac = userhmac_bytes.decode("ascii")
try:
self._render_template(
request, "%s.html" % (version,),
- user=username, userhmac=userhmac, version=version,
- has_consented=(u["consent_version"] == version),
+ user=username,
+ userhmac=userhmac,
+ version=version,
+ has_consented=has_consented,
+ public_version=public_version,
)
except TemplateNotFound:
raise NotFoundError("Unknown policy version")
@@ -223,7 +232,7 @@ class ConsentResource(Resource):
key=self._hmac_secret,
msg=userid.encode('utf-8'),
digestmod=sha256,
- ).hexdigest()
+ ).hexdigest().encode('ascii')
if not compare_digest(want_mac, userhmac):
raise SynapseError(http_client.FORBIDDEN, "HMAC incorrect")
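The userhmac checked above is an HMAC-SHA256 of the (possibly unqualified) username, keyed with a secret from the homeserver config and carried as a hex string in the consent URI. A standalone sketch of generating it (the secret value below is a placeholder):

import hmac
from hashlib import sha256

def consent_userhmac(hmac_secret, userid):
    # Same computation as _check_hash above: hex HMAC-SHA256 of the user id,
    # keyed with the consent secret (bytes).
    return hmac.new(
        key=hmac_secret,
        msg=userid.encode('utf-8'),
        digestmod=sha256,
    ).hexdigest()

# consent_userhmac(b"form-secret", "@alice:example.com")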
diff --git a/synapse/rest/key/v1/__init__.py b/synapse/rest/key/v1/__init__.py
deleted file mode 100644
index fe0ac3f8..00000000
--- a/synapse/rest/key/v1/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py
deleted file mode 100644
index b9ee6e1c..00000000
--- a/synapse/rest/key/v1/server_key_resource.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import logging
-
-from canonicaljson import encode_canonical_json
-from signedjson.sign import sign_json
-from unpaddedbase64 import encode_base64
-
-from OpenSSL import crypto
-from twisted.web.resource import Resource
-
-from synapse.http.server import respond_with_json_bytes
-
-logger = logging.getLogger(__name__)
-
-
-class LocalKey(Resource):
- """HTTP resource containing encoding the TLS X.509 certificate and NACL
- signature verification keys for this server::
-
- GET /key HTTP/1.1
-
- HTTP/1.1 200 OK
- Content-Type: application/json
- {
- "server_name": "this.server.example.com"
- "verify_keys": {
- "algorithm:version": # base64 encoded NACL verification key.
- },
- "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert.
- "signatures": {
- "this.server.example.com": {
- "algorithm:version": # NACL signature for this server.
- }
- }
- }
- """
-
- def __init__(self, hs):
- self.response_body = encode_canonical_json(
- self.response_json_object(hs.config)
- )
- Resource.__init__(self)
-
- @staticmethod
- def response_json_object(server_config):
- verify_keys = {}
- for key in server_config.signing_key:
- verify_key_bytes = key.verify_key.encode()
- key_id = "%s:%s" % (key.alg, key.version)
- verify_keys[key_id] = encode_base64(verify_key_bytes)
-
- x509_certificate_bytes = crypto.dump_certificate(
- crypto.FILETYPE_ASN1,
- server_config.tls_certificate
- )
- json_object = {
- u"server_name": server_config.server_name,
- u"verify_keys": verify_keys,
- u"tls_certificate": encode_base64(x509_certificate_bytes)
- }
- for key in server_config.signing_key:
- json_object = sign_json(
- json_object,
- server_config.server_name,
- key,
- )
-
- return json_object
-
- def render_GET(self, request):
- return respond_with_json_bytes(
- request, 200, self.response_body,
- )
-
- def getChild(self, name, request):
- if name == '':
- return self
diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py
index 3491fd21..cb5abcf8 100644
--- a/synapse/rest/key/v2/__init__.py
+++ b/synapse/rest/key/v2/__init__.py
@@ -22,5 +22,5 @@ from .remote_key_resource import RemoteKey
class KeyApiV2Resource(Resource):
def __init__(self, hs):
Resource.__init__(self)
- self.putChild("server", LocalKey(hs))
- self.putChild("query", RemoteKey(hs))
+ self.putChild(b"server", LocalKey(hs))
+ self.putChild(b"query", RemoteKey(hs))
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 7d67e4b0..eb8782aa 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -103,7 +103,7 @@ class RemoteKey(Resource):
def async_render_GET(self, request):
if len(request.postpath) == 1:
server, = request.postpath
- query = {server: {}}
+ query = {server.decode('ascii'): {}}
elif len(request.postpath) == 2:
server, key_id = request.postpath
minimum_valid_until_ts = parse_integer(
@@ -112,11 +112,12 @@ class RemoteKey(Resource):
arguments = {}
if minimum_valid_until_ts is not None:
arguments["minimum_valid_until_ts"] = minimum_valid_until_ts
- query = {server: {key_id: arguments}}
+ query = {server.decode('ascii'): {key_id.decode('ascii'): arguments}}
else:
raise SynapseError(
404, "Not found %r" % request.postpath, Codes.NOT_FOUND
)
+
yield self.query_keys(request, query, query_remote_on_cache_miss=True)
def render_POST(self, request):
@@ -135,6 +136,7 @@ class RemoteKey(Resource):
@defer.inlineCallbacks
def query_keys(self, request, query, query_remote_on_cache_miss=False):
logger.info("Handling query for keys %r", query)
+
store_queries = []
for server_name, key_ids in query.items():
if (
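The decode calls are needed because request.postpath contains byte strings under Python 3, while the query dict handed to query_keys() is keyed by text server names and key IDs. A tiny sketch with invented values:

    # request.postpath yields bytes on Python 3; decode before using as dict keys.
    postpath = [b"matrix.org", b"ed25519:auto"]   # assumption: example path segments

    server, key_id = postpath
    query = {server.decode('ascii'): {key_id.decode('ascii'): {}}}
    print(query)   # {'matrix.org': {'ed25519:auto': {}}}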
diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py
index f255f288..5a426ff2 100644
--- a/synapse/rest/media/v0/content_repository.py
+++ b/synapse/rest/media/v0/content_repository.py
@@ -56,7 +56,7 @@ class ContentRepoResource(resource.Resource):
# servers.
# TODO: A little crude here, we could do this better.
- filename = request.path.split('/')[-1]
+ filename = request.path.decode('ascii').split('/')[-1]
# be paranoid
filename = re.sub("[^0-9A-z.-_]", "", filename)
@@ -78,7 +78,7 @@ class ContentRepoResource(resource.Resource):
# select private. don't bother setting Expires as all our matrix
# clients are smart enough to be happy with Cache-Control (right?)
request.setHeader(
- "Cache-Control", "public,max-age=86400,s-maxage=86400"
+ b"Cache-Control", b"public,max-age=86400,s-maxage=86400"
)
d = FileSender().beginFileTransfer(f, request)
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index 65f4bd29..76e479af 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -15,9 +15,8 @@
import logging
import os
-import urllib
-from six.moves.urllib import parse as urlparse
+from six.moves import urllib
from twisted.internet import defer
from twisted.protocols.basic import FileSender
@@ -35,10 +34,15 @@ def parse_media_id(request):
# This allows users to append e.g. /test.png to the URL. Useful for
# clients that parse the URL to see content type.
server_name, media_id = request.postpath[:2]
+
+ if isinstance(server_name, bytes):
+ server_name = server_name.decode('utf-8')
+ media_id = media_id.decode('utf8')
+
file_name = None
if len(request.postpath) > 2:
try:
- file_name = urlparse.unquote(request.postpath[-1]).decode("utf-8")
+ file_name = urllib.parse.unquote(request.postpath[-1].decode("utf-8"))
except UnicodeDecodeError:
pass
return server_name, media_id, file_name
@@ -93,22 +97,18 @@ def add_file_headers(request, media_type, file_size, upload_name):
file_size (int): Size in bytes of the media, if known.
upload_name (str): The name of the requested file, if any.
"""
+ def _quote(x):
+ return urllib.parse.quote(x.encode("utf-8"))
+
request.setHeader(b"Content-Type", media_type.encode("UTF-8"))
if upload_name:
if is_ascii(upload_name):
- request.setHeader(
- b"Content-Disposition",
- b"inline; filename=%s" % (
- urllib.quote(upload_name.encode("utf-8")),
- ),
- )
+ disposition = ("inline; filename=%s" % (_quote(upload_name),)).encode("ascii")
else:
- request.setHeader(
- b"Content-Disposition",
- b"inline; filename*=utf-8''%s" % (
- urllib.quote(upload_name.encode("utf-8")),
- ),
- )
+ disposition = (
+ "inline; filename*=utf-8''%s" % (_quote(upload_name),)).encode("ascii")
+
+ request.setHeader(b"Content-Disposition", disposition)
# cache for at least a day.
# XXX: we might want to turn this off for data we don't want to
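The rewritten header logic builds the Content-Disposition value as text, percent-encodes the filename, and only then encodes it to ASCII for setHeader(). A hedged standalone sketch of the same behaviour; is_ascii() here is a simplified stand-in for the helper in synapse.util.stringutils:

    from six.moves import urllib

    def _quote(x):
        return urllib.parse.quote(x.encode("utf-8"))

    def is_ascii(s):
        # simplified stand-in for synapse.util.stringutils.is_ascii
        try:
            s.encode("ascii")
            return True
        except UnicodeEncodeError:
            return False

    def content_disposition(upload_name):
        if is_ascii(upload_name):
            return ("inline; filename=%s" % (_quote(upload_name),)).encode("ascii")
        # RFC 5987-style encoding for non-ASCII names
        return ("inline; filename*=utf-8''%s" % (_quote(upload_name),)).encode("ascii")

    print(content_disposition(u"image.png"))     # b'inline; filename=image.png'
    print(content_disposition(u"caf\u00e9.png")) # filename*=utf-8''caf%C3%A9.png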
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py
index fbfa85f7..f911b120 100644
--- a/synapse/rest/media/v1/download_resource.py
+++ b/synapse/rest/media/v1/download_resource.py
@@ -47,12 +47,13 @@ class DownloadResource(Resource):
def _async_render_GET(self, request):
set_cors_headers(request)
request.setHeader(
- "Content-Security-Policy",
- "default-src 'none';"
- " script-src 'none';"
- " plugin-types application/pdf;"
- " style-src 'unsafe-inline';"
- " object-src 'self';"
+ b"Content-Security-Policy",
+ b"default-src 'none';"
+ b" script-src 'none';"
+ b" plugin-types application/pdf;"
+ b" style-src 'unsafe-inline';"
+ b" media-src 'self';"
+ b" object-src 'self';"
)
server_name, media_id, name = parse_media_id(request)
if server_name == self.server_name:
diff --git a/synapse/rest/media/v1/identicon_resource.py b/synapse/rest/media/v1/identicon_resource.py
deleted file mode 100644
index bdbd8d50..00000000
--- a/synapse/rest/media/v1/identicon_resource.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from pydenticon import Generator
-
-from twisted.web.resource import Resource
-
-from synapse.http.servlet import parse_integer
-
-FOREGROUND = [
- "rgb(45,79,255)",
- "rgb(254,180,44)",
- "rgb(226,121,234)",
- "rgb(30,179,253)",
- "rgb(232,77,65)",
- "rgb(49,203,115)",
- "rgb(141,69,170)"
-]
-
-BACKGROUND = "rgb(224,224,224)"
-SIZE = 5
-
-
-class IdenticonResource(Resource):
- isLeaf = True
-
- def __init__(self):
- Resource.__init__(self)
- self.generator = Generator(
- SIZE, SIZE, foreground=FOREGROUND, background=BACKGROUND,
- )
-
- def generate_identicon(self, name, width, height):
- v_padding = width % SIZE
- h_padding = height % SIZE
- top_padding = v_padding // 2
- left_padding = h_padding // 2
- bottom_padding = v_padding - top_padding
- right_padding = h_padding - left_padding
- width -= v_padding
- height -= h_padding
- padding = (top_padding, bottom_padding, left_padding, right_padding)
- identicon = self.generator.generate(
- name, width, height, padding=padding
- )
- return identicon
-
- def render_GET(self, request):
- name = "/".join(request.postpath)
- width = parse_integer(request, "width", default=96)
- height = parse_integer(request, "height", default=96)
- identicon_bytes = self.generate_identicon(name, width, height)
- request.setHeader(b"Content-Type", b"image/png")
- request.setHeader(
- b"Cache-Control", b"public,max-age=86400,s-maxage=86400"
- )
- return identicon_bytes
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 241c9720..d6c5f07a 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -20,12 +20,12 @@ import logging
import os
import shutil
-from six import iteritems
+from six import PY3, iteritems
from six.moves.urllib import parse as urlparse
import twisted.internet.error
import twisted.web.http
-from twisted.internet import defer, threads
+from twisted.internet import defer
from twisted.web.resource import Resource
from synapse.api.errors import (
@@ -36,8 +36,8 @@ from synapse.api.errors import (
)
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import logcontext
from synapse.util.async_helpers import Linearizer
-from synapse.util.logcontext import make_deferred_yieldable
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import is_ascii, random_string
@@ -45,7 +45,6 @@ from ._base import FileInfo, respond_404, respond_with_responder
from .config_resource import MediaConfigResource
from .download_resource import DownloadResource
from .filepath import MediaFilePaths
-from .identicon_resource import IdenticonResource
from .media_storage import MediaStorage
from .preview_url_resource import PreviewUrlResource
from .storage_provider import StorageProviderWrapper
@@ -397,13 +396,13 @@ class MediaRepository(object):
yield finish()
- media_type = headers["Content-Type"][0]
+ media_type = headers[b"Content-Type"][0].decode('ascii')
time_now_ms = self.clock.time_msec()
- content_disposition = headers.get("Content-Disposition", None)
+ content_disposition = headers.get(b"Content-Disposition", None)
if content_disposition:
- _, params = cgi.parse_header(content_disposition[0],)
+ _, params = cgi.parse_header(content_disposition[0].decode('ascii'),)
upload_name = None
# First check if there is a valid UTF-8 filename
@@ -419,9 +418,13 @@ class MediaRepository(object):
upload_name = upload_name_ascii
if upload_name:
- upload_name = urlparse.unquote(upload_name)
+ if PY3:
+ upload_name = urlparse.unquote(upload_name)
+ else:
+ upload_name = urlparse.unquote(upload_name.encode('ascii'))
try:
- upload_name = upload_name.decode("utf-8")
+ if isinstance(upload_name, bytes):
+ upload_name = upload_name.decode("utf-8")
except UnicodeDecodeError:
upload_name = None
else:
@@ -488,10 +491,11 @@ class MediaRepository(object):
))
thumbnailer = Thumbnailer(input_path)
- t_byte_source = yield make_deferred_yieldable(threads.deferToThread(
+ t_byte_source = yield logcontext.defer_to_thread(
+ self.hs.get_reactor(),
self._generate_thumbnail,
thumbnailer, t_width, t_height, t_method, t_type
- ))
+ )
if t_byte_source:
try:
@@ -530,10 +534,11 @@ class MediaRepository(object):
))
thumbnailer = Thumbnailer(input_path)
- t_byte_source = yield make_deferred_yieldable(threads.deferToThread(
+ t_byte_source = yield logcontext.defer_to_thread(
+ self.hs.get_reactor(),
self._generate_thumbnail,
thumbnailer, t_width, t_height, t_method, t_type
- ))
+ )
if t_byte_source:
try:
@@ -616,15 +621,17 @@ class MediaRepository(object):
for (t_width, t_height, t_type), t_method in iteritems(thumbnails):
# Generate the thumbnail
if t_method == "crop":
- t_byte_source = yield make_deferred_yieldable(threads.deferToThread(
+ t_byte_source = yield logcontext.defer_to_thread(
+ self.hs.get_reactor(),
thumbnailer.crop,
t_width, t_height, t_type,
- ))
+ )
elif t_method == "scale":
- t_byte_source = yield make_deferred_yieldable(threads.deferToThread(
+ t_byte_source = yield logcontext.defer_to_thread(
+ self.hs.get_reactor(),
thumbnailer.scale,
t_width, t_height, t_type,
- ))
+ )
else:
logger.error("Unrecognized method: %r", t_method)
continue
@@ -755,14 +762,14 @@ class MediaRepositoryResource(Resource):
Resource.__init__(self)
media_repo = hs.get_media_repository()
- self.putChild("upload", UploadResource(hs, media_repo))
- self.putChild("download", DownloadResource(hs, media_repo))
- self.putChild("thumbnail", ThumbnailResource(
+
+ self.putChild(b"upload", UploadResource(hs, media_repo))
+ self.putChild(b"download", DownloadResource(hs, media_repo))
+ self.putChild(b"thumbnail", ThumbnailResource(
hs, media_repo, media_repo.media_storage,
))
- self.putChild("identicon", IdenticonResource())
if hs.config.url_preview_enabled:
- self.putChild("preview_url", PreviewUrlResource(
+ self.putChild(b"preview_url", PreviewUrlResource(
hs, media_repo, media_repo.media_storage,
))
- self.putChild("config", MediaConfigResource(hs))
+ self.putChild(b"config", MediaConfigResource(hs))
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index a6189224..896078fe 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -21,9 +21,10 @@ import sys
import six
-from twisted.internet import defer, threads
+from twisted.internet import defer
from twisted.protocols.basic import FileSender
+from synapse.util import logcontext
from synapse.util.file_consumer import BackgroundFileConsumer
from synapse.util.logcontext import make_deferred_yieldable
@@ -64,9 +65,10 @@ class MediaStorage(object):
with self.store_into_file(file_info) as (f, fname, finish_cb):
# Write to the main repository
- yield make_deferred_yieldable(threads.deferToThread(
+ yield logcontext.defer_to_thread(
+ self.hs.get_reactor(),
_write_file_synchronously, source, f,
- ))
+ )
yield finish_cb()
defer.returnValue(fname)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 778ef973..91d1dafe 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
import cgi
import datetime
import errno
@@ -24,6 +25,7 @@ import shutil
import sys
import traceback
+import six
from six import string_types
from six.moves import urllib_parse as urlparse
@@ -79,7 +81,6 @@ class PreviewUrlResource(Resource):
# don't spider URLs more often than once an hour
expiry_ms=60 * 60 * 1000,
)
- self._cache.start()
self._cleaner_loop = self.clock.looping_call(
self._start_expire_url_cache_data, 10 * 1000,
@@ -99,7 +100,7 @@ class PreviewUrlResource(Resource):
# XXX: if get_user_by_req fails, what should we do in an async render?
requester = yield self.auth.get_user_by_req(request)
url = parse_string(request, "url")
- if "ts" in request.args:
+ if b"ts" in request.args:
ts = parse_integer(request, "ts")
else:
ts = self.clock.time_msec()
@@ -181,7 +182,12 @@ class PreviewUrlResource(Resource):
cache_result["expires_ts"] > ts and
cache_result["response_code"] / 100 == 2
):
- defer.returnValue(cache_result["og"])
+        # The OG data may be stored as text in the database rather than as bytes
+        # (as is the case on PostgreSQL). If so, encode it back before handing it on.
+ og = cache_result["og"]
+ if isinstance(og, six.text_type):
+ og = og.encode('utf8')
+ defer.returnValue(og)
return
media_info = yield self._download_url(url, user)
@@ -214,14 +220,17 @@ class PreviewUrlResource(Resource):
elif _is_html(media_info['media_type']):
# TODO: somehow stop a big HTML tree from exploding synapse's RAM
- file = open(media_info['filename'])
- body = file.read()
- file.close()
+ with open(media_info['filename'], 'rb') as file:
+ body = file.read()
# clobber the encoding from the content-type, or default to utf-8
# XXX: this overrides any <meta/> or XML charset headers in the body
# which may pose problems, but so far seems to work okay.
- match = re.match(r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I)
+ match = re.match(
+ r'.*; *charset="?(.*?)"?(;|$)',
+ media_info['media_type'],
+ re.I
+ )
encoding = match.group(1) if match else "utf-8"
og = decode_and_calc_og(body, media_info['uri'], encoding)
@@ -261,7 +270,7 @@ class PreviewUrlResource(Resource):
logger.debug("Calculated OG for %s as %s" % (url, og))
- jsonog = json.dumps(og)
+ jsonog = json.dumps(og).encode('utf8')
# store OG in history-aware DB cache
yield self.store.store_url_cache(
@@ -301,20 +310,20 @@ class PreviewUrlResource(Resource):
logger.warn("Error downloading %s: %r", url, e)
raise SynapseError(
500, "Failed to download content: %s" % (
- traceback.format_exception_only(sys.exc_type, e),
+ traceback.format_exception_only(sys.exc_info()[0], e),
),
Codes.UNKNOWN,
)
yield finish()
try:
- if "Content-Type" in headers:
- media_type = headers["Content-Type"][0]
+ if b"Content-Type" in headers:
+ media_type = headers[b"Content-Type"][0].decode('ascii')
else:
media_type = "application/octet-stream"
time_now_ms = self.clock.time_msec()
- content_disposition = headers.get("Content-Disposition", None)
+ content_disposition = headers.get(b"Content-Disposition", None)
if content_disposition:
_, params = cgi.parse_header(content_disposition[0],)
download_name = None
@@ -597,10 +606,13 @@ def _iterate_over_text(tree, *tags_to_ignore):
# to be returned.
elements = iter([tree])
while True:
- el = next(elements)
+ el = next(elements, None)
+ if el is None:
+ return
+
if isinstance(el, string_types):
yield el
- elif el is not None and el.tag not in tags_to_ignore:
+ elif el.tag not in tags_to_ignore:
# el.text is the text before the first child, so we can immediately
# return it if the text exists.
if el.text:
@@ -672,7 +684,7 @@ def summarize_paragraphs(text_nodes, min_size=200, max_size=500):
# This splits the paragraph into words, but keeping the
# (preceeding) whitespace intact so we can easily concat
# words back together.
- for match in re.finditer("\s*\S+", description):
+ for match in re.finditer(r"\s*\S+", description):
word = match.group()
# Keep adding words while the total length is less than
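The loosened charset regex also accepts values wrapped in double quotes (charset="utf-8"), which some servers send in Content-Type. A quick standalone check with invented content types:

    import re

    def charset_of(media_type):
        # mirrors the updated pattern: charset=utf-8 and charset="utf-8" both match
        match = re.match(r'.*; *charset="?(.*?)"?(;|$)', media_type, re.I)
        return match.group(1) if match else "utf-8"

    print(charset_of('text/html; charset=windows-1251'))   # windows-1251
    print(charset_of('text/html; charset="ISO-8859-1"'))   # ISO-8859-1
    print(charset_of('text/html'))                         # utf-8 (default)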
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 7b9f8b4d..5aa03031 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -17,9 +17,10 @@ import logging
import os
import shutil
-from twisted.internet import defer, threads
+from twisted.internet import defer
from synapse.config._base import Config
+from synapse.util import logcontext
from synapse.util.logcontext import run_in_background
from .media_storage import FileResponder
@@ -120,7 +121,8 @@ class FileStorageProviderBackend(StorageProvider):
if not os.path.exists(dirname):
os.makedirs(dirname)
- return threads.deferToThread(
+ return logcontext.defer_to_thread(
+ self.hs.get_reactor(),
shutil.copyfile, primary_fname, backup_fname,
)
diff --git a/synapse/server.py b/synapse/server.py
index 938a05f9..9985687b 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -23,6 +23,7 @@ import abc
import logging
from twisted.enterprise import adbapi
+from twisted.mail.smtp import sendmail
from twisted.web.client import BrowserLikePolicyForHTTPS
from synapse.api.auth import Auth
@@ -51,6 +52,7 @@ from synapse.handlers.deactivate_account import DeactivateAccountHandler
from synapse.handlers.device import DeviceHandler
from synapse.handlers.devicemessage import DeviceMessageHandler
from synapse.handlers.e2e_keys import E2eKeysHandler
+from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler
from synapse.handlers.events import EventHandler, EventStreamHandler
from synapse.handlers.groups_local import GroupsLocalHandler
from synapse.handlers.initial_sync import InitialSyncHandler
@@ -130,6 +132,7 @@ class HomeServer(object):
'auth_handler',
'device_handler',
'e2e_keys_handler',
+ 'e2e_room_keys_handler',
'event_handler',
'event_stream_handler',
'initial_sync_handler',
@@ -172,6 +175,7 @@ class HomeServer(object):
'message_handler',
'pagination_handler',
'room_context_handler',
+ 'sendmail',
]
# This is overridden in derived application classes
@@ -205,6 +209,7 @@ class HomeServer(object):
logger.info("Setting up.")
with self.get_db_conn() as conn:
self.datastore = self.DATASTORE_CLASS(conn, self)
+ conn.commit()
logger.info("Finished setting up.")
def get_reactor(self):
@@ -266,6 +271,9 @@ class HomeServer(object):
def build_room_creation_handler(self):
return RoomCreationHandler(self)
+ def build_sendmail(self):
+ return sendmail
+
def build_state_handler(self):
return StateHandler(self)
@@ -299,6 +307,9 @@ class HomeServer(object):
def build_e2e_keys_handler(self):
return E2eKeysHandler(self)
+ def build_e2e_room_keys_handler(self):
+ return E2eRoomKeysHandler(self)
+
def build_application_service_api(self):
return ApplicationServiceApi(self)
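build_sendmail() and build_e2e_room_keys_handler() plug into the HomeServer's dependency machinery: roughly, each name added to the server's dependency list gets a lazily-built get_<name>() accessor backed by the matching build_<name>() method. A toy sketch of that pattern (not the real HomeServer class):

    class ToyHomeServer(object):
        # assumption: a minimal stand-in for the dependency-driven builders
        DEPENDENCIES = ["sendmail"]

        def __init__(self):
            self._built = {}

        def _get(self, name):
            if name not in self._built:
                self._built[name] = getattr(self, "build_%s" % name)()
            return self._built[name]

        def build_sendmail(self):
            from twisted.mail.smtp import sendmail
            return sendmail

        def __getattr__(self, item):
            if item.startswith("get_") and item[4:] in self.DEPENDENCIES:
                return lambda: self._get(item[4:])
            raise AttributeError(item)

    hs = ToyHomeServer()
    print(hs.get_sendmail())   # the twisted.mail.smtp.sendmail callable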
diff --git a/synapse/server.pyi b/synapse/server.pyi
index ce284862..06cd083a 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -7,6 +7,9 @@ import synapse.handlers.auth
import synapse.handlers.deactivate_account
import synapse.handlers.device
import synapse.handlers.e2e_keys
+import synapse.handlers.room
+import synapse.handlers.room_member
+import synapse.handlers.message
import synapse.handlers.set_password
import synapse.rest.media.v1.media_repository
import synapse.server_notices.server_notices_manager
@@ -50,6 +53,9 @@ class HomeServer(object):
def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:
pass
+ def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
+ pass
+
def get_event_creation_handler(self) -> synapse.handlers.message.EventCreationHandler:
pass
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index d7ae22a6..70048b0c 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -19,13 +19,14 @@ from collections import namedtuple
from six import iteritems, itervalues
+import attr
from frozendict import frozendict
from twisted.internet import defer
from synapse.api.constants import EventTypes, RoomVersions
from synapse.events.snapshot import EventContext
-from synapse.state import v1
+from synapse.state import v1, v2
from synapse.util.async_helpers import Linearizer
from synapse.util.caches import get_cache_factor_for
from synapse.util.caches.expiringcache import ExpiringCache
@@ -95,10 +96,6 @@ class StateHandler(object):
self.hs = hs
self._state_resolution_handler = hs.get_state_resolution_handler()
- def start_caching(self):
- # TODO: remove this shim
- self._state_resolution_handler.start_caching()
-
@defer.inlineCallbacks
def get_current_state(self, room_id, event_type=None, state_key="",
latest_event_ids=None):
@@ -264,7 +261,7 @@ class StateHandler(object):
logger.debug("calling resolve_state_groups from compute_event_context")
entry = yield self.resolve_state_groups_for_events(
- event.room_id, [e for e, _ in event.prev_events],
+ event.room_id, event.prev_event_ids(),
)
prev_state_ids = entry.state
@@ -376,15 +373,10 @@ class StateHandler(object):
result = yield self._state_resolution_handler.resolve_state_groups(
room_id, room_version, state_groups_ids, None,
- self._state_map_factory,
+ state_res_store=StateResolutionStore(self.store),
)
defer.returnValue(result)
- def _state_map_factory(self, ev_ids):
- return self.store.get_events(
- ev_ids, get_prev_content=False, check_redacted=False,
- )
-
@defer.inlineCallbacks
def resolve_events(self, room_version, state_sets, event):
logger.info(
@@ -402,10 +394,10 @@ class StateHandler(object):
}
with Measure(self.clock, "state._resolve_events"):
- new_state = yield resolve_events_with_factory(
+ new_state = yield resolve_events_with_store(
room_version, state_set_ids,
event_map=state_map,
- state_map_factory=self._state_map_factory
+ state_res_store=StateResolutionStore(self.store),
)
new_state = {
@@ -428,9 +420,6 @@ class StateResolutionHandler(object):
self._state_cache = None
self.resolve_linearizer = Linearizer(name="state_resolve_lock")
- def start_caching(self):
- logger.debug("start_caching")
-
self._state_cache = ExpiringCache(
cache_name="state_cache",
clock=self.clock,
@@ -440,12 +429,10 @@ class StateResolutionHandler(object):
reset_expiry_on_get=True,
)
- self._state_cache.start()
-
@defer.inlineCallbacks
@log_function
def resolve_state_groups(
- self, room_id, room_version, state_groups_ids, event_map, state_map_factory,
+ self, room_id, room_version, state_groups_ids, event_map, state_res_store,
):
"""Resolves conflicts between a set of state groups
@@ -463,9 +450,11 @@ class StateResolutionHandler(object):
a dict from event_id to event, for any events that we happen to
have in flight (eg, those currently being persisted). This will be
used as a starting point fof finding the state we need; any missing
- events will be requested via state_map_factory.
+ events will be requested via state_res_store.
- If None, all events will be fetched via state_map_factory.
+ If None, all events will be fetched via state_res_store.
+
+ state_res_store (StateResolutionStore)
Returns:
Deferred[_StateCacheEntry]: resolved state
@@ -489,10 +478,10 @@ class StateResolutionHandler(object):
# start by assuming we won't have any conflicted state, and build up the new
# state map by iterating through the state groups. If we discover a conflict,
- # we give up and instead use `resolve_events_with_factory`.
+ # we give up and instead use `resolve_events_with_store`.
#
# XXX: is this actually worthwhile, or should we just let
- # resolve_events_with_factory do it?
+ # resolve_events_with_store do it?
new_state = {}
conflicted_state = False
for st in itervalues(state_groups_ids):
@@ -507,11 +496,11 @@ class StateResolutionHandler(object):
if conflicted_state:
logger.info("Resolving conflicted state for %r", room_id)
with Measure(self.clock, "state._resolve_events"):
- new_state = yield resolve_events_with_factory(
+ new_state = yield resolve_events_with_store(
room_version,
list(itervalues(state_groups_ids)),
event_map=event_map,
- state_map_factory=state_map_factory,
+ state_res_store=state_res_store,
)
# if the new state matches any of the input state groups, we can
@@ -592,7 +581,7 @@ def _make_state_cache_entry(
)
-def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory):
+def resolve_events_with_store(room_version, state_sets, event_map, state_res_store):
"""
Args:
room_version(str): Version of the room
@@ -608,17 +597,19 @@ def resolve_events_with_factory(room_version, state_sets, event_map, state_map_f
If None, all events will be fetched via state_map_factory.
- state_map_factory(func): will be called
- with a list of event_ids that are needed, and should return with
- a Deferred of dict of event_id to event.
+ state_res_store (StateResolutionStore)
Returns
Deferred[dict[(str, str), str]]:
a map from (type, state_key) to event_id.
"""
- if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,):
- return v1.resolve_events_with_factory(
- state_sets, event_map, state_map_factory,
+ if room_version == RoomVersions.V1:
+ return v1.resolve_events_with_store(
+ state_sets, event_map, state_res_store.get_events,
+ )
+ elif room_version in (RoomVersions.VDH_TEST, RoomVersions.STATE_V2_TEST):
+ return v2.resolve_events_with_store(
+ state_sets, event_map, state_res_store,
)
else:
# This should only happen if we added a version but forgot to add it to
@@ -626,3 +617,54 @@ def resolve_events_with_factory(room_version, state_sets, event_map, state_map_f
raise Exception(
"No state resolution algorithm defined for version %r" % (room_version,)
)
+
+
+@attr.s
+class StateResolutionStore(object):
+ """Interface that allows state resolution algorithms to access the database
+ in well defined way.
+
+ Args:
+ store (DataStore)
+ """
+
+ store = attr.ib()
+
+ def get_events(self, event_ids, allow_rejected=False):
+ """Get events from the database
+
+ Args:
+ event_ids (list): The event_ids of the events to fetch
+ allow_rejected (bool): If True return rejected events.
+
+ Returns:
+ Deferred[dict[str, FrozenEvent]]: Dict from event_id to event.
+ """
+
+ return self.store.get_events(
+ event_ids,
+ check_redacted=False,
+ get_prev_content=False,
+ allow_rejected=allow_rejected,
+ )
+
+ def get_auth_chain(self, event_ids):
+ """Gets the full auth chain for a set of events (including rejected
+ events).
+
+ Includes the given event IDs in the result.
+
+ Note that:
+ 1. All events must be state events.
+ 2. For v1 rooms this may not have the full auth chain in the
+ presence of rejected events
+
+ Args:
+ event_ids (list): The event IDs of the events to fetch the auth
+ chain for. Must be state events.
+
+ Returns:
+ Deferred[list[str]]: List of event IDs of the auth chain.
+ """
+
+ return self.store.get_auth_chain_ids(event_ids, include_given=True)
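StateResolutionStore exposes exactly two operations to the resolution algorithms, which also makes them easy to exercise against fakes. A hedged in-memory stand-in with invented event data, the kind of thing a unit test might use:

    import attr
    from twisted.internet import defer

    @attr.s
    class InMemoryStateResStore(object):
        """Toy replacement for StateResolutionStore backed by plain dicts."""

        events = attr.ib()       # dict[str, event]
        auth_chains = attr.ib()  # dict[str, list[str]] of auth-chain event IDs

        def get_events(self, event_ids, allow_rejected=False):
            return defer.succeed({
                eid: self.events[eid] for eid in event_ids if eid in self.events
            })

        def get_auth_chain(self, event_ids):
            chain = set(event_ids)
            for eid in event_ids:
                chain.update(self.auth_chains.get(eid, []))
            return defer.succeed(list(chain))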
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index c95477d3..70a981f4 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -31,7 +31,7 @@ POWER_KEY = (EventTypes.PowerLevels, "")
@defer.inlineCallbacks
-def resolve_events_with_factory(state_sets, event_map, state_map_factory):
+def resolve_events_with_store(state_sets, event_map, state_map_factory):
"""
Args:
state_sets(list): List of dicts of (type, state_key) -> event_id,
@@ -65,10 +65,15 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory):
for event_ids in itervalues(conflicted_state)
for event_id in event_ids
)
+ needed_event_count = len(needed_events)
if event_map is not None:
needed_events -= set(iterkeys(event_map))
- logger.info("Asking for %d conflicted events", len(needed_events))
+ logger.info(
+ "Asking for %d/%d conflicted events",
+ len(needed_events),
+ needed_event_count,
+ )
# dict[str, FrozenEvent]: a map from state event id to event. Only includes
# the state events which are in conflict (and those in event_map)
@@ -85,11 +90,16 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory):
)
new_needed_events = set(itervalues(auth_events))
+ new_needed_event_count = len(new_needed_events)
new_needed_events -= needed_events
if event_map is not None:
new_needed_events -= set(iterkeys(event_map))
- logger.info("Asking for %d auth events", len(new_needed_events))
+ logger.info(
+ "Asking for %d/%d auth events",
+ len(new_needed_events),
+ new_needed_event_count,
+ )
state_map_new = yield state_map_factory(new_needed_events)
state_map.update(state_map_new)
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
new file mode 100644
index 00000000..3573bb00
--- /dev/null
+++ b/synapse/state/v2.py
@@ -0,0 +1,548 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import heapq
+import itertools
+import logging
+
+from six import iteritems, itervalues
+
+from twisted.internet import defer
+
+from synapse import event_auth
+from synapse.api.constants import EventTypes
+from synapse.api.errors import AuthError
+
+logger = logging.getLogger(__name__)
+
+
+@defer.inlineCallbacks
+def resolve_events_with_store(state_sets, event_map, state_res_store):
+ """Resolves the state using the v2 state resolution algorithm
+
+ Args:
+ state_sets(list): List of dicts of (type, state_key) -> event_id,
+ which are the different state groups to resolve.
+
+ event_map(dict[str,FrozenEvent]|None):
+ a dict from event_id to event, for any events that we happen to
+ have in flight (eg, those currently being persisted). This will be
+ used as a starting point fof finding the state we need; any missing
+ events will be requested via state_res_store.
+
+ If None, all events will be fetched via state_res_store.
+
+ state_res_store (StateResolutionStore)
+
+ Returns
+ Deferred[dict[(str, str), str]]:
+ a map from (type, state_key) to event_id.
+ """
+
+ logger.debug("Computing conflicted state")
+
+    # We use event_map as a cache, so if it's None we need to initialize it
+ if event_map is None:
+ event_map = {}
+
+ # First split up the un/conflicted state
+ unconflicted_state, conflicted_state = _seperate(state_sets)
+
+ if not conflicted_state:
+ defer.returnValue(unconflicted_state)
+
+ logger.debug("%d conflicted state entries", len(conflicted_state))
+ logger.debug("Calculating auth chain difference")
+
+ # Also fetch all auth events that appear in only some of the state sets'
+ # auth chains.
+ auth_diff = yield _get_auth_chain_difference(
+ state_sets, event_map, state_res_store,
+ )
+
+ full_conflicted_set = set(itertools.chain(
+ itertools.chain.from_iterable(itervalues(conflicted_state)),
+ auth_diff,
+ ))
+
+ events = yield state_res_store.get_events([
+ eid for eid in full_conflicted_set
+ if eid not in event_map
+ ], allow_rejected=True)
+ event_map.update(events)
+
+ full_conflicted_set = set(eid for eid in full_conflicted_set if eid in event_map)
+
+ logger.debug("%d full_conflicted_set entries", len(full_conflicted_set))
+
+ # Get and sort all the power events (kicks/bans/etc)
+ power_events = (
+ eid for eid in full_conflicted_set
+ if _is_power_event(event_map[eid])
+ )
+
+ sorted_power_events = yield _reverse_topological_power_sort(
+ power_events,
+ event_map,
+ state_res_store,
+ full_conflicted_set,
+ )
+
+ logger.debug("sorted %d power events", len(sorted_power_events))
+
+ # Now sequentially auth each one
+ resolved_state = yield _iterative_auth_checks(
+ sorted_power_events, unconflicted_state, event_map,
+ state_res_store,
+ )
+
+ logger.debug("resolved power events")
+
+ # OK, so we've now resolved the power events. Now sort the remaining
+ # events using the mainline of the resolved power level.
+
+ leftover_events = [
+ ev_id
+ for ev_id in full_conflicted_set
+ if ev_id not in sorted_power_events
+ ]
+
+ logger.debug("sorting %d remaining events", len(leftover_events))
+
+ pl = resolved_state.get((EventTypes.PowerLevels, ""), None)
+ leftover_events = yield _mainline_sort(
+ leftover_events, pl, event_map, state_res_store,
+ )
+
+ logger.debug("resolving remaining events")
+
+ resolved_state = yield _iterative_auth_checks(
+ leftover_events, resolved_state, event_map,
+ state_res_store,
+ )
+
+ logger.debug("resolved")
+
+ # We make sure that unconflicted state always still applies.
+ resolved_state.update(unconflicted_state)
+
+ logger.debug("done")
+
+ defer.returnValue(resolved_state)
+
+
+@defer.inlineCallbacks
+def _get_power_level_for_sender(event_id, event_map, state_res_store):
+ """Return the power level of the sender of the given event according to
+ their auth events.
+
+ Args:
+ event_id (str)
+ event_map (dict[str,FrozenEvent])
+ state_res_store (StateResolutionStore)
+
+ Returns:
+ Deferred[int]
+ """
+ event = yield _get_event(event_id, event_map, state_res_store)
+
+ pl = None
+ for aid in event.auth_event_ids():
+ aev = yield _get_event(aid, event_map, state_res_store)
+ if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
+ pl = aev
+ break
+
+ if pl is None:
+ # Couldn't find power level. Check if they're the creator of the room
+ for aid in event.auth_event_ids():
+ aev = yield _get_event(aid, event_map, state_res_store)
+ if (aev.type, aev.state_key) == (EventTypes.Create, ""):
+ if aev.content.get("creator") == event.sender:
+ defer.returnValue(100)
+ break
+ defer.returnValue(0)
+
+ level = pl.content.get("users", {}).get(event.sender)
+ if level is None:
+ level = pl.content.get("users_default", 0)
+
+ if level is None:
+ defer.returnValue(0)
+ else:
+ defer.returnValue(int(level))
+
+
+@defer.inlineCallbacks
+def _get_auth_chain_difference(state_sets, event_map, state_res_store):
+ """Compare the auth chains of each state set and return the set of events
+ that only appear in some but not all of the auth chains.
+
+ Args:
+ state_sets (list)
+ event_map (dict[str,FrozenEvent])
+ state_res_store (StateResolutionStore)
+
+ Returns:
+ Deferred[set[str]]: Set of event IDs
+ """
+ common = set(itervalues(state_sets[0])).intersection(
+ *(itervalues(s) for s in state_sets[1:])
+ )
+
+ auth_sets = []
+ for state_set in state_sets:
+ auth_ids = set(
+ eid
+ for key, eid in iteritems(state_set)
+ if (key[0] in (
+ EventTypes.Member,
+ EventTypes.ThirdPartyInvite,
+ ) or key in (
+ (EventTypes.PowerLevels, ''),
+ (EventTypes.Create, ''),
+ (EventTypes.JoinRules, ''),
+ )) and eid not in common
+ )
+
+ auth_chain = yield state_res_store.get_auth_chain(auth_ids)
+ auth_ids.update(auth_chain)
+
+ auth_sets.append(auth_ids)
+
+ intersection = set(auth_sets[0]).intersection(*auth_sets[1:])
+ union = set().union(*auth_sets)
+
+ defer.returnValue(union - intersection)
+
+
+def _seperate(state_sets):
+ """Return the unconflicted and conflicted state. This is different than in
+ the original algorithm, as this defines a key to be conflicted if one of
+ the state sets doesn't have that key.
+
+ Args:
+ state_sets (list)
+
+ Returns:
+ tuple[dict, dict]: A tuple of unconflicted and conflicted state. The
+ conflicted state dict is a map from type/state_key to set of event IDs
+ """
+ unconflicted_state = {}
+ conflicted_state = {}
+
+ for key in set(itertools.chain.from_iterable(state_sets)):
+ event_ids = set(state_set.get(key) for state_set in state_sets)
+ if len(event_ids) == 1:
+ unconflicted_state[key] = event_ids.pop()
+ else:
+ event_ids.discard(None)
+ conflicted_state[key] = event_ids
+
+ return unconflicted_state, conflicted_state
+
+
+def _is_power_event(event):
+ """Return whether or not the event is a "power event", as defined by the
+ v2 state resolution algorithm
+
+ Args:
+ event (FrozenEvent)
+
+ Returns:
+ boolean
+ """
+ if (event.type, event.state_key) in (
+ (EventTypes.PowerLevels, ""),
+ (EventTypes.JoinRules, ""),
+ (EventTypes.Create, ""),
+ ):
+ return True
+
+ if event.type == EventTypes.Member:
+ if event.membership in ('leave', 'ban'):
+ return event.sender != event.state_key
+
+ return False
+
+
+@defer.inlineCallbacks
+def _add_event_and_auth_chain_to_graph(graph, event_id, event_map,
+ state_res_store, auth_diff):
+ """Helper function for _reverse_topological_power_sort that add the event
+ and its auth chain (that is in the auth diff) to the graph
+
+ Args:
+ graph (dict[str, set[str]]): A map from event ID to the events auth
+ event IDs
+ event_id (str): Event to add to the graph
+ event_map (dict[str,FrozenEvent])
+ state_res_store (StateResolutionStore)
+ auth_diff (set[str]): Set of event IDs that are in the auth difference.
+ """
+
+ state = [event_id]
+ while state:
+ eid = state.pop()
+ graph.setdefault(eid, set())
+
+ event = yield _get_event(eid, event_map, state_res_store)
+ for aid in event.auth_event_ids():
+ if aid in auth_diff:
+ if aid not in graph:
+ state.append(aid)
+
+ graph.setdefault(eid, set()).add(aid)
+
+
+@defer.inlineCallbacks
+def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_diff):
+ """Returns a list of the event_ids sorted by reverse topological ordering,
+ and then by power level and origin_server_ts
+
+ Args:
+ event_ids (list[str]): The events to sort
+ event_map (dict[str,FrozenEvent])
+ state_res_store (StateResolutionStore)
+ auth_diff (set[str]): Set of event IDs that are in the auth difference.
+
+ Returns:
+ Deferred[list[str]]: The sorted list
+ """
+
+ graph = {}
+ for event_id in event_ids:
+ yield _add_event_and_auth_chain_to_graph(
+ graph, event_id, event_map, state_res_store, auth_diff,
+ )
+
+ event_to_pl = {}
+ for event_id in graph:
+ pl = yield _get_power_level_for_sender(event_id, event_map, state_res_store)
+ event_to_pl[event_id] = pl
+
+ def _get_power_order(event_id):
+ ev = event_map[event_id]
+ pl = event_to_pl[event_id]
+
+ return -pl, ev.origin_server_ts, event_id
+
+ # Note: graph is modified during the sort
+ it = lexicographical_topological_sort(
+ graph,
+ key=_get_power_order,
+ )
+ sorted_events = list(it)
+
+ defer.returnValue(sorted_events)
+
+
+@defer.inlineCallbacks
+def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store):
+ """Sequentially apply auth checks to each event in given list, updating the
+ state as it goes along.
+
+ Args:
+ event_ids (list[str]): Ordered list of events to apply auth checks to
+ base_state (dict[tuple[str, str], str]): The set of state to start with
+ event_map (dict[str,FrozenEvent])
+ state_res_store (StateResolutionStore)
+
+ Returns:
+ Deferred[dict[tuple[str, str], str]]: Returns the final updated state
+ """
+ resolved_state = base_state.copy()
+
+ for event_id in event_ids:
+ event = event_map[event_id]
+
+ auth_events = {}
+ for aid in event.auth_event_ids():
+ ev = yield _get_event(aid, event_map, state_res_store)
+
+ if ev.rejected_reason is None:
+ auth_events[(ev.type, ev.state_key)] = ev
+
+ for key in event_auth.auth_types_for_event(event):
+ if key in resolved_state:
+ ev_id = resolved_state[key]
+ ev = yield _get_event(ev_id, event_map, state_res_store)
+
+ if ev.rejected_reason is None:
+ auth_events[key] = event_map[ev_id]
+
+ try:
+ event_auth.check(
+ event, auth_events,
+ do_sig_check=False,
+ do_size_check=False
+ )
+
+ resolved_state[(event.type, event.state_key)] = event_id
+ except AuthError:
+ pass
+
+ defer.returnValue(resolved_state)
+
+
+@defer.inlineCallbacks
+def _mainline_sort(event_ids, resolved_power_event_id, event_map,
+ state_res_store):
+ """Returns a sorted list of event_ids sorted by mainline ordering based on
+ the given event resolved_power_event_id
+
+ Args:
+ event_ids (list[str]): Events to sort
+ resolved_power_event_id (str): The final resolved power level event ID
+ event_map (dict[str,FrozenEvent])
+ state_res_store (StateResolutionStore)
+
+ Returns:
+ Deferred[list[str]]: The sorted list
+ """
+ mainline = []
+ pl = resolved_power_event_id
+ while pl:
+ mainline.append(pl)
+ pl_ev = yield _get_event(pl, event_map, state_res_store)
+ auth_events = pl_ev.auth_event_ids()
+ pl = None
+ for aid in auth_events:
+ ev = yield _get_event(aid, event_map, state_res_store)
+ if (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""):
+ pl = aid
+ break
+
+ mainline_map = {ev_id: i + 1 for i, ev_id in enumerate(reversed(mainline))}
+
+ event_ids = list(event_ids)
+
+ order_map = {}
+ for ev_id in event_ids:
+ depth = yield _get_mainline_depth_for_event(
+ event_map[ev_id], mainline_map,
+ event_map, state_res_store,
+ )
+ order_map[ev_id] = (depth, event_map[ev_id].origin_server_ts, ev_id)
+
+ event_ids.sort(key=lambda ev_id: order_map[ev_id])
+
+ defer.returnValue(event_ids)
+
+
+@defer.inlineCallbacks
+def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_store):
+ """Get the mainline depths for the given event based on the mainline map
+
+ Args:
+ event (FrozenEvent)
+ mainline_map (dict[str, int]): Map from event_id to mainline depth for
+ events in the mainline.
+ event_map (dict[str,FrozenEvent])
+ state_res_store (StateResolutionStore)
+
+ Returns:
+ Deferred[int]
+ """
+
+    # We do an iterative search, replacing `event` with the power level in its
+ # auth events (if any)
+ while event:
+ depth = mainline_map.get(event.event_id)
+ if depth is not None:
+ defer.returnValue(depth)
+
+ auth_events = event.auth_event_ids()
+ event = None
+
+ for aid in auth_events:
+ aev = yield _get_event(aid, event_map, state_res_store)
+ if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
+ event = aev
+ break
+
+ # Didn't find a power level auth event, so we just return 0
+ defer.returnValue(0)
+
+
+@defer.inlineCallbacks
+def _get_event(event_id, event_map, state_res_store):
+ """Helper function to look up event in event_map, falling back to looking
+ it up in the store
+
+ Args:
+ event_id (str)
+ event_map (dict[str,FrozenEvent])
+ state_res_store (StateResolutionStore)
+
+ Returns:
+ Deferred[FrozenEvent]
+ """
+ if event_id not in event_map:
+ events = yield state_res_store.get_events([event_id], allow_rejected=True)
+ event_map.update(events)
+ defer.returnValue(event_map[event_id])
+
+
+def lexicographical_topological_sort(graph, key):
+ """Performs a lexicographic reverse topological sort on the graph.
+
+ This returns a reverse topological sort (i.e. if node A references B then B
+ appears before A in the sort), with ties broken lexicographically based on
+    the return value of the `key` function.
+
+ NOTE: `graph` is modified during the sort.
+
+ Args:
+ graph (dict[str, set[str]]): A representation of the graph where each
+            node is a key in the dict and its value is that node's edges.
+ key (func): A function that takes a node and returns a value that is
+ comparable and used to order nodes
+
+ Yields:
+ str: The next node in the topological sort
+ """
+
+ # Note, this is basically Kahn's algorithm except we look at nodes with no
+ # outgoing edges, c.f.
+ # https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm
+ outdegree_map = graph
+ reverse_graph = {}
+
+    # List of nodes with zero out degree. Each entry is actually a tuple of
+ # `(key(node), node)` so that sorting does the right thing
+ zero_outdegree = []
+
+ for node, edges in iteritems(graph):
+ if len(edges) == 0:
+ zero_outdegree.append((key(node), node))
+
+ reverse_graph.setdefault(node, set())
+ for edge in edges:
+ reverse_graph.setdefault(edge, set()).add(node)
+
+ # heapq is a built in implementation of a sorted queue.
+ heapq.heapify(zero_outdegree)
+
+ while zero_outdegree:
+ _, node = heapq.heappop(zero_outdegree)
+
+ for parent in reverse_graph[node]:
+ out = outdegree_map[parent]
+ out.discard(node)
+ if len(out) == 0:
+ heapq.heappush(zero_outdegree, (key(parent), parent))
+
+ yield node
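A tiny usage sketch of the sort defined above (it assumes this patched Synapse is importable; the event IDs and edges are invented). The graph maps each node to the nodes it references and is consumed during iteration, so callers pass a throwaway copy:

    from synapse.state.v2 import lexicographical_topological_sort

    graph = {
        "$create": set(),
        "$power": {"$create"},
        "$join_rules": {"$create", "$power"},
    }

    # Ties are broken by the key function; here nodes are simply compared by ID.
    print(list(lexicographical_topological_sort(graph, key=lambda e: e)))
    # -> ['$create', '$power', '$join_rules']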
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 23b4a8d7..53c685c1 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -30,6 +30,7 @@ from .appservice import ApplicationServiceStore, ApplicationServiceTransactionSt
from .client_ips import ClientIpStore
from .deviceinbox import DeviceInboxStore
from .directory import DirectoryStore
+from .e2e_room_keys import EndToEndRoomKeyStore
from .end_to_end_keys import EndToEndKeyStore
from .engines import PostgresEngine
from .event_federation import EventFederationStore
@@ -77,6 +78,7 @@ class DataStore(RoomMemberStore, RoomStore,
ApplicationServiceTransactionStore,
ReceiptsStore,
EndToEndKeyStore,
+ EndToEndRoomKeyStore,
SearchStore,
TagsStore,
AccountDataStore,
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index be61147b..d9d0255d 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -18,7 +18,7 @@ import threading
import time
from six import PY2, iteritems, iterkeys, itervalues
-from six.moves import intern, range
+from six.moves import builtins, intern, range
from canonicaljson import json
from prometheus_client import Histogram
@@ -1233,7 +1233,7 @@ def db_to_json(db_content):
# psycopg2 on Python 2 returns buffer objects, which we need to cast to
# bytes to decode
- if PY2 and isinstance(db_content, buffer):
+ if PY2 and isinstance(db_content, builtins.buffer):
db_content = bytes(db_content)
# Decode it to a Unicode string before feeding it to json.loads, so we
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index 8fc678fa..9ad17b7c 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -119,21 +119,25 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
for entry in iteritems(to_update):
(user_id, access_token, ip), (user_agent, device_id, last_seen) = entry
- self._simple_upsert_txn(
- txn,
- table="user_ips",
- keyvalues={
- "user_id": user_id,
- "access_token": access_token,
- "ip": ip,
- "user_agent": user_agent,
- "device_id": device_id,
- },
- values={
- "last_seen": last_seen,
- },
- lock=False,
- )
+ try:
+ self._simple_upsert_txn(
+ txn,
+ table="user_ips",
+ keyvalues={
+ "user_id": user_id,
+ "access_token": access_token,
+ "ip": ip,
+ "user_agent": user_agent,
+ "device_id": device_id,
+ },
+ values={
+ "last_seen": last_seen,
+ },
+ lock=False,
+ )
+ except Exception as e:
+ # Failed to upsert, log and continue
+ logger.error("Failed to insert client IP %r: %r", entry, e)
@defer.inlineCallbacks
def get_last_client_ip_by_device(self, user_id, device_id):
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index d10ff9e4..ecdab34e 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -22,14 +22,19 @@ from twisted.internet import defer
from synapse.api.errors import StoreError
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
-from ._base import Cache, SQLBaseStore, db_to_json
+from ._base import Cache, db_to_json
logger = logging.getLogger(__name__)
+DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = (
+ "drop_device_list_streams_non_unique_indexes"
+)
-class DeviceStore(SQLBaseStore):
+
+class DeviceStore(BackgroundUpdateStore):
def __init__(self, db_conn, hs):
super(DeviceStore, self).__init__(db_conn, hs)
@@ -52,6 +57,30 @@ class DeviceStore(SQLBaseStore):
columns=["user_id", "device_id"],
)
+ # create a unique index on device_lists_remote_cache
+ self.register_background_index_update(
+ "device_lists_remote_cache_unique_idx",
+ index_name="device_lists_remote_cache_unique_id",
+ table="device_lists_remote_cache",
+ columns=["user_id", "device_id"],
+ unique=True,
+ )
+
+ # And one on device_lists_remote_extremeties
+ self.register_background_index_update(
+ "device_lists_remote_extremeties_unique_idx",
+ index_name="device_lists_remote_extremeties_unique_idx",
+ table="device_lists_remote_extremeties",
+ columns=["user_id"],
+ unique=True,
+ )
+
+ # once they complete, we can remove the old non-unique indexes.
+ self.register_background_update_handler(
+ DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES,
+ self._drop_device_list_streams_non_unique_indexes,
+ )
+
@defer.inlineCallbacks
def store_device(self, user_id, device_id,
initial_device_display_name):
@@ -239,7 +268,19 @@ class DeviceStore(SQLBaseStore):
def update_remote_device_list_cache_entry(self, user_id, device_id, content,
stream_id):
- """Updates a single user's device in the cache.
+ """Updates a single device in the cache of a remote user's devicelist.
+
+ Note: assumes that we are the only thread that can be updating this user's
+ device list.
+
+ Args:
+ user_id (str): User to update device list for
+            device_id (str): ID of device being updated
+ content (dict): new data on this device
+ stream_id (int): the version of the device list
+
+ Returns:
+ Deferred[None]
"""
return self.runInteraction(
"update_remote_device_list_cache_entry",
@@ -272,7 +313,11 @@ class DeviceStore(SQLBaseStore):
},
values={
"content": json.dumps(content),
- }
+ },
+
+ # we don't need to lock, because we assume we are the only thread
+ # updating this user's devices.
+ lock=False,
)
txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id,))
@@ -289,11 +334,26 @@ class DeviceStore(SQLBaseStore):
},
values={
"stream_id": stream_id,
- }
+ },
+
+ # again, we can assume we are the only thread updating this user's
+ # extremity.
+ lock=False,
)
def update_remote_device_list_cache(self, user_id, devices, stream_id):
- """Replace the cache of the remote user's devices.
+ """Replace the entire cache of the remote user's devices.
+
+ Note: assumes that we are the only thread that can be updating this user's
+ device list.
+
+ Args:
+ user_id (str): User to update device list for
+ devices (list[dict]): list of device objects supplied over federation
+ stream_id (int): the version of the device list
+
+ Returns:
+ Deferred[None]
"""
return self.runInteraction(
"update_remote_device_list_cache",
@@ -338,7 +398,11 @@ class DeviceStore(SQLBaseStore):
},
values={
"stream_id": stream_id,
- }
+ },
+
+ # we don't need to lock, because we can assume we are the only thread
+ # updating this user's extremity.
+ lock=False,
)
def get_devices_by_remote(self, destination, from_stream_id):
@@ -589,10 +653,14 @@ class DeviceStore(SQLBaseStore):
combined list of changes to devices, and which destinations need to be
poked. `destination` may be None if no destinations need to be poked.
"""
+ # We do a group by here as there can be a large number of duplicate
+ # entries, since we throw away device IDs.
sql = """
- SELECT stream_id, user_id, destination FROM device_lists_stream
+ SELECT MAX(stream_id) AS stream_id, user_id, destination
+ FROM device_lists_stream
LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id)
WHERE ? < stream_id AND stream_id <= ?
+ GROUP BY user_id, destination
"""
return self._execute(
"get_all_device_list_changes_for_remotes", None,
@@ -718,3 +786,19 @@ class DeviceStore(SQLBaseStore):
"_prune_old_outbound_device_pokes",
_prune_txn,
)
+
+ @defer.inlineCallbacks
+ def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size):
+ def f(conn):
+ txn = conn.cursor()
+ txn.execute(
+ "DROP INDEX IF EXISTS device_lists_remote_cache_id"
+ )
+ txn.execute(
+ "DROP INDEX IF EXISTS device_lists_remote_extremeties_id"
+ )
+ txn.close()
+
+ yield self.runWithConnection(f)
+ yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES)
+ defer.returnValue(1)
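The GROUP BY in get_all_device_list_changes_for_remotes collapses the many duplicate (user_id, destination) rows that appear once device IDs are thrown away, keeping only the highest stream_id per pair. A simplified sqlite3 sketch of that query shape, without the outbound-pokes join and with invented rows:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE device_lists_stream (stream_id INT, user_id TEXT, destination TEXT)"
    )
    conn.executemany(
        "INSERT INTO device_lists_stream VALUES (?, ?, ?)",
        [
            (1, "@alice:hs", "remote.example"),
            (2, "@alice:hs", "remote.example"),   # duplicate pair, higher stream_id
            (3, "@bob:hs", "other.example"),
        ],
    )

    rows = conn.execute(
        """
        SELECT MAX(stream_id) AS stream_id, user_id, destination
        FROM device_lists_stream
        WHERE ? < stream_id AND stream_id <= ?
        GROUP BY user_id, destination
        """,
        (0, 10),
    ).fetchall()
    print(rows)   # one row per (user_id, destination), e.g. (2, '@alice:hs', ...)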
diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py
index 80819423..61a029a5 100644
--- a/synapse/storage/directory.py
+++ b/synapse/storage/directory.py
@@ -75,7 +75,6 @@ class DirectoryWorkerStore(SQLBaseStore):
},
retcol="creator",
desc="get_room_alias_creator",
- allow_none=True
)
@cached(max_entries=5000)
@@ -91,7 +90,7 @@ class DirectoryWorkerStore(SQLBaseStore):
class DirectoryStore(DirectoryWorkerStore):
@defer.inlineCallbacks
def create_room_alias_association(self, room_alias, room_id, servers, creator=None):
- """ Creates an associatin between a room alias and room_id/servers
+ """ Creates an association between a room alias and room_id/servers
Args:
room_alias (RoomAlias)
diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py
new file mode 100644
index 00000000..16b7f005
--- /dev/null
+++ b/synapse/storage/e2e_room_keys.py
@@ -0,0 +1,335 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from twisted.internet import defer
+
+from synapse.api.errors import StoreError
+
+from ._base import SQLBaseStore
+
+
+class EndToEndRoomKeyStore(SQLBaseStore):
+
+ @defer.inlineCallbacks
+ def get_e2e_room_key(self, user_id, version, room_id, session_id):
+ """Get the encrypted E2E room key for a given session from a given
+ backup version of room_keys. We only store the 'best' room key for a given
+ session at a given time, as determined by the handler.
+
+ Args:
+ user_id(str): the user whose backup we're querying
+ version(str): the version ID of the backup for the set of keys we're querying
+ room_id(str): the ID of the room whose keys we're querying.
+ This is a bit redundant as it's implied by the session_id, but
+            we include it for consistency with the rest of the API.
+ session_id(str): the session whose room_key we're querying.
+
+ Returns:
+ A deferred dict giving the session_data and message metadata for
+ this room key.
+ """
+
+ row = yield self._simple_select_one(
+ table="e2e_room_keys",
+ keyvalues={
+ "user_id": user_id,
+ "version": version,
+ "room_id": room_id,
+ "session_id": session_id,
+ },
+ retcols=(
+ "first_message_index",
+ "forwarded_count",
+ "is_verified",
+ "session_data",
+ ),
+ desc="get_e2e_room_key",
+ )
+
+ row["session_data"] = json.loads(row["session_data"])
+
+ defer.returnValue(row)
+
+ @defer.inlineCallbacks
+ def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key):
+ """Replaces or inserts the encrypted E2E room key for a given session in
+ a given backup
+
+ Args:
+ user_id(str): the user whose backup we're setting
+ version(str): the version ID of the backup we're updating
+ room_id(str): the ID of the room whose keys we're setting
+ session_id(str): the session whose room_key we're setting
+ room_key(dict): the room_key being set
+ Raises:
+ StoreError
+ """
+
+ yield self._simple_upsert(
+ table="e2e_room_keys",
+ keyvalues={
+ "user_id": user_id,
+ "room_id": room_id,
+ "session_id": session_id,
+ },
+ values={
+ "version": version,
+ "first_message_index": room_key['first_message_index'],
+ "forwarded_count": room_key['forwarded_count'],
+ "is_verified": room_key['is_verified'],
+ "session_data": json.dumps(room_key['session_data']),
+ },
+ lock=False,
+ )
+
+ @defer.inlineCallbacks
+ def get_e2e_room_keys(
+ self, user_id, version, room_id=None, session_id=None
+ ):
+ """Bulk get the E2E room keys for a given backup, optionally filtered to a given
+ room, or a given session.
+
+ Args:
+ user_id(str): the user whose backup we're querying
+ version(str): the version ID of the backup for the set of keys we're querying
+ room_id(str): Optional. the ID of the room whose keys we're querying, if any.
+ If not specified, we return the keys for all the rooms in the backup.
+ session_id(str): Optional. the session whose room_key we're querying, if any.
+ If specified, we also require the room_id to be specified.
+ If not specified, we return all the keys in this version of
+ the backup (or for the specified room)
+
+ Returns:
+ A deferred list of dicts giving the session_data and message metadata for
+ these room keys.
+ """
+
+ try:
+ version = int(version)
+ except ValueError:
+ defer.returnValue({'rooms': {}})
+
+ keyvalues = {
+ "user_id": user_id,
+ "version": version,
+ }
+ if room_id:
+ keyvalues['room_id'] = room_id
+ if session_id:
+ keyvalues['session_id'] = session_id
+
+ rows = yield self._simple_select_list(
+ table="e2e_room_keys",
+ keyvalues=keyvalues,
+ retcols=(
+ "user_id",
+ "room_id",
+ "session_id",
+ "first_message_index",
+ "forwarded_count",
+ "is_verified",
+ "session_data",
+ ),
+ desc="get_e2e_room_keys",
+ )
+
+ sessions = {'rooms': {}}
+ for row in rows:
+ room_entry = sessions['rooms'].setdefault(row['room_id'], {"sessions": {}})
+ room_entry['sessions'][row['session_id']] = {
+ "first_message_index": row["first_message_index"],
+ "forwarded_count": row["forwarded_count"],
+ "is_verified": row["is_verified"],
+ "session_data": json.loads(row["session_data"]),
+ }
+
+ defer.returnValue(sessions)
+
+ @defer.inlineCallbacks
+ def delete_e2e_room_keys(
+ self, user_id, version, room_id=None, session_id=None
+ ):
+ """Bulk delete the E2E room keys for a given backup, optionally filtered to a given
+ room or a given session.
+
+ Args:
+ user_id(str): the user whose backup we're deleting from
+ version(str): the version ID of the backup for the set of keys we're deleting
+ room_id(str): Optional. the ID of the room whose keys we're deleting, if any.
+ If not specified, we delete the keys for all the rooms in the backup.
+ session_id(str): Optional. the session whose room_key we're deleting, if any.
+ If specified, we also require the room_id to be specified.
+ If not specified, we delete all the keys in this version of
+ the backup (or for the specified room)
+
+ Returns:
+ A deferred of the deletion transaction
+ """
+
+ keyvalues = {
+ "user_id": user_id,
+ "version": version,
+ }
+ if room_id:
+ keyvalues['room_id'] = room_id
+ if session_id:
+ keyvalues['session_id'] = session_id
+
+ yield self._simple_delete(
+ table="e2e_room_keys",
+ keyvalues=keyvalues,
+ desc="delete_e2e_room_keys",
+ )
+
+ @staticmethod
+ def _get_current_version(txn, user_id):
+ txn.execute(
+ "SELECT MAX(version) FROM e2e_room_keys_versions "
+ "WHERE user_id=? AND deleted=0",
+ (user_id,)
+ )
+ row = txn.fetchone()
+ if not row or row[0] is None:
+ raise StoreError(404, 'No current backup version')
+ return row[0]
+
+ def get_e2e_room_keys_version_info(self, user_id, version=None):
+ """Get info metadata about a version of our room_keys backup.
+
+ Args:
+ user_id(str): the user whose backup we're querying
+ version(str): Optional. the version ID of the backup we're querying about
+ If missing, we return the information about the current version.
+ Raises:
+ StoreError: with code 404 if there are no e2e_room_keys_versions present
+ Returns:
+ A deferred dict giving the info metadata for this backup version, with
+ fields including:
+ version(str)
+ algorithm(str)
+ auth_data(object): opaque dict supplied by the client
+ """
+
+ def _get_e2e_room_keys_version_info_txn(txn):
+ if version is None:
+ this_version = self._get_current_version(txn, user_id)
+ else:
+ try:
+ this_version = int(version)
+ except ValueError:
+ # Our versions are all ints so if we can't convert it to an integer,
+ # it isn't there.
+ raise StoreError(404, "No row found")
+
+ result = self._simple_select_one_txn(
+ txn,
+ table="e2e_room_keys_versions",
+ keyvalues={
+ "user_id": user_id,
+ "version": this_version,
+ "deleted": 0,
+ },
+ retcols=(
+ "version",
+ "algorithm",
+ "auth_data",
+ ),
+ )
+ result["auth_data"] = json.loads(result["auth_data"])
+ result["version"] = str(result["version"])
+ return result
+
+ return self.runInteraction(
+ "get_e2e_room_keys_version_info",
+ _get_e2e_room_keys_version_info_txn
+ )
+
+ def create_e2e_room_keys_version(self, user_id, info):
+ """Atomically creates a new version of this user's e2e_room_keys store
+ with the given version info.
+
+ Args:
+ user_id(str): the user whose backup we're creating a version for
+ info(dict): the info about the backup version to be created
+
+ Returns:
+ A deferred string for the newly created version ID
+ """
+
+ def _create_e2e_room_keys_version_txn(txn):
+ txn.execute(
+ "SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?",
+ (user_id,)
+ )
+ current_version = txn.fetchone()[0]
+ if current_version is None:
+ current_version = '0'
+
+ new_version = str(int(current_version) + 1)
+
+ self._simple_insert_txn(
+ txn,
+ table="e2e_room_keys_versions",
+ values={
+ "user_id": user_id,
+ "version": new_version,
+ "algorithm": info["algorithm"],
+ "auth_data": json.dumps(info["auth_data"]),
+ },
+ )
+
+ return new_version
+
+ return self.runInteraction(
+ "create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn
+ )
+
+ def delete_e2e_room_keys_version(self, user_id, version=None):
+ """Delete a given backup version of the user's room keys.
+ Doesn't delete their actual key data.
+
+ Args:
+ user_id(str): the user whose backup version we're deleting
+ version(str): Optional. the version ID of the backup version we're deleting
+ If missing, we delete the current backup version info.
+ Raises:
+ StoreError: with code 404 if there are no e2e_room_keys_versions present,
+ or if the version requested doesn't exist.
+ """
+
+ def _delete_e2e_room_keys_version_txn(txn):
+ if version is None:
+ this_version = self._get_current_version(txn, user_id)
+ else:
+ this_version = version
+
+ return self._simple_update_one_txn(
+ txn,
+ table="e2e_room_keys_versions",
+ keyvalues={
+ "user_id": user_id,
+ "version": this_version,
+ },
+ updatevalues={
+ "deleted": 1,
+ }
+ )
+
+ return self.runInteraction(
+ "delete_e2e_room_keys_version",
+ _delete_e2e_room_keys_version_txn
+ )
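# --- Illustrative usage sketch of the EndToEndRoomKeyStore API defined above.
# --- The store instance, room id, algorithm name and key payload are
# --- hypothetical example values, not taken from the code.
from twisted.internet import defer


@defer.inlineCallbacks
def backup_one_key(store, user_id):
    # create a new backup version for this user
    version = yield store.create_e2e_room_keys_version(
        user_id,
        {"algorithm": "m.megolm_backup.v1", "auth_data": {"public_key": "..."}},
    )
    # store one room key in that version
    yield store.set_e2e_room_key(
        user_id, version, "!room:example.com", "session-id",
        {
            "first_message_index": 0,
            "forwarded_count": 0,
            "is_verified": False,
            "session_data": {"ciphertext": "..."},
        },
    )
    # read everything back: {'rooms': {room_id: {'sessions': {session_id: {...}}}}}
    keys = yield store.get_e2e_room_keys(user_id, version)
    defer.returnValue(keys)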
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 1f1721e8..2a0f6cfc 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -40,7 +40,10 @@ class EndToEndKeyStore(SQLBaseStore):
allow_none=True,
)
- new_key_json = encode_canonical_json(device_keys)
+ # In py3 we need old_key_json to match new_key_json type. The DB
+ # returns unicode while encode_canonical_json returns bytes.
+ new_key_json = encode_canonical_json(device_keys).decode("utf-8")
+
if old_key_json == new_key_json:
return False
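# --- Illustrative sketch of why the decode above is needed on Python 3: the
# --- DB hands back unicode, encode_canonical_json hands back bytes, and the
# --- two never compare equal without decoding.
from canonicaljson import encode_canonical_json

old_key_json = '{"a":1}'                           # str, as returned by the DB
new_key_bytes = encode_canonical_json({"a": 1})    # bytes

assert old_key_json != new_key_bytes                     # str vs bytes: never equal
assert old_key_json == new_key_bytes.decode("utf-8")     # equal once decoded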
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 24345b20..d3b9dea1 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -376,33 +376,25 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
@defer.inlineCallbacks
def get_missing_events(self, room_id, earliest_events, latest_events,
- limit, min_depth):
+ limit):
ids = yield self.runInteraction(
"get_missing_events",
self._get_missing_events,
- room_id, earliest_events, latest_events, limit, min_depth
+ room_id, earliest_events, latest_events, limit,
)
-
events = yield self._get_events(ids)
-
- events = sorted(
- [ev for ev in events if ev.depth >= min_depth],
- key=lambda e: e.depth,
- )
-
- defer.returnValue(events[:limit])
+ defer.returnValue(events)
def _get_missing_events(self, txn, room_id, earliest_events, latest_events,
- limit, min_depth):
-
- earliest_events = set(earliest_events)
- front = set(latest_events) - earliest_events
+ limit):
- event_results = set()
+ seen_events = set(earliest_events)
+ front = set(latest_events) - seen_events
+ event_results = []
query = (
"SELECT prev_event_id FROM event_edges "
- "WHERE event_id = ? AND is_state = ? "
+ "WHERE room_id = ? AND event_id = ? AND is_state = ? "
"LIMIT ?"
)
@@ -411,18 +403,20 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
for event_id in front:
txn.execute(
query,
- (event_id, False, limit - len(event_results))
+ (room_id, event_id, False, limit - len(event_results))
)
- for e_id, in txn:
- new_front.add(e_id)
+ new_results = set(t[0] for t in txn) - seen_events
- new_front -= earliest_events
- new_front -= event_results
+ new_front |= new_results
+ seen_events |= new_results
+ event_results.extend(new_results)
front = new_front
- event_results |= new_front
+ # we built the list working backwards from latest_events; we now need to
+ # reverse it so that the events are approximately chronological.
+ event_results.reverse()
return event_results
@@ -483,7 +477,7 @@ class EventFederationStore(EventFederationWorkerStore):
"is_state": False,
}
for ev in events
- for e_id, _ in ev.prev_events
+ for e_id in ev.prev_event_ids()
],
)
@@ -516,7 +510,7 @@ class EventFederationStore(EventFederationWorkerStore):
txn.executemany(query, [
(e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False)
- for ev in events for e_id, _ in ev.prev_events
+ for ev in events for e_id in ev.prev_event_ids()
if not ev.internal_metadata.is_outlier()
])
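# --- Self-contained sketch of the traversal _get_missing_events now performs:
# --- walk backwards from latest_events over prev-event edges (an in-memory
# --- dict stands in for the event_edges table), then reverse the result so it
# --- is roughly chronological. The limit handling is approximate.
def missing_events(prev_map, earliest, latest, limit):
    seen = set(earliest)
    front = set(latest) - seen
    results = []

    while front and len(results) < limit:
        new_front = set()
        for event_id in front:
            new = set(prev_map.get(event_id, ())) - seen
            new_front |= new
            seen |= new
            results.extend(new)
        front = new_front

    results.reverse()
    return results[:limit]


graph = {"E4": ["E3"], "E3": ["E2"], "E2": ["E1"]}
print(missing_events(graph, earliest=["E1"], latest=["E4"], limit=10))  # ['E2', 'E3']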
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 8bf87f38..2047110b 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -34,10 +34,13 @@ from synapse.api.errors import SynapseError
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.state import StateResolutionStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.event_federation import EventFederationStore
from synapse.storage.events_worker import EventsWorkerStore
+from synapse.storage.state import StateGroupWorkerStore
from synapse.types import RoomStreamToken, get_domain_from_id
+from synapse.util import batch_iter
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.util.frozenutils import frozendict_json_encoder
@@ -203,7 +206,8 @@ def _retry_on_integrity_error(func):
# inherits from EventFederationStore so that we can call _update_backward_extremities
# and _handle_mult_prev_events (though arguably those could both be moved in here)
-class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore):
+class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore,
+ BackgroundUpdateStore):
EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
@@ -386,12 +390,10 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
)
for room_id, ev_ctx_rm in iteritems(events_by_room):
- # Work out new extremities by recursively adding and removing
- # the new events.
latest_event_ids = yield self.get_latest_event_ids_in_room(
room_id
)
- new_latest_event_ids = yield self._calculate_new_extremeties(
+ new_latest_event_ids = yield self._calculate_new_extremities(
room_id, ev_ctx_rm, latest_event_ids
)
@@ -400,6 +402,12 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
# No change in extremities, so no change in state
continue
+ # there should always be at least one forward extremity.
+ # (except during the initial persistence of the send_join
+ # results, in which case there will be no existing
+ # extremities, so we'll `continue` above and skip this bit.)
+ assert new_latest_event_ids, "No forward extremities left!"
+
new_forward_extremeties[room_id] = new_latest_event_ids
len_1 = (
@@ -408,7 +416,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
)
if len_1:
all_single_prev_not_state = all(
- len(event.prev_events) == 1
+ len(event.prev_event_ids()) == 1
and not event.is_state()
for event, ctx in ev_ctx_rm
)
@@ -432,7 +440,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
# guess this by looking at the prev_events and checking
# if they match the current forward extremities.
for ev, _ in ev_ctx_rm:
- prev_event_ids = set(e for e, _ in ev.prev_events)
+ prev_event_ids = set(ev.prev_event_ids())
if latest_event_ids == prev_event_ids:
state_delta_reuse_delta_counter.inc()
break
@@ -517,44 +525,79 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
)
@defer.inlineCallbacks
- def _calculate_new_extremeties(self, room_id, event_contexts, latest_event_ids):
- """Calculates the new forward extremeties for a room given events to
+ def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
+ """Calculates the new forward extremities for a room given events to
persist.
Assumes that we are only persisting events for one room at a time.
"""
- new_latest_event_ids = set(latest_event_ids)
- # First, add all the new events to the list
- new_latest_event_ids.update(
- event.event_id for event, ctx in event_contexts
+
+ # we're only interested in new events which aren't outliers and which aren't
+ # being rejected.
+ new_events = [
+ event for event, ctx in event_contexts
if not event.internal_metadata.is_outlier() and not ctx.rejected
+ ]
+
+ # start with the existing forward extremities
+ result = set(latest_event_ids)
+
+ # add all the new events to the list
+ result.update(
+ event.event_id for event in new_events
)
- # Now remove all events that are referenced by the to-be-added events
- new_latest_event_ids.difference_update(
+
+ # Now remove all events which are prev_events of any of the new events
+ result.difference_update(
e_id
- for event, ctx in event_contexts
- for e_id, _ in event.prev_events
- if not event.internal_metadata.is_outlier() and not ctx.rejected
+ for event in new_events
+ for e_id in event.prev_event_ids()
)
- # And finally remove any events that are referenced by previously added
- # events.
- rows = yield self._simple_select_many_batch(
- table="event_edges",
- column="prev_event_id",
- iterable=list(new_latest_event_ids),
- retcols=["prev_event_id"],
- keyvalues={
- "is_state": False,
- },
- desc="_calculate_new_extremeties",
- )
+ # Finally, remove any events which are prev_events of any existing events.
+ existing_prevs = yield self._get_events_which_are_prevs(result)
+ result.difference_update(existing_prevs)
- new_latest_event_ids.difference_update(
- row["prev_event_id"] for row in rows
- )
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def _get_events_which_are_prevs(self, event_ids):
+ """Filter the supplied list of event_ids to get those which are prev_events of
+ existing (non-outlier/rejected) events.
+
+ Args:
+ event_ids (Iterable[str]): event ids to filter
+
+ Returns:
+ Deferred[List[str]]: filtered event ids
+ """
+ results = []
- defer.returnValue(new_latest_event_ids)
+ def _get_events(txn, batch):
+ sql = """
+ SELECT prev_event_id
+ FROM event_edges
+ INNER JOIN events USING (event_id)
+ LEFT JOIN rejections USING (event_id)
+ WHERE
+ prev_event_id IN (%s)
+ AND NOT events.outlier
+ AND rejections.event_id IS NULL
+ """ % (
+ ",".join("?" for _ in batch),
+ )
+
+ txn.execute(sql, batch)
+ results.extend(r[0] for r in txn)
+
+ for chunk in batch_iter(event_ids, 100):
+ yield self.runInteraction(
+ "_get_events_which_are_prevs",
+ _get_events,
+ chunk,
+ )
+
+ defer.returnValue(results)
@defer.inlineCallbacks
def _get_new_state_after_events(self, room_id, events_context, old_latest_event_ids,
@@ -586,10 +629,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
the new current state is only returned if we've already calculated
it.
"""
-
- if not new_latest_event_ids:
- return
-
# map from state_group to ((type, key) -> event_id) state map
state_groups_map = {}
@@ -695,11 +734,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
# Ok, we need to defer to the state handler to resolve our state sets.
- def get_events(ev_ids):
- return self.get_events(
- ev_ids, get_prev_content=False, check_redacted=False,
- )
-
state_groups = {
sg: state_groups_map[sg] for sg in new_state_groups
}
@@ -709,7 +743,8 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
logger.debug("calling resolve_state_groups from preserve_events")
res = yield self._state_resolution_handler.resolve_state_groups(
- room_id, room_version, state_groups, events_map, get_events
+ room_id, room_version, state_groups, events_map,
+ state_res_store=StateResolutionStore(self)
)
defer.returnValue((res.state, None))
@@ -818,6 +853,27 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
# Insert into event_to_state_groups.
self._store_event_state_mappings_txn(txn, events_and_contexts)
+ # We want to store event_auth mappings for rejected events, as they're
+ # used in state res v2.
+ # This is only necessary if the rejected event appears in an accepted
+ # event's auth chain, but it's easier for now just to store them (and
+ # it doesn't take much storage compared to storing the entire event
+ # anyway).
+ self._simple_insert_many_txn(
+ txn,
+ table="event_auth",
+ values=[
+ {
+ "event_id": event.event_id,
+ "room_id": event.room_id,
+ "auth_id": auth_id,
+ }
+ for event, _ in events_and_contexts
+ for auth_id in event.auth_event_ids()
+ if event.is_state()
+ ],
+ )
+
# _store_rejected_events_txn filters out any events which were
# rejected, and returns the filtered list.
events_and_contexts = self._store_rejected_events_txn(
@@ -930,6 +986,10 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
)
self._invalidate_cache_and_stream(
+ txn, self.get_room_summary, (room_id,)
+ )
+
+ self._invalidate_cache_and_stream(
txn, self.get_current_state_ids, (room_id,)
)
@@ -1289,21 +1349,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
txn, event.room_id, event.redacts
)
- self._simple_insert_many_txn(
- txn,
- table="event_auth",
- values=[
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "auth_id": auth_id,
- }
- for event, _ in events_and_contexts
- for auth_id, _ in event.auth_events
- if event.is_state()
- ],
- )
-
# Update the event_forward_extremities, event_backward_extremities and
# event_edges tables.
self._handle_mult_prev_events(
@@ -1886,20 +1931,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
")"
)
- # create an index on should_delete because later we'll be looking for
- # the should_delete / shouldn't_delete subsets
- txn.execute(
- "CREATE INDEX events_to_purge_should_delete"
- " ON events_to_purge(should_delete)",
- )
-
- # We do joins against events_to_purge for e.g. calculating state
- # groups to purge, etc., so lets make an index.
- txn.execute(
- "CREATE INDEX events_to_purge_id"
- " ON events_to_purge(event_id)",
- )
-
# First ensure that we're not about to delete all the forward extremeties
txn.execute(
"SELECT e.event_id, e.depth FROM events as e "
@@ -1926,19 +1957,45 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
should_delete_params = ()
if not delete_local_events:
should_delete_expr += " AND event_id NOT LIKE ?"
- should_delete_params += ("%:" + self.hs.hostname, )
+
+ # We include the parameter twice since we use the expression twice
+ should_delete_params += (
+ "%:" + self.hs.hostname,
+ "%:" + self.hs.hostname,
+ )
should_delete_params += (room_id, token.topological)
+ # Note that we insert events that are outliers and aren't going to be
+ # deleted, as nothing will happen to them.
txn.execute(
"INSERT INTO events_to_purge"
" SELECT event_id, %s"
" FROM events AS e LEFT JOIN state_events USING (event_id)"
- " WHERE e.room_id = ? AND topological_ordering < ?" % (
+ " WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?"
+ % (
+ should_delete_expr,
should_delete_expr,
),
should_delete_params,
)
+
+ # We create the indices *after* insertion as that's a lot faster.
+
+ # create an index on should_delete because later we'll be looking for
+ # the should_delete / shouldn't_delete subsets
+ txn.execute(
+ "CREATE INDEX events_to_purge_should_delete"
+ " ON events_to_purge(should_delete)",
+ )
+
+ # We do joins against events_to_purge for e.g. calculating state
+ # groups to purge, etc., so let's make an index.
+ txn.execute(
+ "CREATE INDEX events_to_purge_id"
+ " ON events_to_purge(event_id)",
+ )
+
txn.execute(
"SELECT event_id, should_delete FROM events_to_purge"
)
@@ -1979,62 +2036,44 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
logger.info("[purge] finding redundant state groups")
- # Get all state groups that are only referenced by events that are
- # to be deleted.
- # This works by first getting state groups that we may want to delete,
- # joining against event_to_state_groups to get events that use that
- # state group, then left joining against events_to_purge again. Any
- # state group where the left join produce *no nulls* are referenced
- # only by events that are going to be purged.
+ # Get all state groups that are referenced by events that are to be
+ # deleted. We then go and check if they are referenced by other events
+ # or state groups, and if not we delete them.
txn.execute("""
- SELECT state_group FROM
- (
- SELECT DISTINCT state_group FROM events_to_purge
- INNER JOIN event_to_state_groups USING (event_id)
- ) AS sp
- INNER JOIN event_to_state_groups USING (state_group)
- LEFT JOIN events_to_purge AS ep USING (event_id)
- GROUP BY state_group
- HAVING SUM(CASE WHEN ep.event_id IS NULL THEN 1 ELSE 0 END) = 0
+ SELECT DISTINCT state_group FROM events_to_purge
+ INNER JOIN event_to_state_groups USING (event_id)
""")
- state_rows = txn.fetchall()
- logger.info("[purge] found %i redundant state groups", len(state_rows))
-
- # make a set of the redundant state groups, so that we can look them up
- # efficiently
- state_groups_to_delete = set([sg for sg, in state_rows])
-
- # Now we get all the state groups that rely on these state groups
- logger.info("[purge] finding state groups which depend on redundant"
- " state groups")
- remaining_state_groups = []
- for i in range(0, len(state_rows), 100):
- chunk = [sg for sg, in state_rows[i:i + 100]]
- # look for state groups whose prev_state_group is one we are about
- # to delete
- rows = self._simple_select_many_txn(
- txn,
- table="state_group_edges",
- column="prev_state_group",
- iterable=chunk,
- retcols=["state_group"],
- keyvalues={},
- )
- remaining_state_groups.extend(
- row["state_group"] for row in rows
+ referenced_state_groups = set(sg for sg, in txn)
+ logger.info(
+ "[purge] found %i referenced state groups",
+ len(referenced_state_groups),
+ )
+
+ logger.info("[purge] finding state groups that can be deleted")
- # exclude state groups we are about to delete: no point in
- # updating them
- if row["state_group"] not in state_groups_to_delete
+ state_groups_to_delete, remaining_state_groups = (
+ self._find_unreferenced_groups_during_purge(
+ txn, referenced_state_groups,
)
+ )
+
+ logger.info(
+ "[purge] found %i state groups to delete",
+ len(state_groups_to_delete),
+ )
+
+ logger.info(
+ "[purge] de-delta-ing %i remaining state groups",
+ len(remaining_state_groups),
+ )
# Now we turn the state groups that reference to-be-deleted state
# groups to non delta versions.
for sg in remaining_state_groups:
logger.info("[purge] de-delta-ing remaining state group %s", sg)
curr_state = self._get_state_groups_from_groups_txn(
- txn, [sg], types=None
+ txn, [sg],
)
curr_state = curr_state[sg]
@@ -2072,11 +2111,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
logger.info("[purge] removing redundant state groups")
txn.executemany(
"DELETE FROM state_groups_state WHERE state_group = ?",
- state_rows
+ ((sg,) for sg in state_groups_to_delete),
)
txn.executemany(
"DELETE FROM state_groups WHERE id = ?",
- state_rows
+ ((sg,) for sg in state_groups_to_delete),
)
logger.info("[purge] removing events from event_to_state_groups")
@@ -2172,6 +2211,85 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
logger.info("[purge] done")
+ def _find_unreferenced_groups_during_purge(self, txn, state_groups):
+ """Used when purging history to figure out which state groups can be
+ deleted and which need to be de-delta'ed (due to one of their prev groups
+ being scheduled for deletion).
+
+ Args:
+ txn
+ state_groups (set[int]): Set of state groups referenced by events
+ that are going to be deleted.
+
+ Returns:
+ tuple[set[int], set[int]]: The set of state groups that can be
+ deleted and the set of state groups that need to be de-delta'ed
+ """
+ # Graph of state group -> previous group
+ graph = {}
+
+ # Set of events that we have found to be referenced by events
+ referenced_groups = set()
+
+ # Set of state groups we've already seen
+ state_groups_seen = set(state_groups)
+
+ # Set of state groups to handle next.
+ next_to_search = set(state_groups)
+ while next_to_search:
+ # We bound size of groups we're looking up at once, to stop the
+ # SQL query getting too big
+ if len(next_to_search) < 100:
+ current_search = next_to_search
+ next_to_search = set()
+ else:
+ current_search = set(itertools.islice(next_to_search, 100))
+ next_to_search -= current_search
+
+ # Check if state groups are referenced
+ sql = """
+ SELECT DISTINCT state_group FROM event_to_state_groups
+ LEFT JOIN events_to_purge AS ep USING (event_id)
+ WHERE state_group IN (%s) AND ep.event_id IS NULL
+ """ % (",".join("?" for _ in current_search),)
+ txn.execute(sql, list(current_search))
+
+ referenced = set(sg for sg, in txn)
+ referenced_groups |= referenced
+
+ # We don't continue iterating up the state group graphs for state
+ # groups that are referenced.
+ current_search -= referenced
+
+ rows = self._simple_select_many_txn(
+ txn,
+ table="state_group_edges",
+ column="prev_state_group",
+ iterable=current_search,
+ keyvalues={},
+ retcols=("prev_state_group", "state_group",),
+ )
+
+ prevs = set(row["state_group"] for row in rows)
+ # We don't bother re-handling groups we've already seen
+ prevs -= state_groups_seen
+ next_to_search |= prevs
+ state_groups_seen |= prevs
+
+ for row in rows:
+ # Note: Each state group can have at most one prev group
+ graph[row["state_group"]] = row["prev_state_group"]
+
+ to_delete = state_groups_seen - referenced_groups
+
+ to_dedelta = set()
+ for sg in referenced_groups:
+ prev_sg = graph.get(sg)
+ if prev_sg and prev_sg in to_delete:
+ to_dedelta.add(sg)
+
+ return to_delete, to_dedelta
+
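# --- Self-contained sketch of the split computed by
# --- _find_unreferenced_groups_during_purge above: an in-memory edge list
# --- stands in for state_group_edges, and `still_referenced` for the
# --- "referenced by an event we are not purging" check.
def split_groups(candidates, edges, still_referenced):
    children = {}   # prev_group -> groups that delta off it
    prev_of = {}    # group -> its (single) prev group
    for prev, group in edges:
        children.setdefault(prev, set()).add(group)
        prev_of[group] = prev

    seen = set(candidates)
    referenced = set()
    frontier = set(candidates)
    while frontier:
        group = frontier.pop()
        if group in still_referenced:
            referenced.add(group)
            continue          # keep it; don't walk past it
        for child in children.get(group, ()):
            if child not in seen:
                seen.add(child)
                frontier.add(child)

    to_delete = seen - referenced
    # referenced groups whose prev group is going away must be de-delta'd
    to_dedelta = {g for g in referenced if prev_of.get(g) in to_delete}
    return to_delete, to_dedelta


# group 3 survives (still referenced) but loses its prev group 2, so it is de-delta'd
print(split_groups({1}, [(1, 2), (2, 3)], still_referenced={3}))  # ({1, 2}, {3})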
@defer.inlineCallbacks
def is_event_after(self, event_id1, event_id2):
"""Returns True if event_id1 is after event_id2 in the stream
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
index f5479776..8af17921 100644
--- a/synapse/storage/keys.py
+++ b/synapse/storage/keys.py
@@ -32,7 +32,7 @@ logger = logging.getLogger(__name__)
# py2 sqlite has buffer hardcoded as only binary type, so we must use it,
# despite being deprecated and removed in favor of memoryview
if six.PY2:
- db_binary_type = buffer
+ db_binary_type = six.moves.builtins.buffer
else:
db_binary_type = memoryview
@@ -134,6 +134,7 @@ class KeyStore(SQLBaseStore):
"""
key_id = "%s:%s" % (verify_key.alg, verify_key.version)
+ # XXX fix this to not need a lock (#3819)
def _txn(txn):
self._simple_upsert_txn(
txn,
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index c7899d7f..cf4104dc 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -33,19 +33,29 @@ class MonthlyActiveUsersStore(SQLBaseStore):
self._clock = hs.get_clock()
self.hs = hs
self.reserved_users = ()
+ # Do not add more reserved users than the total allowable number
+ self._initialise_reserved_users(
+ dbconn.cursor(),
+ hs.config.mau_limits_reserved_threepids[:self.hs.config.max_mau_value],
+ )
- @defer.inlineCallbacks
- def initialise_reserved_users(self, threepids):
- store = self.hs.get_datastore()
+ def _initialise_reserved_users(self, txn, threepids):
+ """Ensures that reserved threepids are accounted for in the MAU table, should
+ be called on start up.
+
+ Args:
+ txn (cursor):
+ threepids (list[dict]): List of threepid dicts to reserve
+ """
reserved_user_list = []
- # Do not add more reserved users than the total allowable number
- for tp in threepids[:self.hs.config.max_mau_value]:
- user_id = yield store.get_user_id_by_threepid(
+ for tp in threepids:
+ user_id = self.get_user_id_by_threepid_txn(
+ txn,
tp["medium"], tp["address"]
)
if user_id:
- yield self.upsert_monthly_active_user(user_id)
+ self.upsert_monthly_active_user_txn(txn, user_id)
reserved_user_list.append(user_id)
else:
logger.warning(
@@ -55,8 +65,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
@defer.inlineCallbacks
def reap_monthly_active_users(self):
- """
- Cleans out monthly active user table to ensure that no stale
+ """Cleans out monthly active user table to ensure that no stale
entries exist.
Returns:
@@ -147,16 +156,62 @@ class MonthlyActiveUsersStore(SQLBaseStore):
return self.runInteraction("count_users", _count_users)
@defer.inlineCallbacks
+ def get_registered_reserved_users_count(self):
+ """Of the reserved threepids defined in config, how many are associated
+ with registered users?
+
+ Returns:
+ Deferred[int]: Number of real reserved users
+ """
+ count = 0
+ for tp in self.hs.config.mau_limits_reserved_threepids:
+ user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
+ tp["medium"], tp["address"]
+ )
+ if user_id:
+ count = count + 1
+ defer.returnValue(count)
+
+ @defer.inlineCallbacks
def upsert_monthly_active_user(self, user_id):
+ """Updates or inserts the user into the monthly active user table, which
+ is used to track the current MAU usage of the server
+
+ Args:
+ user_id (str): user to add/update
"""
- Updates or inserts monthly active user member
- Arguments:
- user_id (str): user to add/update
- Deferred[bool]: True if a new entry was created, False if an
- existing one was updated.
+ is_insert = yield self.runInteraction(
+ "upsert_monthly_active_user", self.upsert_monthly_active_user_txn,
+ user_id
+ )
+
+ if is_insert:
+ self.user_last_seen_monthly_active.invalidate((user_id,))
+ self.get_monthly_active_count.invalidate(())
+
+ def upsert_monthly_active_user_txn(self, txn, user_id):
+ """Updates or inserts monthly active user member
+
+ Note that, after calling this method, it will generally be necessary
+ to invalidate the caches on user_last_seen_monthly_active and
+ get_monthly_active_count. We can't do that here, because we are running
+ in a database thread rather than the main thread, and we can't call
+ txn.call_after because txn may not be a LoggingTransaction.
+
+ Args:
+ txn (cursor):
+ user_id (str): user to add/update
+
+ Returns:
+ bool: True if a new entry was created, False if an
+ existing one was updated.
"""
- is_insert = yield self._simple_upsert(
- desc="upsert_monthly_active_user",
+ # Am consciously deciding to lock the table on the basis that it ought
+ # never be a big table and alternative approaches (batching multiple
+ # upserts into a single txn) introduced a lot of extra complexity.
+ # See https://github.com/matrix-org/synapse/issues/3854 for more
+ is_insert = self._simple_upsert_txn(
+ txn,
table="monthly_active_users",
keyvalues={
"user_id": user_id,
@@ -164,11 +219,9 @@ class MonthlyActiveUsersStore(SQLBaseStore):
values={
"timestamp": int(self._clock.time_msec()),
},
- lock=False,
)
- if is_insert:
- self.user_last_seen_monthly_active.invalidate((user_id,))
- self.get_monthly_active_count.invalidate(())
+
+ return is_insert
@cached(num_args=1)
def user_last_seen_monthly_active(self, user_id):
@@ -199,10 +252,14 @@ class MonthlyActiveUsersStore(SQLBaseStore):
Args:
user_id(str): the user_id to query
"""
+
if self.hs.config.limit_usage_by_mau:
+ # Trial users and guests should not be included as part of MAU group
+ is_guest = yield self.is_guest(user_id)
+ if is_guest:
+ return
is_trial = yield self.is_trial_user(user_id)
if is_trial:
- # we don't track trial users in the MAU table.
return
last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id)
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index b3647193..bd740e1e 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
# Remember to update this number every time a change is made to database
# schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 51
+SCHEMA_VERSION = 52
dir_path = os.path.abspath(os.path.dirname(__file__))
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index c7987bfc..2743b52b 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -29,7 +29,7 @@ from ._base import SQLBaseStore
logger = logging.getLogger(__name__)
if six.PY2:
- db_binary_type = buffer
+ db_binary_type = six.moves.builtins.buffer
else:
db_binary_type = memoryview
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 26b429e3..80d76bf9 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -474,17 +474,44 @@ class RegistrationStore(RegistrationWorkerStore,
@defer.inlineCallbacks
def get_user_id_by_threepid(self, medium, address):
- ret = yield self._simple_select_one(
+ """Returns user id from threepid
+
+ Args:
+ medium (str): threepid medium e.g. email
+ address (str): threepid address e.g. me@example.com
+
+ Returns:
+ Deferred[str|None]: user id or None if no user id/threepid mapping exists
+ """
+ user_id = yield self.runInteraction(
+ "get_user_id_by_threepid", self.get_user_id_by_threepid_txn,
+ medium, address
+ )
+ defer.returnValue(user_id)
+
+ def get_user_id_by_threepid_txn(self, txn, medium, address):
+ """Returns user id from threepid
+
+ Args:
+ txn (cursor):
+ medium (str): threepid medium e.g. email
+ address (str): threepid address e.g. me@example.com
+
+ Returns:
+ str|None: user id or None if no user id/threepid mapping exists
+ """
+ ret = self._simple_select_one_txn(
+ txn,
"user_threepids",
{
"medium": medium,
"address": address
},
- ['user_id'], True, 'get_user_id_by_threepid'
+ ['user_id'], True
)
if ret:
- defer.returnValue(ret['user_id'])
- defer.returnValue(None)
+ return ret['user_id']
+ return None
def user_delete_threepid(self, user_id, medium, address):
return self._simple_delete(
@@ -567,7 +594,7 @@ class RegistrationStore(RegistrationWorkerStore,
def _find_next_generated_user_id(txn):
txn.execute("SELECT name FROM users")
- regex = re.compile("^@(\d+):")
+ regex = re.compile(r"^@(\d+):")
found = set()
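# --- Illustrative sketch of the raw-string regex above: recent Python 3
# --- releases warn about the unescaped "\d" form, and r"^@(\d+):" keeps the
# --- backslash literal. The "next free id" step is a plausible reconstruction
# --- of what _find_next_generated_user_id goes on to do, since the rest of
# --- the function falls outside this hunk.
import re

regex = re.compile(r"^@(\d+):")

names = ["@1234:example.com", "@alice:example.com", "@1236:example.com"]
found = set()
for name in names:
    match = regex.search(name)
    if match:
        found.add(int(match.group(1)))

next_id = next(i for i in range(1, len(found) + 2) if i not in found)
print(next_id)  # 1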
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 61013b89..41c65e11 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -47,7 +47,7 @@ class RoomWorkerStore(SQLBaseStore):
Args:
room_id (str): The ID of the room to retrieve.
Returns:
- A namedtuple containing the room information, or an empty list.
+ A dict containing the room information, or None if the room is unknown.
"""
return self._simple_select_one(
table="rooms",
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 9b4e6d6a..0707f9a8 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -51,6 +51,12 @@ ProfileInfo = namedtuple(
"ProfileInfo", ("avatar_url", "display_name")
)
+# "members" points to a truncated list of (user_id, event_id) tuples for users of
+# a given membership type, suitable for use in calculating heroes for a room.
+# "count" points to the total numberr of users of a given membership type.
+MemberSummary = namedtuple(
+ "MemberSummary", ("members", "count")
+)
_MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update"
@@ -82,6 +88,65 @@ class RoomMemberWorkerStore(EventsWorkerStore):
return [to_ascii(r[0]) for r in txn]
return self.runInteraction("get_users_in_room", f)
+ @cached(max_entries=100000)
+ def get_room_summary(self, room_id):
+ """ Get the details of a room roughly suitable for use by the room
+ summary extension to /sync. Useful when lazy loading room members.
+ Args:
+ room_id (str): The room ID to query
+ Returns:
+ Deferred[dict[str, MemberSummary]]:
+ dict of membership states, pointing to a MemberSummary named tuple.
+ """
+
+ def _get_room_summary_txn(txn):
+ # first get counts.
+ # We do this all in one transaction to keep the cache small.
+ # FIXME: get rid of this when we have room_stats
+ sql = """
+ SELECT count(*), m.membership FROM room_memberships as m
+ INNER JOIN current_state_events as c
+ ON m.event_id = c.event_id
+ AND m.room_id = c.room_id
+ AND m.user_id = c.state_key
+ WHERE c.type = 'm.room.member' AND c.room_id = ?
+ GROUP BY m.membership
+ """
+
+ txn.execute(sql, (room_id,))
+ res = {}
+ for count, membership in txn:
+ summary = res.setdefault(to_ascii(membership), MemberSummary([], count))
+
+ # we order by membership and then fairly arbitrarily by event_id so
+ # heroes are consistent
+ sql = """
+ SELECT m.user_id, m.membership, m.event_id
+ FROM room_memberships as m
+ INNER JOIN current_state_events as c
+ ON m.event_id = c.event_id
+ AND m.room_id = c.room_id
+ AND m.user_id = c.state_key
+ WHERE c.type = 'm.room.member' AND c.room_id = ?
+ ORDER BY
+ CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
+ m.event_id ASC
+ LIMIT ?
+ """
+
+ # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
+ txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
+ for user_id, membership, event_id in txn:
+ summary = res[to_ascii(membership)]
+ # we will always have a summary for this membership type at this
+ # point given the summary currently contains the counts.
+ members = summary.members
+ members.append((to_ascii(user_id), to_ascii(event_id)))
+
+ return res
+
+ return self.runInteraction("get_room_summary", _get_room_summary_txn)
+
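# --- Illustrative sketch of consuming get_room_summary's result, using the
# --- MemberSummary tuple defined above (users and event ids are made up).
# --- The truncated `members` lists are what a /sync implementation would
# --- draw its "heroes" from.
summary = {
    "join": MemberSummary(
        members=[("@alice:example.com", "$ev1"), ("@bob:example.com", "$ev2")],
        count=2,
    ),
    "invite": MemberSummary(
        members=[("@carol:example.com", "$ev3")],
        count=1,
    ),
}

me = "@alice:example.com"
candidates = summary["join"].members + summary["invite"].members
heroes = [user_id for user_id, _ in candidates if user_id != me][:5]
# heroes == ['@bob:example.com', '@carol:example.com']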
@cached()
def get_invited_rooms_for_user(self, user_id):
""" Get all the rooms the user is invited to
diff --git a/synapse/storage/schema/delta/40/device_list_streams.sql b/synapse/storage/schema/delta/40/device_list_streams.sql
index 54841b38..dd6dcb65 100644
--- a/synapse/storage/schema/delta/40/device_list_streams.sql
+++ b/synapse/storage/schema/delta/40/device_list_streams.sql
@@ -20,9 +20,6 @@ CREATE TABLE device_lists_remote_cache (
content TEXT NOT NULL
);
-CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id);
-
-
-- The last update we got for a user. Empty if we're not receiving updates for
-- that user.
CREATE TABLE device_lists_remote_extremeties (
@@ -30,7 +27,11 @@ CREATE TABLE device_lists_remote_extremeties (
stream_id TEXT NOT NULL
);
-CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id);
+-- we used to create non-unique indexes on these tables, but as of update 52 we create
+-- unique indexes concurrently:
+--
+-- CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id);
+-- CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id);
-- Stream of device lists updates. Includes both local and remotes
diff --git a/synapse/storage/schema/delta/51/e2e_room_keys.sql b/synapse/storage/schema/delta/51/e2e_room_keys.sql
new file mode 100644
index 00000000..c0e66a69
--- /dev/null
+++ b/synapse/storage/schema/delta/51/e2e_room_keys.sql
@@ -0,0 +1,39 @@
+/* Copyright 2017 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- users' optionally backed up encrypted e2e sessions
+CREATE TABLE e2e_room_keys (
+ user_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ session_id TEXT NOT NULL,
+ version TEXT NOT NULL,
+ first_message_index INT,
+ forwarded_count INT,
+ is_verified BOOLEAN,
+ session_data TEXT NOT NULL
+);
+
+CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id);
+
+-- the metadata for each generation of encrypted e2e session backups
+CREATE TABLE e2e_room_keys_versions (
+ user_id TEXT NOT NULL,
+ version TEXT NOT NULL,
+ algorithm TEXT NOT NULL,
+ auth_data TEXT NOT NULL,
+ deleted SMALLINT DEFAULT 0 NOT NULL
+);
+
+CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version);
diff --git a/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql
new file mode 100644
index 00000000..91e03d13
--- /dev/null
+++ b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql
@@ -0,0 +1,19 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- This is needed to efficiently check for unreferenced state groups during
+-- purge. Added event_to_state_groups(state_group) index
+INSERT into background_updates (update_name, progress_json)
+ VALUES ('event_to_state_groups_sg_index', '{}');
diff --git a/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql b/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql
new file mode 100644
index 00000000..bfa49e6f
--- /dev/null
+++ b/synapse/storage/schema/delta/52/device_list_streams_unique_idx.sql
@@ -0,0 +1,36 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- register a background update which will create a unique index on
+-- device_lists_remote_cache
+INSERT into background_updates (update_name, progress_json)
+ VALUES ('device_lists_remote_cache_unique_idx', '{}');
+
+-- and one on device_lists_remote_extremeties
+INSERT into background_updates (update_name, progress_json, depends_on)
+ VALUES (
+ 'device_lists_remote_extremeties_unique_idx', '{}',
+
+ -- doesn't really depend on this, but we need to make sure both happen
+ -- before we drop the old indexes.
+ 'device_lists_remote_cache_unique_idx'
+ );
+
+-- once they complete, we can drop the old indexes.
+INSERT into background_updates (update_name, progress_json, depends_on)
+ VALUES (
+ 'drop_device_list_streams_non_unique_indexes', '{}',
+ 'device_lists_remote_extremeties_unique_idx'
+ );
diff --git a/synapse/storage/schema/delta/52/e2e_room_keys.sql b/synapse/storage/schema/delta/52/e2e_room_keys.sql
new file mode 100644
index 00000000..db687ccc
--- /dev/null
+++ b/synapse/storage/schema/delta/52/e2e_room_keys.sql
@@ -0,0 +1,53 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Change version column to an integer so we can do MAX() sensibly
+ */
+CREATE TABLE e2e_room_keys_versions_new (
+ user_id TEXT NOT NULL,
+ version BIGINT NOT NULL,
+ algorithm TEXT NOT NULL,
+ auth_data TEXT NOT NULL,
+ deleted SMALLINT DEFAULT 0 NOT NULL
+);
+
+INSERT INTO e2e_room_keys_versions_new
+ SELECT user_id, CAST(version as BIGINT), algorithm, auth_data, deleted FROM e2e_room_keys_versions;
+
+DROP TABLE e2e_room_keys_versions;
+ALTER TABLE e2e_room_keys_versions_new RENAME TO e2e_room_keys_versions;
+
+CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version);
+
+/* Change e2e_rooms_keys to match
+ */
+CREATE TABLE e2e_room_keys_new (
+ user_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ session_id TEXT NOT NULL,
+ version BIGINT NOT NULL,
+ first_message_index INT,
+ forwarded_count INT,
+ is_verified BOOLEAN,
+ session_data TEXT NOT NULL
+);
+
+INSERT INTO e2e_room_keys_new
+ SELECT user_id, room_id, session_id, CAST(version as BIGINT), first_message_index, forwarded_count, is_verified, session_data FROM e2e_room_keys;
+
+DROP TABLE e2e_room_keys;
+ALTER TABLE e2e_room_keys_new RENAME TO e2e_room_keys;
+
+CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id);
diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py
index 5623391f..158e9dbe 100644
--- a/synapse/storage/signatures.py
+++ b/synapse/storage/signatures.py
@@ -27,7 +27,7 @@ from ._base import SQLBaseStore
# py2 sqlite has buffer hardcoded as only binary type, so we must use it,
# despite being deprecated and removed in favor of memoryview
if six.PY2:
- db_binary_type = buffer
+ db_binary_type = six.moves.builtins.buffer
else:
db_binary_type = memoryview
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 4b971efd..d737bd67 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -19,6 +19,8 @@ from collections import namedtuple
from six import iteritems, itervalues
from six.moves import range
+import attr
+
from twisted.internet import defer
from synapse.api.constants import EventTypes
@@ -48,6 +50,318 @@ class _GetStateGroupDelta(namedtuple("_GetStateGroupDelta", ("prev_group", "delt
return len(self.delta_ids) if self.delta_ids else 0
+@attr.s(slots=True)
+class StateFilter(object):
+ """A filter used when querying for state.
+
+ Attributes:
+ types (dict[str, set[str]|None]): Map from type to set of state keys (or
+ None). This specifies which state_keys for the given type to fetch
+ from the DB. If None then all events with that type are fetched. If
+ the set is empty then no events with that type are fetched.
+ include_others (bool): Whether to fetch events with types that do not
+ appear in `types`.
+ """
+
+ types = attr.ib()
+ include_others = attr.ib(default=False)
+
+ def __attrs_post_init__(self):
+ # If `include_others` is set we canonicalise the filter by removing
+ # wildcards from the types dictionary
+ if self.include_others:
+ self.types = {
+ k: v for k, v in iteritems(self.types)
+ if v is not None
+ }
+
+ @staticmethod
+ def all():
+ """Creates a filter that fetches everything.
+
+ Returns:
+ StateFilter
+ """
+ return StateFilter(types={}, include_others=True)
+
+ @staticmethod
+ def none():
+ """Creates a filter that fetches nothing.
+
+ Returns:
+ StateFilter
+ """
+ return StateFilter(types={}, include_others=False)
+
+ @staticmethod
+ def from_types(types):
+ """Creates a filter that only fetches the given types
+
+ Args:
+ types (Iterable[tuple[str, str|None]]): A list of type and state
+ keys to fetch. A state_key of None fetches everything for
+ that type
+
+ Returns:
+ StateFilter
+ """
+ type_dict = {}
+ for typ, s in types:
+ if typ in type_dict:
+ if type_dict[typ] is None:
+ continue
+
+ if s is None:
+ type_dict[typ] = None
+ continue
+
+ type_dict.setdefault(typ, set()).add(s)
+
+ return StateFilter(types=type_dict)
+
+ @staticmethod
+ def from_lazy_load_member_list(members):
+ """Creates a filter that returns all non-member events, plus the member
+ events for the given users
+
+ Args:
+ members (iterable[str]): Set of user IDs
+
+ Returns:
+ StateFilter
+ """
+ return StateFilter(
+ types={EventTypes.Member: set(members)},
+ include_others=True,
+ )
+
+ def return_expanded(self):
+ """Creates a new StateFilter where type wild cards have been removed
+ (except for memberships). The returned filter is a superset of the
+ current one, i.e. anything that passes the current filter will pass
+ the returned filter.
+
+ This helps the caching as the DictionaryCache knows if it has *all* the
+ state, but does not know if it has all of the keys of a particular type,
+ which makes wildcard lookups expensive unless we have a complete cache.
+ Hence, if we are doing a wildcard lookup, populate the cache fully so
+ that we can do an efficient lookup next time.
+
+ Note that since we have two caches, one for membership events and one for
+ other events, we can be a bit more clever than simply returning
+ `StateFilter.all()` if `has_wildcards()` is True.
+
+ We return a StateFilter where:
+ 1. the list of membership events to return is the same
+ 2. if there is a wildcard that matches non-member events we
+ return all non-member events
+
+ Returns:
+ StateFilter
+ """
+
+ if self.is_full():
+ # If we're going to return everything then there's nothing to do
+ return self
+
+ if not self.has_wildcards():
+ # If there are no wild cards, there's nothing to do
+ return self
+
+ if EventTypes.Member in self.types:
+ get_all_members = self.types[EventTypes.Member] is None
+ else:
+ get_all_members = self.include_others
+
+ has_non_member_wildcard = self.include_others or any(
+ state_keys is None
+ for t, state_keys in iteritems(self.types)
+ if t != EventTypes.Member
+ )
+
+ if not has_non_member_wildcard:
+ # If there are no non-member wild cards we can just return ourselves
+ return self
+
+ if get_all_members:
+ # We want to return everything.
+ return StateFilter.all()
+ else:
+ # We want to return all non-members, but only particular
+ # memberships
+ return StateFilter(
+ types={EventTypes.Member: self.types[EventTypes.Member]},
+ include_others=True,
+ )
+
+ def make_sql_filter_clause(self):
+ """Converts the filter to an SQL clause.
+
+ For example:
+
+ f = StateFilter.from_types([("m.room.create", "")])
+ clause, args = f.make_sql_filter_clause()
+ clause == "(type = ? AND state_key = ?)"
+ args == ['m.room.create', '']
+
+
+ Returns:
+ tuple[str, list]: The SQL string (may be empty) and arguments. An
+ empty SQL string is returned when the filter matches everything
+ (i.e. is "full").
+ """
+
+ where_clause = ""
+ where_args = []
+
+ if self.is_full():
+ return where_clause, where_args
+
+ if not self.include_others and not self.types:
+ # i.e. this is an empty filter, so we need to return a clause that
+ # will match nothing
+ return "1 = 2", []
+
+ # First we build up a list of clauses for each type/state_key combo
+ clauses = []
+ for etype, state_keys in iteritems(self.types):
+ if state_keys is None:
+ clauses.append("(type = ?)")
+ where_args.append(etype)
+ continue
+
+ for state_key in state_keys:
+ clauses.append("(type = ? AND state_key = ?)")
+ where_args.extend((etype, state_key))
+
+ # This will match anything that appears in `self.types`
+ where_clause = " OR ".join(clauses)
+
+ # If we want to include stuff that's not in the types dict then we add
+ # an `OR type NOT IN (...)` clause to the end.
+ if self.include_others:
+ if where_clause:
+ where_clause += " OR "
+
+ where_clause += "type NOT IN (%s)" % (
+ ",".join(["?"] * len(self.types)),
+ )
+ where_args.extend(self.types)
+
+ return where_clause, where_args
+
+ def max_entries_returned(self):
+ """Returns the maximum number of entries this filter will return if
+ known, otherwise returns None.
+
+ For example a simple state filter asking for `("m.room.create", "")`
+ will return 1, whereas the default state filter will return None.
+
+ This is used to bail out early if the right number of entries have been
+ fetched.
+ """
+ if self.has_wildcards():
+ return None
+
+ return len(self.concrete_types())
+
+ def filter_state(self, state_dict):
+ """Returns the state filtered with by this StateFilter
+
+ Args:
+ state_dict (dict[tuple[str, str], Any]): The state map to filter
+
+ Returns:
+ dict[tuple[str, str], Any]: The filtered state map
+ """
+ if self.is_full():
+ return dict(state_dict)
+
+ filtered_state = {}
+ for k, v in iteritems(state_dict):
+ typ, state_key = k
+ if typ in self.types:
+ state_keys = self.types[typ]
+ if state_keys is None or state_key in state_keys:
+ filtered_state[k] = v
+ elif self.include_others:
+ filtered_state[k] = v
+
+ return filtered_state
+
+ def is_full(self):
+ """Whether this filter fetches everything or not
+
+ Returns:
+ bool
+ """
+ return self.include_others and not self.types
+
+ def has_wildcards(self):
+ """Whether the filter includes wildcards or is attempting to fetch
+ specific state.
+
+ Returns:
+ bool
+ """
+
+ return (
+ self.include_others
+ or any(
+ state_keys is None
+ for state_keys in itervalues(self.types)
+ )
+ )
+
+ def concrete_types(self):
+ """Returns a list of concrete type/state_keys (i.e. not None) that
+ will be fetched. This will be a complete list if `has_wildcards`
+ returns False, but otherwise will be a subset (or even empty).
+
+ Returns:
+ list[tuple[str,str]]
+ """
+ return [
+ (t, s)
+ for t, state_keys in iteritems(self.types)
+ if state_keys is not None
+ for s in state_keys
+ ]
+
+ def get_member_split(self):
+ """Return the filter split into two: one which assumes it's exclusively
+ matching against member state, and one which assumes it's matching
+ against non member state.
+
+ This is useful due to the returned filters giving correct results for
+ `is_full()`, `has_wildcards()`, etc, when operating against maps that
+ either exclusively contain member events or only contain non-member
+ events. (Which is the case when dealing with the member vs non-member
+ state caches).
+
+ Returns:
+ tuple[StateFilter, StateFilter]: The member and non member filters
+ """
+
+ if EventTypes.Member in self.types:
+ state_keys = self.types[EventTypes.Member]
+ if state_keys is None:
+ member_filter = StateFilter.all()
+ else:
+ member_filter = StateFilter({EventTypes.Member: state_keys})
+ elif self.include_others:
+ member_filter = StateFilter.all()
+ else:
+ member_filter = StateFilter.none()
+
+ non_member_filter = StateFilter(
+ types={k: v for k, v in iteritems(self.types) if k != EventTypes.Member},
+ include_others=self.include_others,
+ )
+
+ return member_filter, non_member_filter
+
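+ # A hypothetical illustration of get_member_split: splitting a filter asking
+ # for the room name plus one member yields a member-only filter and a
+ # non-member filter, each answering is_full()/has_wildcards() correctly for
+ # its own cache:
+ #   f = StateFilter(
+ #       types={"m.room.name": {""}, "m.room.member": {"@alice:example.org"}},
+ #       include_others=False,
+ #   )
+ #   member_f, non_member_f = f.get_member_split()
+ #   # member_f.types     == {"m.room.member": {"@alice:example.org"}}
+ #   # non_member_f.types == {"m.room.name": {""}}
+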
+
# this inherits from EventsWorkerStore because it calls self.get_events
class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
"""The parts of StateGroupStore that can be called from workers.
@@ -152,61 +466,41 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
)
# FIXME: how should this be cached?
- def get_filtered_current_state_ids(self, room_id, types, filtered_types=None):
+ def get_filtered_current_state_ids(self, room_id, state_filter=StateFilter.all()):
"""Get the current state event of a given type for a room based on the
current_state_events table. This may not be as up-to-date as the result
of doing a fresh state resolution as per state_handler.get_current_state
+
Args:
room_id (str)
- types (list[(Str, (Str|None))]): List of (type, state_key) tuples
- which are used to filter the state fetched. `state_key` may be
- None, which matches any `state_key`
- filtered_types (list[Str]|None): List of types to apply the above filter to.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
+
Returns:
- deferred: dict of (type, state_key) -> event
+ Deferred[dict[tuple[str, str], str]]: Map from type/state_key to
+ event ID.
"""
- include_other_types = False if filtered_types is None else True
-
def _get_filtered_current_state_ids_txn(txn):
results = {}
- sql = """SELECT type, state_key, event_id FROM current_state_events
- WHERE room_id = ? %s"""
- # Turns out that postgres doesn't like doing a list of OR's and
- # is about 1000x slower, so we just issue a query for each specific
- # type seperately.
- if types:
- clause_to_args = [
- (
- "AND type = ? AND state_key = ?",
- (etype, state_key)
- ) if state_key is not None else (
- "AND type = ?",
- (etype,)
- )
- for etype, state_key in types
- ]
-
- if include_other_types:
- unique_types = set(filtered_types)
- clause_to_args.append(
- (
- "AND type <> ? " * len(unique_types),
- list(unique_types)
- )
- )
- else:
- # If types is None we fetch all the state, and so just use an
- # empty where clause with no extra args.
- clause_to_args = [("", [])]
- for where_clause, where_args in clause_to_args:
- args = [room_id]
- args.extend(where_args)
- txn.execute(sql % (where_clause,), args)
- for row in txn:
- typ, state_key, event_id = row
- key = (intern_string(typ), intern_string(state_key))
- results[key] = event_id
+ sql = """
+ SELECT type, state_key, event_id FROM current_state_events
+ WHERE room_id = ?
+ """
+
+ where_clause, where_args = state_filter.make_sql_filter_clause()
+
+ if where_clause:
+ sql += " AND (%s)" % (where_clause,)
+
+ args = [room_id]
+ args.extend(where_args)
+ txn.execute(sql, args)
+ for row in txn:
+ typ, state_key, event_id = row
+ key = (intern_string(typ), intern_string(state_key))
+ results[key] = event_id
+
return results
return self.runInteraction(
@@ -255,7 +549,17 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
)
@defer.inlineCallbacks
- def get_state_groups_ids(self, room_id, event_ids):
+ def get_state_groups_ids(self, _room_id, event_ids):
+ """Get the event IDs of all the state for the state groups for the given events
+
+ Args:
+ _room_id (str): id of the room for these events
+ event_ids (iterable[str]): ids of the events
+
+ Returns:
+ Deferred[dict[int, dict[tuple[str, str], str]]]:
+ dict of state_group_id -> (dict of (type, state_key) -> event id)
+ """
if not event_ids:
defer.returnValue({})
@@ -270,7 +574,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
@defer.inlineCallbacks
def get_state_ids_for_group(self, state_group):
- """Get the state IDs for the given state group
+ """Get the event IDs of all the state in the given state group
Args:
state_group (int)
@@ -286,7 +590,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
def get_state_groups(self, room_id, event_ids):
""" Get the state groups for the given list of event_ids
- The return value is a dict mapping group names to lists of events.
+ Returns:
+ Deferred[dict[int, list[EventBase]]]:
+ dict of state_group_id -> list of state events.
"""
if not event_ids:
defer.returnValue({})
@@ -310,21 +616,17 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
})
@defer.inlineCallbacks
- def _get_state_groups_from_groups(self, groups, types, members=None):
+ def _get_state_groups_from_groups(self, groups, state_filter):
"""Returns the state groups for a given set of groups, filtering on
types of state events.
Args:
groups(list[int]): list of state group IDs to query
- types (Iterable[str, str|None]|None): list of 2-tuples of the form
- (`type`, `state_key`), where a `state_key` of `None` matches all
- state_keys for the `type`. If None, all types are returned.
- members (bool|None): If not None, then, in addition to any filtering
- implied by types, the results are also filtered to only include
- member events (if True), or to exclude member events (if False)
-
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns:
- dictionary state_group -> (dict of (type, state_key) -> event id)
+ Deferred[dict[int, dict[tuple[str, str], str]]]:
+ dict of state_group_id -> (dict of (type, state_key) -> event id)
"""
results = {}
@@ -332,19 +634,23 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
for chunk in chunks:
res = yield self.runInteraction(
"_get_state_groups_from_groups",
- self._get_state_groups_from_groups_txn, chunk, types, members,
+ self._get_state_groups_from_groups_txn, chunk, state_filter,
)
results.update(res)
defer.returnValue(results)
def _get_state_groups_from_groups_txn(
- self, txn, groups, types=None, members=None,
+ self, txn, groups, state_filter=StateFilter.all(),
):
results = {group: {} for group in groups}
- if types is not None:
- types = list(set(types)) # deduplicate types list
+ where_clause, where_args = state_filter.make_sql_filter_clause()
+
+ # Unless the filter clause is empty, we're going to append it after an
+ # existing where clause
+ if where_clause:
+ where_clause = " AND (%s)" % (where_clause,)
if isinstance(self.database_engine, PostgresEngine):
# Temporarily disable sequential scans in this transaction. This is
@@ -360,79 +666,33 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
# group for the given type, state_key.
# This may return multiple rows per (type, state_key), but last_value
# should be the same.
- sql = ("""
+ sql = """
WITH RECURSIVE state(state_group) AS (
VALUES(?::bigint)
UNION ALL
SELECT prev_state_group FROM state_group_edges e, state s
WHERE s.state_group = e.state_group
)
- SELECT type, state_key, last_value(event_id) OVER (
+ SELECT DISTINCT type, state_key, last_value(event_id) OVER (
PARTITION BY type, state_key ORDER BY state_group ASC
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
) AS event_id FROM state_groups_state
WHERE state_group IN (
SELECT state_group FROM state
)
- %s
- """)
-
- if members is True:
- sql += " AND type = '%s'" % (EventTypes.Member,)
- elif members is False:
- sql += " AND type <> '%s'" % (EventTypes.Member,)
-
- # Turns out that postgres doesn't like doing a list of OR's and
- # is about 1000x slower, so we just issue a query for each specific
- # type seperately.
- if types is not None:
- clause_to_args = [
- (
- "AND type = ? AND state_key = ?",
- (etype, state_key)
- ) if state_key is not None else (
- "AND type = ?",
- (etype,)
- )
- for etype, state_key in types
- ]
- else:
- # If types is None we fetch all the state, and so just use an
- # empty where clause with no extra args.
- clause_to_args = [("", [])]
+ """
- for where_clause, where_args in clause_to_args:
- for group in groups:
- args = [group]
- args.extend(where_args)
+ for group in groups:
+ args = [group]
+ args.extend(where_args)
- txn.execute(sql % (where_clause,), args)
- for row in txn:
- typ, state_key, event_id = row
- key = (typ, state_key)
- results[group][key] = event_id
+ txn.execute(sql + where_clause, args)
+ for row in txn:
+ typ, state_key, event_id = row
+ key = (typ, state_key)
+ results[group][key] = event_id
else:
- where_args = []
- where_clauses = []
- wildcard_types = False
- if types is not None:
- for typ in types:
- if typ[1] is None:
- where_clauses.append("(type = ?)")
- where_args.append(typ[0])
- wildcard_types = True
- else:
- where_clauses.append("(type = ? AND state_key = ?)")
- where_args.extend([typ[0], typ[1]])
-
- where_clause = "AND (%s)" % (" OR ".join(where_clauses))
- else:
- where_clause = ""
-
- if members is True:
- where_clause += " AND type = '%s'" % EventTypes.Member
- elif members is False:
- where_clause += " AND type <> '%s'" % EventTypes.Member
+ max_entries_returned = state_filter.max_entries_returned()
# We don't use WITH RECURSIVE on sqlite3 as there are distributions
# that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
@@ -446,12 +706,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
# without the right indices (which we can't add until
# after we finish deduping state, which requires this func)
args = [next_group]
- if types:
- args.extend(where_args)
+ args.extend(where_args)
txn.execute(
"SELECT type, state_key, event_id FROM state_groups_state"
- " WHERE state_group = ? %s" % (where_clause,),
+ " WHERE state_group = ? " + where_clause,
args
)
results[group].update(
@@ -467,9 +726,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
# wildcards (i.e. Nones) in which case we have to do an exhaustive
# search
if (
- types is not None and
- not wildcard_types and
- len(results[group]) == len(types)
+ max_entries_returned is not None and
+ len(results[group]) == max_entries_returned
):
break
@@ -484,20 +742,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
return results
@defer.inlineCallbacks
- def get_state_for_events(self, event_ids, types, filtered_types=None):
+ def get_state_for_events(self, event_ids, state_filter=StateFilter.all()):
"""Given a list of event_ids and type tuples, return a list of state
- dicts for each event. The state dicts will only have the type/state_keys
- that are in the `types` list.
+ dicts for each event.
Args:
event_ids (list[string])
- types (list[(str, str|None)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. If `state_key` is None,
- all events are returned of the given type.
- May be None, which matches any key.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns:
deferred: A dict of (event_id) -> (type, state_key) -> [state_events]
@@ -507,7 +759,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
)
groups = set(itervalues(event_to_groups))
- group_to_state = yield self._get_state_for_groups(groups, types, filtered_types)
+ group_to_state = yield self._get_state_for_groups(groups, state_filter)
state_event_map = yield self.get_events(
[ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)],
@@ -526,20 +778,15 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
defer.returnValue({event: event_to_state[event] for event in event_ids})
@defer.inlineCallbacks
- def get_state_ids_for_events(self, event_ids, types=None, filtered_types=None):
+ def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()):
"""
Get the state dicts corresponding to a list of events, containing the event_ids
of the state events (as opposed to the events themselves)
Args:
event_ids(list(str)): events whose state should be returned
- types(list[(str, str|None)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. If `state_key` is None,
- all events are returned of the given type.
- May be None, which matches any key.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns:
A deferred dict from event_id -> (type, state_key) -> event_id
@@ -549,7 +796,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
)
groups = set(itervalues(event_to_groups))
- group_to_state = yield self._get_state_for_groups(groups, types, filtered_types)
+ group_to_state = yield self._get_state_for_groups(groups, state_filter)
event_to_state = {
event_id: group_to_state[group]
@@ -559,45 +806,35 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
defer.returnValue({event: event_to_state[event] for event in event_ids})
@defer.inlineCallbacks
- def get_state_for_event(self, event_id, types=None, filtered_types=None):
+ def get_state_for_event(self, event_id, state_filter=StateFilter.all()):
"""
Get the state dict corresponding to a particular event
Args:
event_id(str): event whose state should be returned
- types(list[(str, str|None)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. If `state_key` is None,
- all events are returned of the given type.
- May be None, which matches any key.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns:
A deferred dict from (type, state_key) -> state_event
"""
- state_map = yield self.get_state_for_events([event_id], types, filtered_types)
+ state_map = yield self.get_state_for_events([event_id], state_filter)
defer.returnValue(state_map[event_id])
@defer.inlineCallbacks
- def get_state_ids_for_event(self, event_id, types=None, filtered_types=None):
+ def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()):
"""
Get the state dict corresponding to a particular event
Args:
event_id(str): event whose state should be returned
- types(list[(str, str|None)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. If `state_key` is None,
- all events are returned of the given type.
- May be None, which matches any key.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns:
A deferred dict from (type, state_key) -> state_event
"""
- state_map = yield self.get_state_ids_for_events([event_id], types, filtered_types)
+ state_map = yield self.get_state_ids_for_events([event_id], state_filter)
defer.returnValue(state_map[event_id])
@cached(max_entries=50000)
@@ -628,18 +865,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
defer.returnValue({row["event_id"]: row["state_group"] for row in rows})
- def _get_some_state_from_cache(self, cache, group, types, filtered_types=None):
+ def _get_state_for_group_using_cache(self, cache, group, state_filter):
"""Checks if group is in cache. See `_get_state_for_groups`
Args:
cache(DictionaryCache): the state group cache to use
group(int): The state group to lookup
- types(list[str, str|None]): List of 2-tuples of the form
- (`type`, `state_key`), where a `state_key` of `None` matches all
- state_keys for the `type`.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns 2-tuple (`state_dict`, `got_all`).
`got_all` is a bool indicating if we successfully retrieved all
@@ -648,124 +881,102 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
"""
is_all, known_absent, state_dict_ids = cache.get(group)
- type_to_key = {}
+ if is_all or state_filter.is_full():
+ # Either we have everything or want everything, either way
+ # `is_all` tells us whether we've gotten everything.
+ return state_filter.filter_state(state_dict_ids), is_all
# tracks whether any of our requested types are missing from the cache
missing_types = False
- for typ, state_key in types:
- key = (typ, state_key)
-
- if (
- state_key is None or
- (filtered_types is not None and typ not in filtered_types)
- ):
- type_to_key[typ] = None
- # we mark the type as missing from the cache because
- # when the cache was populated it might have been done with a
- # restricted set of state_keys, so the wildcard will not work
- # and the cache may be incomplete.
- missing_types = True
- else:
- if type_to_key.get(typ, object()) is not None:
- type_to_key.setdefault(typ, set()).add(state_key)
-
+ if state_filter.has_wildcards():
+ # We don't know if we fetched all the state keys for the types in
+ # the filter that are wildcards, so we have to assume that we may
+ # have missed some.
+ missing_types = True
+ else:
+ # There aren't any wild cards, so `concrete_types()` returns the
+ # complete list of event types we're wanting.
+ for key in state_filter.concrete_types():
if key not in state_dict_ids and key not in known_absent:
missing_types = True
+ break
- sentinel = object()
-
- def include(typ, state_key):
- valid_state_keys = type_to_key.get(typ, sentinel)
- if valid_state_keys is sentinel:
- return filtered_types is not None and typ not in filtered_types
- if valid_state_keys is None:
- return True
- if state_key in valid_state_keys:
- return True
- return False
-
- got_all = is_all
- if not got_all:
- # the cache is incomplete. We may still have got all the results we need, if
- # we don't have any wildcards in the match list.
- if not missing_types and filtered_types is None:
- got_all = True
-
- return {
- k: v for k, v in iteritems(state_dict_ids)
- if include(k[0], k[1])
- }, got_all
-
- def _get_all_state_from_cache(self, cache, group):
- """Checks if group is in cache. See `_get_state_for_groups`
-
- Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool
- indicating if we successfully retrieved all requests state from the
- cache, if False we need to query the DB for the missing state.
-
- Args:
- cache(DictionaryCache): the state group cache to use
- group: The state group to lookup
- """
- is_all, _, state_dict_ids = cache.get(group)
-
- return state_dict_ids, is_all
+ return state_filter.filter_state(state_dict_ids), not missing_types
@defer.inlineCallbacks
- def _get_state_for_groups(self, groups, types=None, filtered_types=None):
+ def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key
Args:
groups (iterable[int]): list of state groups for which we want
to get the state.
- types (None|iterable[(str, None|str)]):
- indicates the state type/keys required. If None, the whole
- state is fetched and returned.
-
- Otherwise, each entry should be a `(type, state_key)` tuple to
- include in the response. A `state_key` of None is a wildcard
- meaning that we require all state with that type.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
-
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns:
- Deferred[dict[int, dict[(type, state_key), EventBase]]]
- a dictionary mapping from state group to state dictionary.
+ Deferred[dict[int, dict[tuple[str, str], str]]]:
+ dict of state_group_id -> (dict of (type, state_key) -> event id)
"""
- if types is not None:
- non_member_types = [t for t in types if t[0] != EventTypes.Member]
-
- if filtered_types is not None and EventTypes.Member not in filtered_types:
- # we want all of the membership events
- member_types = None
- else:
- member_types = [t for t in types if t[0] == EventTypes.Member]
- else:
- non_member_types = None
- member_types = None
+ member_filter, non_member_filter = state_filter.get_member_split()
- non_member_state = yield self._get_state_for_groups_using_cache(
- groups, self._state_group_cache, non_member_types, filtered_types,
+ # Now we look them up in the member and non-member caches
+ non_member_state, incomplete_groups_nm, = (
+ yield self._get_state_for_groups_using_cache(
+ groups, self._state_group_cache,
+ state_filter=non_member_filter,
+ )
)
- # XXX: we could skip this entirely if member_types is []
- member_state = yield self._get_state_for_groups_using_cache(
- # we set filtered_types=None as member_state only ever contain members.
- groups, self._state_group_members_cache, member_types, None,
+
+ member_state, incomplete_groups_m, = (
+ yield self._get_state_for_groups_using_cache(
+ groups, self._state_group_members_cache,
+ state_filter=member_filter,
+ )
)
- state = non_member_state
+ state = dict(non_member_state)
for group in groups:
state[group].update(member_state[group])
+ # Now fetch any missing groups from the database
+
+ incomplete_groups = incomplete_groups_m | incomplete_groups_nm
+
+ if not incomplete_groups:
+ defer.returnValue(state)
+
+ cache_sequence_nm = self._state_group_cache.sequence
+ cache_sequence_m = self._state_group_members_cache.sequence
+
+ # Help the cache hit ratio by expanding the filter a bit
+ db_state_filter = state_filter.return_expanded()
+
+ group_to_state_dict = yield self._get_state_groups_from_groups(
+ list(incomplete_groups),
+ state_filter=db_state_filter,
+ )
+
+ # Now let's update the caches
+ self._insert_into_cache(
+ group_to_state_dict,
+ db_state_filter,
+ cache_seq_num_members=cache_sequence_m,
+ cache_seq_num_non_members=cache_sequence_nm,
+ )
+
+ # And finally update the result dict, by filtering out any extra
+ # stuff we pulled out of the database.
+ for group, group_state_dict in iteritems(group_to_state_dict):
+ # We just replace any existing entries, as we will have loaded
+ # everything we need from the database anyway.
+ state[group] = state_filter.filter_state(group_state_dict)
+
defer.returnValue(state)
- @defer.inlineCallbacks
def _get_state_for_groups_using_cache(
- self, groups, cache, types=None, filtered_types=None
+ self, groups, cache, state_filter,
):
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key, querying from a specific cache.
@@ -776,89 +987,85 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
cache (DictionaryCache): the cache of group ids to state dicts which
we will pass through - either the normal state cache or the specific
members state cache.
- types (None|iterable[(str, None|str)]):
- indicates the state type/keys required. If None, the whole
- state is fetched and returned.
-
- Otherwise, each entry should be a `(type, state_key)` tuple to
- include in the response. A `state_key` of None is a wildcard
- meaning that we require all state with that type.
- filtered_types(list[str]|None): Only apply filtering via `types` to this
- list of event types. Other types of events are returned unfiltered.
- If None, `types` filtering is applied to all events.
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
Returns:
- Deferred[dict[int, dict[(type, state_key), EventBase]]]
- a dictionary mapping from state group to state dictionary.
+ tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of
+ dict of state_group_id -> (dict of (type, state_key) -> event id)
+ of entries in the cache, and the state group ids either missing
+ from the cache or incomplete.
"""
- if types:
- types = frozenset(types)
results = {}
- missing_groups = []
- if types is not None:
- for group in set(groups):
- state_dict_ids, got_all = self._get_some_state_from_cache(
- cache, group, types, filtered_types
- )
- results[group] = state_dict_ids
+ incomplete_groups = set()
+ for group in set(groups):
+ state_dict_ids, got_all = self._get_state_for_group_using_cache(
+ cache, group, state_filter
+ )
+ results[group] = state_dict_ids
- if not got_all:
- missing_groups.append(group)
- else:
- for group in set(groups):
- state_dict_ids, got_all = self._get_all_state_from_cache(
- cache, group
- )
+ if not got_all:
+ incomplete_groups.add(group)
- results[group] = state_dict_ids
+ return results, incomplete_groups
- if not got_all:
- missing_groups.append(group)
+ def _insert_into_cache(self, group_to_state_dict, state_filter,
+ cache_seq_num_members, cache_seq_num_non_members):
+ """Inserts results from querying the database into the relevant cache.
- if missing_groups:
- # Okay, so we have some missing_types, let's fetch them.
- cache_seq_num = cache.sequence
+ Args:
+ group_to_state_dict (dict): The new entries pulled from database.
+ Map from state group to state dict
+ state_filter (StateFilter): The state filter used to fetch state
+ from the database.
+ cache_seq_num_members (int): Sequence number of the member cache as
+ of the last cache lookup
+ cache_seq_num_non_members (int): Sequence number of the non-member
+ cache as of the last cache lookup
+ """
- # the DictionaryCache knows if it has *all* the state, but
- # does not know if it has all of the keys of a particular type,
- # which makes wildcard lookups expensive unless we have a complete
- # cache. Hence, if we are doing a wildcard lookup, populate the
- # cache fully so that we can do an efficient lookup next time.
+ # We need to work out which types we've fetched from the DB for the
+ # member vs non-member caches. This should be as accurate as possible,
+ # but can be an underestimate (e.g. when we have wildcards)
- if filtered_types or (types and any(k is None for (t, k) in types)):
- types_to_fetch = None
- else:
- types_to_fetch = types
+ member_filter, non_member_filter = state_filter.get_member_split()
+ if member_filter.is_full():
+ # We fetched all member events
+ member_types = None
+ else:
+ # `concrete_types()` will only return a subset when there are
+ # wildcards in the filter, but that's fine.
+ member_types = member_filter.concrete_types()
- group_to_state_dict = yield self._get_state_groups_from_groups(
- missing_groups, types_to_fetch, cache == self._state_group_members_cache,
- )
+ if non_member_filter.is_full():
+ # We fetched all non member events
+ non_member_types = None
+ else:
+ non_member_types = non_member_filter.concrete_types()
- for group, group_state_dict in iteritems(group_to_state_dict):
- state_dict = results[group]
-
- # update the result, filtering by `types`.
- if types:
- for k, v in iteritems(group_state_dict):
- (typ, _) = k
- if (
- (k in types or (typ, None) in types) or
- (filtered_types and typ not in filtered_types)
- ):
- state_dict[k] = v
+ for group, group_state_dict in iteritems(group_to_state_dict):
+ state_dict_members = {}
+ state_dict_non_members = {}
+
+ for k, v in iteritems(group_state_dict):
+ if k[0] == EventTypes.Member:
+ state_dict_members[k] = v
else:
- state_dict.update(group_state_dict)
-
- # update the cache with all the things we fetched from the
- # database.
- cache.update(
- cache_seq_num,
- key=group,
- value=group_state_dict,
- fetched_keys=types_to_fetch,
- )
+ state_dict_non_members[k] = v
- defer.returnValue(results)
+ self._state_group_members_cache.update(
+ cache_seq_num_members,
+ key=group,
+ value=state_dict_members,
+ fetched_keys=member_types,
+ )
+
+ self._state_group_cache.update(
+ cache_seq_num_non_members,
+ key=group,
+ value=state_dict_non_members,
+ fetched_keys=non_member_types,
+ )
def store_state_group(self, event_id, room_id, prev_group, delta_ids,
current_state_ids):
@@ -1050,6 +1257,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
+ EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
def __init__(self, db_conn, hs):
super(StateStore, self).__init__(db_conn, hs)
@@ -1068,6 +1276,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
columns=["state_key"],
where_clause="type='m.room.member'",
)
+ self.register_background_index_update(
+ self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME,
+ index_name="event_to_state_groups_sg_index",
+ table="event_to_state_groups",
+ columns=["state_group"],
+ )
def _store_event_state_mappings_txn(self, txn, events_and_contexts):
state_groups = {}
@@ -1167,12 +1381,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
continue
prev_state = self._get_state_groups_from_groups_txn(
- txn, [prev_group], types=None
+ txn, [prev_group],
)
prev_state = prev_state[prev_group]
curr_state = self._get_state_groups_from_groups_txn(
- txn, [state_group], types=None
+ txn, [state_group],
)
curr_state = curr_state[state_group]
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 4c296d72..d6cfdba5 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -630,7 +630,21 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
@defer.inlineCallbacks
def get_all_new_events_stream(self, from_id, current_id, limit):
- """Get all new events"""
+ """Get all new events
+
+ Returns all events with from_id < stream_ordering <= current_id.
+
+ Args:
+ from_id (int): the stream_ordering of the last event we processed
+ current_id (int): the stream_ordering of the most recently processed event
+ limit (int): the maximum number of events to return
+
+ Returns:
+ Deferred[Tuple[int, list[FrozenEvent]]]: A tuple of (next_id, events), where
+ `next_id` is the next value to pass as `from_id` (it will either be the
+ stream_ordering of the last returned event, or, if fewer than `limit` events
+ were found, `current_id`).
+ """
def get_all_new_events_stream_txn(txn):
sql = (
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index 0c42bd33..d8bf953e 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -23,14 +23,14 @@ from canonicaljson import encode_canonical_json
from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.expiringcache import ExpiringCache
from ._base import SQLBaseStore, db_to_json
# py2 sqlite has buffer hardcoded as only binary type, so we must use it,
# despite being deprecated and removed in favor of memoryview
if six.PY2:
- db_binary_type = buffer
+ db_binary_type = six.moves.builtins.buffer
else:
db_binary_type = memoryview
@@ -50,6 +50,8 @@ _UpdateTransactionRow = namedtuple(
)
)
+SENTINEL = object()
+
class TransactionStore(SQLBaseStore):
"""A collection of queries for handling PDUs.
@@ -60,6 +62,12 @@ class TransactionStore(SQLBaseStore):
self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000)
+ self._destination_retry_cache = ExpiringCache(
+ cache_name="get_destination_retry_timings",
+ clock=self._clock,
+ expiry_ms=5 * 60 * 1000,
+ )
+
def get_received_txn_response(self, transaction_id, origin):
"""For an incoming transaction from a given origin, check if we have
already responded to it. If so, return the response code and response
@@ -156,7 +164,7 @@ class TransactionStore(SQLBaseStore):
"""
pass
- @cached(max_entries=10000)
+ @defer.inlineCallbacks
def get_destination_retry_timings(self, destination):
"""Gets the current retry timings (if any) for a given destination.
@@ -167,10 +175,20 @@ class TransactionStore(SQLBaseStore):
None if not retrying
Otherwise a dict for the retry scheme
"""
- return self.runInteraction(
+
+ result = self._destination_retry_cache.get(destination, SENTINEL)
+ if result is not SENTINEL:
+ defer.returnValue(result)
+
+ result = yield self.runInteraction(
"get_destination_retry_timings",
self._get_destination_retry_timings, destination)
+ # We don't hugely care about race conditions between getting and
+ # invalidating the cache, since we time out fairly quickly anyway.
+ self._destination_retry_cache[destination] = result
+ defer.returnValue(result)
+
def _get_destination_retry_timings(self, txn, destination):
result = self._simple_select_one_txn(
txn,
@@ -198,8 +216,7 @@ class TransactionStore(SQLBaseStore):
retry_interval (int) - how long until next retry in ms
"""
- # XXX: we could chose to not bother persisting this if our cache thinks
- # this is a NOOP
+ self._destination_retry_cache.pop(destination, None)
return self.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings,
@@ -212,10 +229,6 @@ class TransactionStore(SQLBaseStore):
retry_last_ts, retry_interval):
self.database_engine.lock_table(txn, "destinations")
- self._invalidate_cache_and_stream(
- txn, self.get_destination_retry_timings, (destination,)
- )
-
# We need to be careful here as the data may have changed from under us
# due to a worker setting the timings.
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 680ea928..0ae7e2ef 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -14,6 +14,7 @@
# limitations under the License.
import logging
+import re
from itertools import islice
import attr
@@ -68,7 +69,10 @@ class Clock(object):
"""
call = task.LoopingCall(f)
call.clock = self._reactor
- call.start(msec / 1000.0, now=False)
+ d = call.start(msec / 1000.0, now=False)
+ d.addErrback(
+ log_failure, "Looping call died", consumeErrors=False,
+ )
return call
def call_later(self, delay, callback, *args, **kwargs):
@@ -109,3 +113,53 @@ def batch_iter(iterable, size):
sourceiter = iter(iterable)
# call islice until it returns an empty tuple
return iter(lambda: tuple(islice(sourceiter, size)), ())
+
+
+def log_failure(failure, msg, consumeErrors=True):
+ """Creates a function suitable for passing to `Deferred.addErrback` that
+ logs any failures that occur.
+
+ Args:
+ msg (str): Message to log
+ consumeErrors (bool): If true consumes the failure, otherwise passes
+ on down the callback chain
+
+ Returns:
+ func(Failure)
+ """
+
+ logger.error(
+ msg,
+ exc_info=(
+ failure.type,
+ failure.value,
+ failure.getTracebackObject()
+ )
+ )
+
+ if not consumeErrors:
+ return failure
+
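+# A minimal usage sketch (the deferred and message are illustrative): attach
+# log_failure as an errback so a fire-and-forget deferred logs its failures
+# instead of silently dropping them.
+#
+#   d = run_some_background_task()
+#   d.addErrback(log_failure, "Background task died", consumeErrors=True)
+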
+
+def glob_to_regex(glob):
+ """Converts a glob to a compiled regex object.
+
+ The regex is anchored at the beginning and end of the string.
+
+ Args:
+ glob (str)
+
+ Returns:
+ re.RegexObject
+ """
+ res = ''
+ for c in glob:
+ if c == '*':
+ res = res + '.*'
+ elif c == '?':
+ res = res + '.'
+ else:
+ res = res + re.escape(c)
+
+ # \A anchors at start of string, \Z at end of string
+ return re.compile(r"\A" + res + r"\Z", re.IGNORECASE)
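+
+
+# A hypothetical illustration of glob_to_regex: "*.example.com" compiles to
+# r"\A.*\.example\.com\Z" (case-insensitive), so it matches "foo.example.com"
+# but not "foo.example.com.evil.org".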
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 9b3f2f4b..ec7b2c96 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -374,29 +374,25 @@ class ReadWriteLock(object):
defer.returnValue(_ctx_manager())
-class DeferredTimeoutError(Exception):
- """
- This error is raised by default when a L{Deferred} times out.
- """
-
+def _cancelled_to_timed_out_error(value, timeout):
+ if isinstance(value, failure.Failure):
+ value.trap(CancelledError)
+ raise defer.TimeoutError(timeout, "Deferred")
+ return value
-def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
- """
- Add a timeout to a deferred by scheduling it to be cancelled after
- timeout seconds.
- This is essentially a backport of deferred.addTimeout, which was introduced
- in twisted 16.5.
+def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
+ """The in built twisted `Deferred.addTimeout` fails to time out deferreds
+ that have a canceller that throws exceptions. This method creates a new
+ deferred that wraps and times out the given deferred, correctly handling
+ the case where the given deferred's canceller throws.
- If the deferred gets timed out, it errbacks with a DeferredTimeoutError,
- unless a cancelable function was passed to its initialization or unless
- a different on_timeout_cancel callable is provided.
+ NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred.
Args:
- deferred (defer.Deferred): deferred to be timed out
- timeout (Number): seconds to time out after
- reactor (twisted.internet.reactor): the Twisted reactor to use
-
+ deferred (Deferred)
+ timeout (float): Timeout in seconds
+ reactor (twisted.internet.reactor): The twisted reactor to use
on_timeout_cancel (callable): A callable which is called immediately
after the deferred times out, and not if this deferred is
otherwise cancelled before the timeout.
@@ -406,13 +402,26 @@ def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
the timeout.
The default callable (if none is provided) will translate a
- CancelledError Failure into a DeferredTimeoutError.
+ CancelledError Failure into a defer.TimeoutError.
+
+ Returns:
+ Deferred
"""
+
+ new_d = defer.Deferred()
+
timed_out = [False]
def time_it_out():
timed_out[0] = True
- deferred.cancel()
+
+ try:
+ deferred.cancel()
+ except: # noqa: E722, if we throw any exception it'll break time outs
+ logger.exception("Canceller failed during timeout")
+
+ if not new_d.called:
+ new_d.errback(defer.TimeoutError(timeout, "Deferred"))
delayed_call = reactor.callLater(timeout, time_it_out)
@@ -432,9 +441,14 @@ def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
deferred.addBoth(cancel_timeout)
+ def success_cb(val):
+ if not new_d.called:
+ new_d.callback(val)
-def _cancelled_to_timed_out_error(value, timeout):
- if isinstance(value, failure.Failure):
- value.trap(CancelledError)
- raise DeferredTimeoutError(timeout, "Deferred")
- return value
+ def failure_cb(val):
+ if not new_d.called:
+ new_d.errback(val)
+
+ deferred.addCallbacks(success_cb, failure_cb)
+
+ return new_d
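+
+
+# A minimal usage sketch (the deferred and timeout are illustrative). Note that
+# it is the *returned* deferred that times out, not the one passed in:
+#
+#   d = make_request()
+#   d = timeout_deferred(d, 10, reactor)
+#   # `d` now errbacks with defer.TimeoutError if no result arrives in 10s.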
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index 7b065b19..f37d5bec 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
import os
import six
@@ -20,6 +21,8 @@ from six.moves import intern
from prometheus_client.core import REGISTRY, Gauge, GaugeMetricFamily
+logger = logging.getLogger(__name__)
+
CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5))
@@ -76,16 +79,20 @@ def register_cache(cache_type, cache_name, cache):
return []
def collect(self):
- if cache_type == "response_cache":
- response_cache_size.labels(cache_name).set(len(cache))
- response_cache_hits.labels(cache_name).set(self.hits)
- response_cache_evicted.labels(cache_name).set(self.evicted_size)
- response_cache_total.labels(cache_name).set(self.hits + self.misses)
- else:
- cache_size.labels(cache_name).set(len(cache))
- cache_hits.labels(cache_name).set(self.hits)
- cache_evicted.labels(cache_name).set(self.evicted_size)
- cache_total.labels(cache_name).set(self.hits + self.misses)
+ try:
+ if cache_type == "response_cache":
+ response_cache_size.labels(cache_name).set(len(cache))
+ response_cache_hits.labels(cache_name).set(self.hits)
+ response_cache_evicted.labels(cache_name).set(self.evicted_size)
+ response_cache_total.labels(cache_name).set(self.hits + self.misses)
+ else:
+ cache_size.labels(cache_name).set(len(cache))
+ cache_hits.labels(cache_name).set(self.hits)
+ cache_evicted.labels(cache_name).set(self.evicted_size)
+ cache_total.labels(cache_name).set(self.hits + self.misses)
+ except Exception as e:
+ logger.warn("Error calculating metrics for %s: %s", cache_name, e)
+ raise
yield GaugeMetricFamily("__unused", "")
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index ce85b2ae..f3697802 100644
--- a/synapse/util/caches/expiringcache.py
+++ b/synapse/util/caches/expiringcache.py
@@ -16,12 +16,17 @@
import logging
from collections import OrderedDict
+from six import iteritems, itervalues
+
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.caches import register_cache
logger = logging.getLogger(__name__)
+SENTINEL = object()
+
+
class ExpiringCache(object):
def __init__(self, cache_name, clock, max_len=0, expiry_ms=0,
reset_expiry_on_get=False, iterable=False):
@@ -54,11 +59,8 @@ class ExpiringCache(object):
self.iterable = iterable
- self._size_estimate = 0
-
self.metrics = register_cache("expiring", cache_name, self)
- def start(self):
if not self._expiry_ms:
# Don't bother starting the loop if things never expire
return
@@ -75,16 +77,11 @@ class ExpiringCache(object):
now = self._clock.time_msec()
self._cache[key] = _CacheEntry(now, value)
- if self.iterable:
- self._size_estimate += len(value)
-
# Evict if there are now too many items
while self._max_len and len(self) > self._max_len:
_key, value = self._cache.popitem(last=False)
if self.iterable:
- removed_len = len(value.value)
- self.metrics.inc_evictions(removed_len)
- self._size_estimate -= removed_len
+ self.metrics.inc_evictions(len(value.value))
else:
self.metrics.inc_evictions()
@@ -101,6 +98,21 @@ class ExpiringCache(object):
return entry.value
+ def pop(self, key, default=SENTINEL):
+ """Removes and returns the value with the given key from the cache.
+
+ If the key isn't in the cache then `default` will be returned if
+ specified, otherwise `KeyError` will get raised.
+
+ Identical functionality to `dict.pop(..)`.
+ """
+
+ value = self._cache.pop(key, default)
+ if value is SENTINEL:
+ raise KeyError(key)
+
+ return value
+
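+ # A hypothetical illustration of pop's dict-like handling of missing keys:
+ #   cache.pop("dead-key", None)   # -> None, nothing to remove
+ #   cache.pop("dead-key")         # raises KeyError
+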
def __contains__(self, key):
return key in self._cache
@@ -128,14 +140,16 @@ class ExpiringCache(object):
keys_to_delete = set()
- for key, cache_entry in self._cache.items():
+ for key, cache_entry in iteritems(self._cache):
if now - cache_entry.time > self._expiry_ms:
keys_to_delete.add(key)
for k in keys_to_delete:
value = self._cache.pop(k)
if self.iterable:
- self._size_estimate -= len(value.value)
+ self.metrics.inc_evictions(len(value.value))
+ else:
+ self.metrics.inc_evictions()
logger.debug(
"[%s] _prune_cache before: %d, after len: %d",
@@ -144,12 +158,14 @@ class ExpiringCache(object):
def __len__(self):
if self.iterable:
- return self._size_estimate
+ return sum(len(entry.value) for entry in itervalues(self._cache))
else:
return len(self._cache)
class _CacheEntry(object):
+ __slots__ = ["time", "value"]
+
def __init__(self, time, value):
self.time = time
self.value = value
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index f2bde74d..625aedc9 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -15,6 +15,8 @@
import logging
+from six import integer_types
+
from sortedcontainers import SortedDict
from synapse.util import caches
@@ -47,7 +49,7 @@ class StreamChangeCache(object):
def has_entity_changed(self, entity, stream_pos):
"""Returns True if the entity may have been updated since stream_pos
"""
- assert type(stream_pos) is int or type(stream_pos) is long
+ assert type(stream_pos) in integer_types
if stream_pos < self._earliest_known_stream_pos:
self.metrics.inc_misses()
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index a0c2d376..4c6e92be 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -25,7 +25,7 @@ See doc/log_contexts.rst for details on how this works.
import logging
import threading
-from twisted.internet import defer
+from twisted.internet import defer, threads
logger = logging.getLogger(__name__)
@@ -200,7 +200,7 @@ class LoggingContext(object):
sentinel = Sentinel()
- def __init__(self, name=None, parent_context=None):
+ def __init__(self, name=None, parent_context=None, request=None):
self.previous_context = LoggingContext.current_context()
self.name = name
@@ -218,6 +218,13 @@ class LoggingContext(object):
self.parent_context = parent_context
+ if self.parent_context is not None:
+ self.parent_context.copy_to(self)
+
+ if request is not None:
+ # the request param overrides the request from the parent context
+ self.request = request
+
def __str__(self):
return "%s@%x" % (self.name, id(self))
@@ -256,9 +263,6 @@ class LoggingContext(object):
)
self.alive = True
- if self.parent_context is not None:
- self.parent_context.copy_to(self)
-
return self
def __exit__(self, type, value, traceback):
@@ -439,6 +443,35 @@ class PreserveLoggingContext(object):
)
+def nested_logging_context(suffix, parent_context=None):
+ """Creates a new logging context as a child of another.
+
+ The nested logging context will have a 'request' made up of the parent context's
+ request, plus the given suffix.
+
+ CPU/db usage stats will be added to the parent context's on exit.
+
+ Normal usage looks like:
+
+ with nested_logging_context(suffix):
+ # ... do stuff
+
+ Args:
+ suffix (str): suffix to add to the parent context's 'request'.
+ parent_context (LoggingContext|None): parent context. Will use the current context
+ if None.
+
+ Returns:
+ LoggingContext: new logging context.
+ """
+ if parent_context is None:
+ parent_context = LoggingContext.current_context()
+ return LoggingContext(
+ parent_context=parent_context,
+ request=parent_context.request + "-" + suffix,
+ )
+
+
def preserve_fn(f):
"""Function decorator which wraps the function with run_in_background"""
def g(*args, **kwargs):
@@ -529,58 +562,76 @@ def _set_context_cb(result, context):
return result
-# modules to ignore in `logcontext_tracer`
-_to_ignore = [
- "synapse.util.logcontext",
- "synapse.http.server",
- "synapse.storage._base",
- "synapse.util.async_helpers",
-]
+def defer_to_thread(reactor, f, *args, **kwargs):
+ """
+ Calls the function `f` using a thread from the reactor's default threadpool and
+ returns the result as a Deferred.
+
+ Creates a new logcontext for `f`, which is created as a child of the current
+ logcontext (so its CPU usage metrics will get attributed to the current
+ logcontext). `f` should preserve the logcontext it is given.
+ The result deferred follows the Synapse logcontext rules: you should `yield`
+ on it.
+
+ Args:
+ reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
+ the Deferred will be invoked, and whose threadpool we should use for the
+ function.
-def logcontext_tracer(frame, event, arg):
- """A tracer that logs whenever a logcontext "unexpectedly" changes within
- a function. Probably inaccurate.
+ Normally this will be hs.get_reactor().
- Use by calling `sys.settrace(logcontext_tracer)` in the main thread.
+ f (callable): The function to call.
+
+ args: positional arguments to pass to f.
+
+ kwargs: keyword arguments to pass to f.
+
+ Returns:
+ Deferred: A Deferred which fires a callback with the result of `f`, or an
+ errback if `f` throws an exception.
"""
- if event == 'call':
- name = frame.f_globals["__name__"]
- if name.startswith("synapse"):
- if name == "synapse.util.logcontext":
- if frame.f_code.co_name in ["__enter__", "__exit__"]:
- tracer = frame.f_back.f_trace
- if tracer:
- tracer.just_changed = True
-
- tracer = frame.f_trace
- if tracer:
- return tracer
-
- if not any(name.startswith(ig) for ig in _to_ignore):
- return LineTracer()
-
-
-class LineTracer(object):
- __slots__ = ["context", "just_changed"]
-
- def __init__(self):
- self.context = LoggingContext.current_context()
- self.just_changed = False
-
- def __call__(self, frame, event, arg):
- if event in 'line':
- if self.just_changed:
- self.context = LoggingContext.current_context()
- self.just_changed = False
- else:
- c = LoggingContext.current_context()
- if c != self.context:
- logger.info(
- "Context changed! %s -> %s, %s, %s",
- self.context, c,
- frame.f_code.co_filename, frame.f_lineno
- )
- self.context = c
+ return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
- return self
+
+def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs):
+ """
+ A wrapper for twisted.internet.threads.deferToThreadpool, which handles
+ logcontexts correctly.
+
+ Calls the function `f` using a thread from the given threadpool and returns
+ the result as a Deferred.
+
+ Creates a new logcontext for `f`, which is created as a child of the current
+ logcontext (so its CPU usage metrics will get attributed to the current
+ logcontext). `f` should preserve the logcontext it is given.
+
+ The result deferred follows the Synapse logcontext rules: you should `yield`
+ on it.
+
+ Args:
+ reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
+ the Deferred will be invoked. Normally this will be hs.get_reactor().
+
+ threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
+ running `f`. Normally this will be hs.get_reactor().getThreadPool().
+
+ f (callable): The function to call.
+
+ args: positional arguments to pass to f.
+
+ kwargs: keyword arguments to pass to f.
+
+ Returns:
+ Deferred: A Deferred which fires a callback with the result of `f`, or an
+ errback if `f` throws an exception.
+ """
+ logcontext = LoggingContext.current_context()
+
+ def g():
+ with LoggingContext(parent_context=logcontext):
+ return f(*args, **kwargs)
+
+ return make_deferred_yieldable(
+ threads.deferToThreadPool(reactor, threadpool, g)
+ )
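+
+
+# A minimal usage sketch (the blocking function and its argument are
+# illustrative): from an inlineCallbacks function, push a blocking call onto
+# the reactor's default threadpool.
+#
+#   result = yield defer_to_thread(hs.get_reactor(), read_file_sync, path)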
diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py
index 14be3c73..9cb7e9c9 100644
--- a/synapse/util/manhole.py
+++ b/synapse/util/manhole.py
@@ -19,22 +19,40 @@ from twisted.conch.ssh.keys import Key
from twisted.cred import checkers, portal
PUBLIC_KEY = (
- "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az"
- "64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJS"
- "kbh/C+BR3utDS555mV"
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHhGATaW4KhE23+7nrH4jFx3yLq9OjaEs5"
+ "XALqeK+7385NlLja3DE/DO9mGhnd9+bAy39EKT3sTV6+WXQ4yD0TvEEyUEMtjWkSEm6U32+C"
+ "DaS3TW/vPBUMeJQwq+Ydcif1UlnpXrDDTamD0AU9VaEvHq+3HAkipqn0TGpKON6aqk4vauDx"
+ "oXSsV5TXBVrxP/y7HpMOpU4GUWsaaacBTKKNnUaQB4UflvydaPJUuwdaCUJGTMjbhWrjVfK+"
+ "jslseSPxU6XvrkZMyCr4znxvuDxjMk1RGIdO7v+rbBMLEgqtSMNqJbYeVCnj2CFgc3fcTcld"
+ "X2uOJDrJb/WRlHulthCh"
)
PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
-MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW
-4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw
-vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb
-Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1
-xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8
-PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2
-gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu
-DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML
-pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP
-EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg==
+MIIEpQIBAAKCAQEAx4RgE2luCoRNt/u56x+Ixcd8i6vTo2hLOVwC6nivu9/OTZS4
+2twxPwzvZhoZ3ffmwMt/RCk97E1evll0OMg9E7xBMlBDLY1pEhJulN9vgg2kt01v
+7zwVDHiUMKvmHXIn9VJZ6V6ww02pg9AFPVWhLx6vtxwJIqap9ExqSjjemqpOL2rg
+8aF0rFeU1wVa8T/8ux6TDqVOBlFrGmmnAUyijZ1GkAeFH5b8nWjyVLsHWglCRkzI
+24Vq41Xyvo7JbHkj8VOl765GTMgq+M58b7g8YzJNURiHTu7/q2wTCxIKrUjDaiW2
+HlQp49ghYHN33E3JXV9rjiQ6yW/1kZR7pbYQoQIDAQABAoIBAQC8KJ0q8Wzzwh5B
+esa1dQHZ8+4DEsL/Amae66VcVwD0X3cCN1W2IZ7X5W0Ij2kBqr8V51RYhcR+S+Ek
+BtzSiBUBvbKGrqcMGKaUgomDIMzai99hd0gvCCyZnEW1OQhFkNkaRNXCfqiZJ27M
+fqvSUiU2eOwh9fCvmxoA6Of8o3FbzcJ+1GMcobWRllDtLmj6lgVbDzuA+0jC5daB
+9Tj1pBzu3wn3ufxiS+gBnJ+7NcXH3E73lqCcPa2ufbZ1haxfiGCnRIhFXuQDgxFX
+vKdEfDgtvas6r1ahGbc+b/q8E8fZT7cABuIU4yfOORK+MhpyWbvoyyzuVGKj3PKt
+KSPJu5CZAoGBAOkoJfAVyYteqKcmGTanGqQnAY43CaYf6GdSPX/jg+JmKZg0zqMC
+jWZUtPb93i+jnOInbrnuHOiHAxI8wmhEPed28H2lC/LU8PzlqFkZXKFZ4vLOhhRB
+/HeHCFIDosPFlohWi3b+GAjD7sXgnIuGmnXWe2ea/TS3yersifDEoKKjAoGBANsQ
+gJX2cJv1c3jhdgcs8vAt5zIOKcCLTOr/QPmVf/kxjNgndswcKHwsxE/voTO9q+TF
+v/6yCSTxAdjuKz1oIYWgi/dZo82bBKWxNRpgrGviU3/zwxiHlyIXUhzQu78q3VS/
+7S1XVbc7qMV++XkYKHPVD+nVG/gGzFxumX7MLXfrAoGBAJit9cn2OnjNj9uFE1W6
+r7N254ndeLAUjPe73xH0RtTm2a4WRopwjW/JYIetTuYbWgyujc+robqTTuuOZjAp
+H/CG7o0Ym251CypQqaFO/l2aowclPp/dZhpPjp9GSjuxFBZLtiBB3DNBOwbRQzIK
+/vLTdRQvZkgzYkI4i0vjNt3JAoGBANP8HSKBLymMlShlrSx2b8TB9tc2Y2riohVJ
+2ttqs0M2kt/dGJWdrgOz4mikL+983Olt/0P9juHDoxEEMK2kpcPEv40lnmBpYU7h
+s8yJvnBLvJe2EJYdJ8AipyAhUX1FgpbvfxmASP8eaUxsegeXvBWTGWojAoS6N2o+
+0KSl+l3vAoGAFqm0gO9f/Q1Se60YQd4l2PZeMnJFv0slpgHHUwegmd6wJhOD7zJ1
+CkZcXwiv7Nog7AI9qKJEUXLjoqL+vJskBzSOqU3tcd670YQMi1aXSXJqYE202K7o
+EddTrx3TNpr1D5m/f+6mnXWrc8u9y1+GNx9yz889xMjIBTBI9KqaaOs=
-----END RSA PRIVATE KEY-----"""
@@ -52,6 +70,8 @@ def manhole(username, password, globals):
Returns:
twisted.internet.protocol.Factory: A factory to pass to ``listenTCP``
"""
+ if not isinstance(password, bytes):
+ password = password.encode('ascii')
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(
**{username: password}
@@ -64,7 +84,7 @@ def manhole(username, password, globals):
)
factory = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker]))
- factory.publicKeys['ssh-rsa'] = Key.fromString(PUBLIC_KEY)
- factory.privateKeys['ssh-rsa'] = Key.fromString(PRIVATE_KEY)
+ factory.publicKeys[b'ssh-rsa'] = Key.fromString(PUBLIC_KEY)
+ factory.privateKeys[b'ssh-rsa'] = Key.fromString(PRIVATE_KEY)
return factory
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 97f12673..4b4ac5f6 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -20,6 +20,7 @@ from prometheus_client import Counter
from twisted.internet import defer
+from synapse.metrics import InFlightGauge
from synapse.util.logcontext import LoggingContext
logger = logging.getLogger(__name__)
@@ -45,6 +46,13 @@ block_db_txn_duration = Counter(
block_db_sched_duration = Counter(
"synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"])
+# Tracks the number of blocks currently active
+in_flight = InFlightGauge(
+ "synapse_util_metrics_block_in_flight", "",
+ labels=["block_name"],
+ sub_metrics=["real_time_max", "real_time_sum"],
+)
+
def measure_func(name):
def wrapper(func):
@@ -82,10 +90,14 @@ class Measure(object):
self.start_usage = self.start_context.get_resource_usage()
+ in_flight.register((self.name,), self._update_in_flight)
+
def __exit__(self, exc_type, exc_val, exc_tb):
if isinstance(exc_type, Exception) or not self.start_context:
return
+ in_flight.unregister((self.name,), self._update_in_flight)
+
duration = self.clock.time() - self.start
block_counter.labels(self.name).inc()
@@ -120,3 +132,13 @@ class Measure(object):
if self.created_context:
self.start_context.__exit__(exc_type, exc_val, exc_tb)
+
+ def _update_in_flight(self, metrics):
+ """Gets called when processing in flight metrics
+ """
+ duration = self.clock.time() - self.start
+
+ metrics.real_time_max = max(metrics.real_time_max, duration)
+ metrics.real_time_sum += duration
+
+ # TODO: Add other in flight metrics.
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 8a3a06fd..26cce7d1 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -188,7 +188,7 @@ class RetryDestinationLimiter(object):
else:
self.retry_interval = self.min_retry_interval
- logger.debug(
+ logger.info(
"Connection to %s was unsuccessful (%s(%s)); backoff now %i",
self.destination, exc_type, exc_val, self.retry_interval
)
diff --git a/synapse/visibility.py b/synapse/visibility.py
index d4680863..0281a7c9 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -23,6 +23,7 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.events.utils import prune_event
+from synapse.storage.state import StateFilter
from synapse.types import get_domain_from_id
logger = logging.getLogger(__name__)
@@ -72,7 +73,7 @@ def filter_events_for_client(store, user_id, events, is_peeking=False,
)
event_id_to_state = yield store.get_state_for_events(
frozenset(e.event_id for e in events),
- types=types,
+ state_filter=StateFilter.from_types(types),
)
ignore_dict_content = yield store.get_global_account_data_by_type_for_user(
@@ -219,7 +220,7 @@ def filter_events_for_server(store, server_name, events):
# Whatever else we do, we need to check for senders which have requested
# erasure of their data.
erased_senders = yield store.are_users_erased(
- e.sender for e in events,
+ (e.sender for e in events),
)
def redact_disallowed(event, state):
@@ -273,8 +274,8 @@ def filter_events_for_server(store, server_name, events):
# need to check membership (as we know the server is in the room).
event_to_state_ids = yield store.get_state_ids_for_events(
frozenset(e.event_id for e in events),
- types=(
- (EventTypes.RoomHistoryVisibility, ""),
+ state_filter=StateFilter.from_types(
+ types=((EventTypes.RoomHistoryVisibility, ""),),
)
)
@@ -314,9 +315,11 @@ def filter_events_for_server(store, server_name, events):
# of the history vis and membership state at those events.
event_to_state_ids = yield store.get_state_ids_for_events(
frozenset(e.event_id for e in events),
- types=(
- (EventTypes.RoomHistoryVisibility, ""),
- (EventTypes.Member, None),
+ state_filter=StateFilter.from_types(
+ types=(
+ (EventTypes.RoomHistoryVisibility, ""),
+ (EventTypes.Member, None),
+ ),
)
)
@@ -324,14 +327,13 @@ def filter_events_for_server(store, server_name, events):
# server's domain.
#
# event_to_state_ids contains lots of duplicates, so it turns out to be
- # cheaper to build a complete set of unique
- # ((type, state_key), event_id) tuples, and then filter out the ones we
- # don't want.
+ # cheaper to build a complete event_id => (type, state_key) dict, and then
+ # filter out the ones we don't want
#
- state_key_to_event_id_set = {
- e
+ event_id_to_state_key = {
+ event_id: key
for key_to_eid in itervalues(event_to_state_ids)
- for e in key_to_eid.items()
+ for key, event_id in iteritems(key_to_eid)
}
def include(typ, state_key):
@@ -346,7 +348,7 @@ def filter_events_for_server(store, server_name, events):
event_map = yield store.get_events([
e_id
- for key, e_id in state_key_to_event_id_set
+ for e_id, key in iteritems(event_id_to_state_key)
if include(key[0], key[1])
])
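The `types=` keyword on the state-store getters is replaced throughout by a StateFilter. A rough usage sketch mirroring the calls above (the `store` and `events` arguments are assumed to come from the surrounding handler; nothing here goes beyond the API shown in the diff):

from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.storage.state import StateFilter


@defer.inlineCallbacks
def _visibility_state_ids(store, events):
    # Only fetch the history-visibility entry and membership entries for each
    # event's state; a state_key of None acts as a wildcard for that type.
    state_filter = StateFilter.from_types(
        (
            (EventTypes.RoomHistoryVisibility, ""),
            (EventTypes.Member, None),
        )
    )
    event_to_state_ids = yield store.get_state_ids_for_events(
        frozenset(e.event_id for e in events),
        state_filter=state_filter,
    )
    defer.returnValue(event_to_state_ids)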
diff --git a/synctl b/synctl
index 1bdceda2..7e79b05c 120000..100755
--- a/synctl
+++ b/synctl
@@ -1 +1,296 @@
-./synapse/app/synctl.py
\ No newline at end of file
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import collections
+import errno
+import glob
+import os
+import os.path
+import signal
+import subprocess
+import sys
+import time
+
+from six import iteritems
+
+import yaml
+
+SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
+
+GREEN = "\x1b[1;32m"
+YELLOW = "\x1b[1;33m"
+RED = "\x1b[1;31m"
+NORMAL = "\x1b[m"
+
+
+def pid_running(pid):
+ try:
+ os.kill(pid, 0)
+ return True
+ except OSError as err:
+ if err.errno == errno.EPERM:
+ return True
+ return False
+
+
+def write(message, colour=NORMAL, stream=sys.stdout):
+ # Let's check if we're writing to a TTY before colouring
+ should_colour = False
+ try:
+ should_colour = stream.isatty()
+ except AttributeError:
+ # Just in case `isatty` isn't defined on everything. The python
+ # docs are incredibly vague.
+ pass
+
+ if not should_colour:
+ stream.write(message + "\n")
+ else:
+ stream.write(colour + message + NORMAL + "\n")
+
+
+def abort(message, colour=RED, stream=sys.stderr):
+ write(message, colour, stream)
+ sys.exit(1)
+
+
+def start(configfile):
+ write("Starting ...")
+ args = SYNAPSE
+ args.extend(["--daemonize", "-c", configfile])
+
+ try:
+ subprocess.check_call(args)
+ write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
+ except subprocess.CalledProcessError as e:
+ write(
+ "error starting (exit code: %d); see above for logs" % e.returncode,
+ colour=RED,
+ )
+
+
+def start_worker(app, configfile, worker_configfile):
+ args = [sys.executable, "-B", "-m", app, "-c", configfile, "-c", worker_configfile]
+
+ try:
+ subprocess.check_call(args)
+ write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
+ except subprocess.CalledProcessError as e:
+ write(
+ "error starting %s(%r) (exit code: %d); see above for logs"
+ % (app, worker_configfile, e.returncode),
+ colour=RED,
+ )
+
+
+def stop(pidfile, app):
+ if os.path.exists(pidfile):
+ pid = int(open(pidfile).read())
+ try:
+ os.kill(pid, signal.SIGTERM)
+ write("stopped %s" % (app,), colour=GREEN)
+ except OSError as err:
+ if err.errno == errno.ESRCH:
+ write("%s not running" % (app,), colour=YELLOW)
+ elif err.errno == errno.EPERM:
+ abort("Cannot stop %s: Operation not permitted" % (app,))
+ else:
+ abort("Cannot stop %s: Unknown error" % (app,))
+
+
+Worker = collections.namedtuple(
+ "Worker", ["app", "configfile", "pidfile", "cache_factor", "cache_factors"]
+)
+
+
+def main():
+
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "action",
+ choices=["start", "stop", "restart"],
+ help="whether to start, stop or restart the synapse",
+ )
+ parser.add_argument(
+ "configfile",
+ nargs="?",
+ default="homeserver.yaml",
+ help="the homeserver config file, defaults to homeserver.yaml",
+ )
+ parser.add_argument(
+ "-w", "--worker", metavar="WORKERCONFIG", help="start or stop a single worker"
+ )
+ parser.add_argument(
+ "-a",
+ "--all-processes",
+ metavar="WORKERCONFIGDIR",
+ help="start or stop all the workers in the given directory"
+ " and the main synapse process",
+ )
+
+ options = parser.parse_args()
+
+ if options.worker and options.all_processes:
+ write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr)
+ sys.exit(1)
+
+ configfile = options.configfile
+
+ if not os.path.exists(configfile):
+ write(
+ "No config file found\n"
+ "To generate a config file, run '%s -c %s --generate-config"
+ " --server-name=<server name>'\n" % (" ".join(SYNAPSE), options.configfile),
+ stream=sys.stderr,
+ )
+ sys.exit(1)
+
+ with open(configfile) as stream:
+ config = yaml.load(stream)
+
+ pidfile = config["pid_file"]
+ cache_factor = config.get("synctl_cache_factor")
+ start_stop_synapse = True
+
+ if cache_factor:
+ os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
+
+ cache_factors = config.get("synctl_cache_factors", {})
+ for cache_name, factor in iteritems(cache_factors):
+ os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
+
+ worker_configfiles = []
+ if options.worker:
+ start_stop_synapse = False
+ worker_configfile = options.worker
+ if not os.path.exists(worker_configfile):
+ write(
+ "No worker config found at %r" % (worker_configfile,), stream=sys.stderr
+ )
+ sys.exit(1)
+ worker_configfiles.append(worker_configfile)
+
+ if options.all_processes:
+ # To start the main synapse with -a you need to add a worker file
+ # with worker_app == "synapse.app.homeserver"
+ start_stop_synapse = False
+ worker_configdir = options.all_processes
+ if not os.path.isdir(worker_configdir):
+ write(
+ "No worker config directory found at %r" % (worker_configdir,),
+ stream=sys.stderr,
+ )
+ sys.exit(1)
+ worker_configfiles.extend(
+ sorted(glob.glob(os.path.join(worker_configdir, "*.yaml")))
+ )
+
+ workers = []
+ for worker_configfile in worker_configfiles:
+ with open(worker_configfile) as stream:
+ worker_config = yaml.load(stream)
+ worker_app = worker_config["worker_app"]
+ if worker_app == "synapse.app.homeserver":
+ # We need to special case all of this to pick up options that may
+ # be set in the main config file or in this worker config file.
+ worker_pidfile = worker_config.get("pid_file") or pidfile
+ worker_cache_factor = (
+ worker_config.get("synctl_cache_factor") or cache_factor
+ )
+ worker_cache_factors = (
+ worker_config.get("synctl_cache_factors") or cache_factors
+ )
+ daemonize = worker_config.get("daemonize") or config.get("daemonize")
+ assert daemonize, "Main process must have daemonize set to true"
+
+ # The master process doesn't support using worker_* config.
+ for key in worker_config:
+ if key == "worker_app": # But we allow worker_app
+ continue
+ assert not key.startswith(
+ "worker_"
+ ), "Main process cannot use worker_* config"
+ else:
+ worker_pidfile = worker_config["worker_pid_file"]
+ worker_daemonize = worker_config["worker_daemonize"]
+ assert worker_daemonize, "In config %r: expected '%s' to be True" % (
+ worker_configfile,
+ "worker_daemonize",
+ )
+ worker_cache_factor = worker_config.get("synctl_cache_factor")
+ worker_cache_factors = worker_config.get("synctl_cache_factors", {})
+ workers.append(
+ Worker(
+ worker_app,
+ worker_configfile,
+ worker_pidfile,
+ worker_cache_factor,
+ worker_cache_factors,
+ )
+ )
+
+ action = options.action
+
+ if action == "stop" or action == "restart":
+ for worker in workers:
+ stop(worker.pidfile, worker.app)
+
+ if start_stop_synapse:
+ stop(pidfile, "synapse.app.homeserver")
+
+ # Wait for synapse to actually shutdown before starting it again
+ if action == "restart":
+ running_pids = []
+ if start_stop_synapse and os.path.exists(pidfile):
+ running_pids.append(int(open(pidfile).read()))
+ for worker in workers:
+ if os.path.exists(worker.pidfile):
+ running_pids.append(int(open(worker.pidfile).read()))
+ if len(running_pids) > 0:
+ write("Waiting for process to exit before restarting...")
+ for running_pid in running_pids:
+ while pid_running(running_pid):
+ time.sleep(0.2)
+ write("All processes exited; now restarting...")
+
+ if action == "start" or action == "restart":
+ if start_stop_synapse:
+ # Check if synapse is already running
+ if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
+ abort("synapse.app.homeserver already running")
+ start(configfile)
+
+ for worker in workers:
+ env = os.environ.copy()
+
+ if worker.cache_factor:
+ os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
+
+ for cache_name, factor in iteritems(worker.cache_factors):
+ os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
+
+ start_worker(worker.app, configfile, worker.configfile)
+
+ # Reset env back to the original
+ os.environ.clear()
+ os.environ.update(env)
+
+
+if __name__ == "__main__":
+ main()
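One detail worth calling out in the rewritten synctl: the global `synctl_cache_factor` and the per-cache `synctl_cache_factors` map are exported as environment variables for the server processes. A standalone illustration of that mapping (the config values are invented; the script itself does the same loop with six.iteritems for Python 2/3 compatibility):

import os

# Hypothetical homeserver.yaml values, as synctl would read them from the
# parsed YAML config.
config = {
    "synctl_cache_factor": 2.0,
    "synctl_cache_factors": {"get_users_in_room": 5.0},
}

cache_factor = config.get("synctl_cache_factor")
if cache_factor:
    os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)

for cache_name, factor in config.get("synctl_cache_factors", {}).items():
    os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)

print(os.environ["SYNAPSE_CACHE_FACTOR"])                    # "2.0"
print(os.environ["SYNAPSE_CACHE_FACTOR_GET_USERS_IN_ROOM"])  # "5.0"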
diff --git a/test_postgresql.sh b/test_postgresql.sh
new file mode 100755
index 00000000..1ffcaabd
--- /dev/null
+++ b/test_postgresql.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# This script builds the Docker image to run the PostgreSQL tests, and then runs
+# the tests.
+
+set -e
+
+# Build, and tag
+docker build docker/ -f docker/Dockerfile-pgtests -t synapsepgtests
+
+# Run, mounting the current directory into /src
+docker run --rm -it -v $(pwd)\:/src synapsepgtests
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index f65a27e5..379e9c4a 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -471,6 +471,7 @@ class AuthTestCase(unittest.TestCase):
def test_reserved_threepid(self):
self.hs.config.limit_usage_by_mau = True
self.hs.config.max_mau_value = 1
+ self.store.get_monthly_active_count = lambda: defer.succeed(2)
threepid = {'medium': 'email', 'address': 'reserved@server.com'}
unknown_threepid = {'medium': 'email', 'address': 'unreserved@server.com'}
self.hs.config.mau_limits_reserved_threepids = [threepid]
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index 48b2d3d6..2a704480 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -60,7 +60,7 @@ class FilteringTestCase(unittest.TestCase):
invalid_filters = [
{"boom": {}},
{"account_data": "Hello World"},
- {"event_fields": ["\\foo"]},
+ {"event_fields": [r"\\foo"]},
{"room": {"timeline": {"limit": 0}, "state": {"not_bars": ["*"]}}},
{"event_format": "other"},
{"room": {"not_rooms": ["#foo:pik-test"]}},
@@ -109,6 +109,16 @@ class FilteringTestCase(unittest.TestCase):
"event_format": "client",
"event_fields": ["type", "content", "sender"],
},
+
+ # a single backslash should be permitted (though it is debatable whether
+ # it should be permitted before anything other than `.`, and what that
+ # actually means)
+ #
+ # (note that event_fields is implemented in
+ # synapse.events.utils.serialize_event, and so whether this actually works
+ # is tested elsewhere. We just want to check that it is allowed through the
+ # filter validation)
+ {"event_fields": [r"foo\.bar"]},
]
for filter in valid_filters:
try:
diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py
index 76b5090f..a83f567e 100644
--- a/tests/app/test_frontend_proxy.py
+++ b/tests/app/test_frontend_proxy.py
@@ -47,7 +47,7 @@ class FrontendProxyTests(HomeserverTestCase):
self.assertEqual(len(self.reactor.tcpServers), 1)
site = self.reactor.tcpServers[0][1]
self.resource = (
- site.resource.children["_matrix"].children["client"].children["r0"]
+ site.resource.children[b"_matrix"].children[b"client"].children[b"r0"]
)
request, channel = self.make_request("PUT", "presence/a/status")
@@ -77,7 +77,7 @@ class FrontendProxyTests(HomeserverTestCase):
self.assertEqual(len(self.reactor.tcpServers), 1)
site = self.reactor.tcpServers[0][1]
self.resource = (
- site.resource.children["_matrix"].children["client"].children["r0"]
+ site.resource.children[b"_matrix"].children[b"client"].children[b"r0"]
)
request, channel = self.make_request("PUT", "presence/a/status")
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index f88d28a1..0c23068b 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -67,6 +67,6 @@ class ConfigGenerationTestCase(unittest.TestCase):
with open(log_config_file) as f:
config = f.read()
# find the 'filename' line
- matches = re.findall("^\s*filename:\s*(.*)$", config, re.M)
+ matches = re.findall(r"^\s*filename:\s*(.*)$", config, re.M)
self.assertEqual(1, len(matches))
self.assertEqual(matches[0], expected)
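The raw-string changes in this and the following test files silence the Python 3.6+ DeprecationWarning for unrecognised escape sequences in ordinary string literals; the patterns themselves are unchanged. A quick check of the equivalence (the sample log line is made up for illustration):

import re

# "\s" in an ordinary string literal is an unrecognised escape: Python keeps
# the backslash but warns from 3.6 onwards.  The raw string spells exactly the
# same two characters without the warning.
assert r"\s" == "\\s"

matches = re.findall(r"^\s*filename:\s*(.*)$", "  filename: homeserver.log", re.M)
assert matches == ["homeserver.log"]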
diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py
new file mode 100644
index 00000000..f37a17d6
--- /dev/null
+++ b/tests/config/test_room_directory.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from synapse.config.room_directory import RoomDirectoryConfig
+
+from tests import unittest
+
+
+class RoomDirectoryConfigTestCase(unittest.TestCase):
+ def test_alias_creation_acl(self):
+ config = yaml.load("""
+ alias_creation_rules:
+ - user_id: "*bob*"
+ alias: "*"
+ action: "deny"
+ - user_id: "*"
+ alias: "#unofficial_*"
+ action: "allow"
+ - user_id: "@foo*:example.com"
+ alias: "*"
+ action: "allow"
+ - user_id: "@gah:example.com"
+ alias: "#goo:example.com"
+ action: "allow"
+ """)
+
+ rd_config = RoomDirectoryConfig()
+ rd_config.read_config(config)
+
+ self.assertFalse(rd_config.is_alias_creation_allowed(
+ user_id="@bob:example.com",
+ alias="#test:example.com",
+ ))
+
+ self.assertTrue(rd_config.is_alias_creation_allowed(
+ user_id="@test:example.com",
+ alias="#unofficial_st:example.com",
+ ))
+
+ self.assertTrue(rd_config.is_alias_creation_allowed(
+ user_id="@foobar:example.com",
+ alias="#test:example.com",
+ ))
+
+ self.assertTrue(rd_config.is_alias_creation_allowed(
+ user_id="@gah:example.com",
+ alias="#goo:example.com",
+ ))
+
+ self.assertFalse(rd_config.is_alias_creation_allowed(
+ user_id="@test:example.com",
+ alias="#test:example.com",
+ ))
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index ff217ca8..d0cc492d 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -156,7 +156,7 @@ class SerializeEventTestCase(unittest.TestCase):
room_id="!foo:bar",
content={"key.with.dots": {}},
),
- ["content.key\.with\.dots"],
+ [r"content.key\.with\.dots"],
),
{"content": {"key.with.dots": {}}},
)
@@ -172,7 +172,7 @@ class SerializeEventTestCase(unittest.TestCase):
"nested.dot.key": {"leaf.key": 42, "not_me_either": 1},
},
),
- ["content.nested\.dot\.key.leaf\.key"],
+ [r"content.nested\.dot\.key.leaf\.key"],
),
{"content": {"nested.dot.key": {"leaf.key": 42}}},
)
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index ec735568..8ae6556c 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -18,7 +18,9 @@ from mock import Mock
from twisted.internet import defer
+from synapse.config.room_directory import RoomDirectoryConfig
from synapse.handlers.directory import DirectoryHandler
+from synapse.rest.client.v1 import directory, room
from synapse.types import RoomAlias
from tests import unittest
@@ -102,3 +104,49 @@ class DirectoryTestCase(unittest.TestCase):
)
self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response)
+
+
+class TestCreateAliasACL(unittest.HomeserverTestCase):
+ user_id = "@test:test"
+
+ servlets = [directory.register_servlets, room.register_servlets]
+
+ def prepare(self, hs, reactor, clock):
+ # We cheekily override the config to add custom alias creation rules
+ config = {}
+ config["alias_creation_rules"] = [
+ {
+ "user_id": "*",
+ "alias": "#unofficial_*",
+ "action": "allow",
+ }
+ ]
+
+ rd_config = RoomDirectoryConfig()
+ rd_config.read_config(config)
+
+ self.hs.config.is_alias_creation_allowed = rd_config.is_alias_creation_allowed
+
+ return hs
+
+ def test_denied(self):
+ room_id = self.helper.create_room_as(self.user_id)
+
+ request, channel = self.make_request(
+ "PUT",
+ b"directory/room/%23test%3Atest",
+ ('{"room_id":"%s"}' % (room_id,)).encode('ascii'),
+ )
+ self.render(request)
+ self.assertEquals(403, channel.code, channel.result)
+
+ def test_allowed(self):
+ room_id = self.helper.create_room_as(self.user_id)
+
+ request, channel = self.make_request(
+ "PUT",
+ b"directory/room/%23unofficial_test%3Atest",
+ ('{"room_id":"%s"}' % (room_id,)).encode('ascii'),
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code, channel.result)
diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py
new file mode 100644
index 00000000..c8994f41
--- /dev/null
+++ b/tests/handlers/test_e2e_room_keys.py
@@ -0,0 +1,392 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+import mock
+
+from twisted.internet import defer
+
+import synapse.api.errors
+import synapse.handlers.e2e_room_keys
+import synapse.storage
+from synapse.api import errors
+
+from tests import unittest, utils
+
+# sample room_key data for use in the tests
+room_keys = {
+ "rooms": {
+ "!abc:matrix.org": {
+ "sessions": {
+ "c0ff33": {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": False,
+ "session_data": "SSBBTSBBIEZJU0gK"
+ }
+ }
+ }
+ }
+}
+
+
+class E2eRoomKeysHandlerTestCase(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs)
+ self.hs = None # type: synapse.server.HomeServer
+ self.handler = None # type: synapse.handlers.e2e_keys.E2eRoomKeysHandler
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.hs = yield utils.setup_test_homeserver(
+ self.addCleanup,
+ handlers=None,
+ replication_layer=mock.Mock(),
+ )
+ self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs)
+ self.local_user = "@boris:" + self.hs.hostname
+
+ @defer.inlineCallbacks
+ def test_get_missing_current_version_info(self):
+ """Check that we get a 404 if we ask for info about the current version
+ when there is no version.
+ """
+ res = None
+ try:
+ yield self.handler.get_version_info(self.local_user)
+ except errors.SynapseError as e:
+ res = e.code
+ self.assertEqual(res, 404)
+
+ @defer.inlineCallbacks
+ def test_get_missing_version_info(self):
+ """Check that we get a 404 if we ask for info about a specific version
+ that doesn't exist.
+ """
+ res = None
+ try:
+ yield self.handler.get_version_info(self.local_user, "bogus_version")
+ except errors.SynapseError as e:
+ res = e.code
+ self.assertEqual(res, 404)
+
+ @defer.inlineCallbacks
+ def test_create_version(self):
+ """Check that we can create and then retrieve versions.
+ """
+ res = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+ self.assertEqual(res, "1")
+
+ # check we can retrieve it as the current version
+ res = yield self.handler.get_version_info(self.local_user)
+ self.assertDictEqual(res, {
+ "version": "1",
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+
+ # check we can retrieve it as a specific version
+ res = yield self.handler.get_version_info(self.local_user, "1")
+ self.assertDictEqual(res, {
+ "version": "1",
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+
+ # upload a new one...
+ res = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "second_version_auth_data",
+ })
+ self.assertEqual(res, "2")
+
+ # check we can retrieve it as the current version
+ res = yield self.handler.get_version_info(self.local_user)
+ self.assertDictEqual(res, {
+ "version": "2",
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "second_version_auth_data",
+ })
+
+ @defer.inlineCallbacks
+ def test_delete_missing_version(self):
+ """Check that we get a 404 on deleting nonexistent versions
+ """
+ res = None
+ try:
+ yield self.handler.delete_version(self.local_user, "1")
+ except errors.SynapseError as e:
+ res = e.code
+ self.assertEqual(res, 404)
+
+ @defer.inlineCallbacks
+ def test_delete_missing_current_version(self):
+ """Check that we get a 404 on deleting nonexistent current version
+ """
+ res = None
+ try:
+ yield self.handler.delete_version(self.local_user)
+ except errors.SynapseError as e:
+ res = e.code
+ self.assertEqual(res, 404)
+
+ @defer.inlineCallbacks
+ def test_delete_version(self):
+ """Check that we can create and then delete versions.
+ """
+ res = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+ self.assertEqual(res, "1")
+
+ # check we can delete it
+ yield self.handler.delete_version(self.local_user, "1")
+
+ # check that it's gone
+ res = None
+ try:
+ yield self.handler.get_version_info(self.local_user, "1")
+ except errors.SynapseError as e:
+ res = e.code
+ self.assertEqual(res, 404)
+
+ @defer.inlineCallbacks
+ def test_get_missing_backup(self):
+ """Check that we get a 404 on querying missing backup
+ """
+ res = None
+ try:
+ yield self.handler.get_room_keys(self.local_user, "bogus_version")
+ except errors.SynapseError as e:
+ res = e.code
+ self.assertEqual(res, 404)
+
+ @defer.inlineCallbacks
+ def test_get_missing_room_keys(self):
+ """Check we get an empty response from an empty backup
+ """
+ version = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+ self.assertEqual(version, "1")
+
+ res = yield self.handler.get_room_keys(self.local_user, version)
+ self.assertDictEqual(res, {
+ "rooms": {}
+ })
+
+ # TODO: test the locking semantics when uploading room_keys,
+ # although this is probably best done in sytest
+
+ @defer.inlineCallbacks
+ def test_upload_room_keys_no_versions(self):
+ """Check that we get a 404 on uploading keys when no versions are defined
+ """
+ res = None
+ try:
+ yield self.handler.upload_room_keys(self.local_user, "no_version", room_keys)
+ except errors.SynapseError as e:
+ res = e.code
+ self.assertEqual(res, 404)
+
+ @defer.inlineCallbacks
+ def test_upload_room_keys_bogus_version(self):
+ """Check that we get a 404 on uploading keys when an nonexistent version
+ is specified
+ """
+ version = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+ self.assertEqual(version, "1")
+
+ res = None
+ try:
+ yield self.handler.upload_room_keys(
+ self.local_user, "bogus_version", room_keys
+ )
+ except errors.SynapseError as e:
+ res = e.code
+ self.assertEqual(res, 404)
+
+ @defer.inlineCallbacks
+ def test_upload_room_keys_wrong_version(self):
+ """Check that we get a 403 on uploading keys for an old version
+ """
+ version = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+ self.assertEqual(version, "1")
+
+ version = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "second_version_auth_data",
+ })
+ self.assertEqual(version, "2")
+
+ res = None
+ try:
+ yield self.handler.upload_room_keys(self.local_user, "1", room_keys)
+ except errors.SynapseError as e:
+ res = e.code
+ self.assertEqual(res, 403)
+
+ @defer.inlineCallbacks
+ def test_upload_room_keys_insert(self):
+ """Check that we can insert and retrieve keys for a session
+ """
+ version = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+ self.assertEqual(version, "1")
+
+ yield self.handler.upload_room_keys(self.local_user, version, room_keys)
+
+ res = yield self.handler.get_room_keys(self.local_user, version)
+ self.assertDictEqual(res, room_keys)
+
+ # check getting room_keys for a given room
+ res = yield self.handler.get_room_keys(
+ self.local_user,
+ version,
+ room_id="!abc:matrix.org"
+ )
+ self.assertDictEqual(res, room_keys)
+
+ # check getting room_keys for a given session_id
+ res = yield self.handler.get_room_keys(
+ self.local_user,
+ version,
+ room_id="!abc:matrix.org",
+ session_id="c0ff33",
+ )
+ self.assertDictEqual(res, room_keys)
+
+ @defer.inlineCallbacks
+ def test_upload_room_keys_merge(self):
+ """Check that we can upload a new room_key for an existing session and
+ have it correctly merged"""
+ version = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+ self.assertEqual(version, "1")
+
+ yield self.handler.upload_room_keys(self.local_user, version, room_keys)
+
+ new_room_keys = copy.deepcopy(room_keys)
+ new_room_key = new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']
+
+ # test that increasing the message_index doesn't replace the existing session
+ new_room_key['first_message_index'] = 2
+ new_room_key['session_data'] = 'new'
+ yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
+
+ res = yield self.handler.get_room_keys(self.local_user, version)
+ self.assertEqual(
+ res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
+ "SSBBTSBBIEZJU0gK"
+ )
+
+ # test that marking the session as verified however /does/ replace it
+ new_room_key['is_verified'] = True
+ yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
+
+ res = yield self.handler.get_room_keys(self.local_user, version)
+ self.assertEqual(
+ res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
+ "new"
+ )
+
+ # test that a session with a higher forwarded_count doesn't replace one
+ # with a lower forwarded_count
+ new_room_key['forwarded_count'] = 2
+ new_room_key['session_data'] = 'other'
+ yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
+
+ res = yield self.handler.get_room_keys(self.local_user, version)
+ self.assertEqual(
+ res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
+ "new"
+ )
+
+ # TODO: check edge cases as well as the common variations here
+
+ @defer.inlineCallbacks
+ def test_delete_room_keys(self):
+ """Check that we can insert and delete keys for a session
+ """
+ version = yield self.handler.create_version(self.local_user, {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ })
+ self.assertEqual(version, "1")
+
+ # check for bulk-delete
+ yield self.handler.upload_room_keys(self.local_user, version, room_keys)
+ yield self.handler.delete_room_keys(self.local_user, version)
+ res = yield self.handler.get_room_keys(
+ self.local_user,
+ version,
+ room_id="!abc:matrix.org",
+ session_id="c0ff33",
+ )
+ self.assertDictEqual(res, {
+ "rooms": {}
+ })
+
+ # check for bulk-delete per room
+ yield self.handler.upload_room_keys(self.local_user, version, room_keys)
+ yield self.handler.delete_room_keys(
+ self.local_user,
+ version,
+ room_id="!abc:matrix.org",
+ )
+ res = yield self.handler.get_room_keys(
+ self.local_user,
+ version,
+ room_id="!abc:matrix.org",
+ session_id="c0ff33",
+ )
+ self.assertDictEqual(res, {
+ "rooms": {}
+ })
+
+ # check for bulk-delete per session
+ yield self.handler.upload_room_keys(self.local_user, version, room_keys)
+ yield self.handler.delete_room_keys(
+ self.local_user,
+ version,
+ room_id="!abc:matrix.org",
+ session_id="c0ff33",
+ )
+ res = yield self.handler.get_room_keys(
+ self.local_user,
+ version,
+ room_id="!abc:matrix.org",
+ session_id="c0ff33",
+ )
+ self.assertDictEqual(res, {
+ "rooms": {}
+ })
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 7b4ade3d..3e9a1907 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -19,7 +19,7 @@ from twisted.internet import defer
from synapse.api.errors import ResourceLimitError
from synapse.handlers.register import RegistrationHandler
-from synapse.types import UserID, create_requester
+from synapse.types import RoomAlias, UserID, create_requester
from tests.utils import setup_test_homeserver
@@ -41,30 +41,27 @@ class RegistrationTestCase(unittest.TestCase):
self.mock_captcha_client = Mock()
self.hs = yield setup_test_homeserver(
self.addCleanup,
- handlers=None,
- http_client=None,
expire_access_token=True,
- profile_handler=Mock(),
)
self.macaroon_generator = Mock(
generate_access_token=Mock(return_value='secret')
)
self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator)
- self.hs.handlers = RegistrationHandlers(self.hs)
self.handler = self.hs.get_handlers().registration_handler
self.store = self.hs.get_datastore()
self.hs.config.max_mau_value = 50
self.lots_of_users = 100
self.small_number_of_users = 1
+ self.requester = create_requester("@requester:test")
+
@defer.inlineCallbacks
def test_user_is_created_and_logged_in_if_doesnt_exist(self):
- local_part = "someone"
- display_name = "someone"
- user_id = "@someone:test"
- requester = create_requester("@as:test")
+ frank = UserID.from_string("@frank:test")
+ user_id = frank.to_string()
+ requester = create_requester(user_id)
result_user_id, result_token = yield self.handler.get_or_create_user(
- requester, local_part, display_name
+ requester, frank.localpart, "Frankie"
)
self.assertEquals(result_user_id, user_id)
self.assertEquals(result_token, 'secret')
@@ -78,12 +75,11 @@ class RegistrationTestCase(unittest.TestCase):
token="jkv;g498752-43gj['eamb!-5",
password_hash=None,
)
- local_part = "frank"
- display_name = "Frank"
- user_id = "@frank:test"
- requester = create_requester("@as:test")
+ local_part = frank.localpart
+ user_id = frank.to_string()
+ requester = create_requester(user_id)
result_user_id, result_token = yield self.handler.get_or_create_user(
- requester, local_part, display_name
+ requester, local_part, None
)
self.assertEquals(result_user_id, user_id)
self.assertEquals(result_token, 'secret')
@@ -92,7 +88,7 @@ class RegistrationTestCase(unittest.TestCase):
def test_mau_limits_when_disabled(self):
self.hs.config.limit_usage_by_mau = False
# Ensure does not throw exception
- yield self.handler.get_or_create_user("requester", 'a', "display_name")
+ yield self.handler.get_or_create_user(self.requester, 'a', "display_name")
@defer.inlineCallbacks
def test_get_or_create_user_mau_not_blocked(self):
@@ -101,7 +97,7 @@ class RegistrationTestCase(unittest.TestCase):
return_value=defer.succeed(self.hs.config.max_mau_value - 1)
)
# Ensure does not throw exception
- yield self.handler.get_or_create_user("@user:server", 'c', "User")
+ yield self.handler.get_or_create_user(self.requester, 'c', "User")
@defer.inlineCallbacks
def test_get_or_create_user_mau_blocked(self):
@@ -110,13 +106,13 @@ class RegistrationTestCase(unittest.TestCase):
return_value=defer.succeed(self.lots_of_users)
)
with self.assertRaises(ResourceLimitError):
- yield self.handler.get_or_create_user("requester", 'b', "display_name")
+ yield self.handler.get_or_create_user(self.requester, 'b', "display_name")
self.store.get_monthly_active_count = Mock(
return_value=defer.succeed(self.hs.config.max_mau_value)
)
with self.assertRaises(ResourceLimitError):
- yield self.handler.get_or_create_user("requester", 'b', "display_name")
+ yield self.handler.get_or_create_user(self.requester, 'b', "display_name")
@defer.inlineCallbacks
def test_register_mau_blocked(self):
@@ -147,3 +143,44 @@ class RegistrationTestCase(unittest.TestCase):
)
with self.assertRaises(ResourceLimitError):
yield self.handler.register_saml2(localpart="local_part")
+
+ @defer.inlineCallbacks
+ def test_auto_create_auto_join_rooms(self):
+ room_alias_str = "#room:test"
+ self.hs.config.auto_join_rooms = [room_alias_str]
+ res = yield self.handler.register(localpart='jeff')
+ rooms = yield self.store.get_rooms_for_user(res[0])
+
+ directory_handler = self.hs.get_handlers().directory_handler
+ room_alias = RoomAlias.from_string(room_alias_str)
+ room_id = yield directory_handler.get_association(room_alias)
+
+ self.assertTrue(room_id['room_id'] in rooms)
+ self.assertEqual(len(rooms), 1)
+
+ @defer.inlineCallbacks
+ def test_auto_create_auto_join_rooms_with_no_rooms(self):
+ self.hs.config.auto_join_rooms = []
+ frank = UserID.from_string("@frank:test")
+ res = yield self.handler.register(frank.localpart)
+ self.assertEqual(res[0], frank.to_string())
+ rooms = yield self.store.get_rooms_for_user(res[0])
+ self.assertEqual(len(rooms), 0)
+
+ @defer.inlineCallbacks
+ def test_auto_create_auto_join_where_room_is_another_domain(self):
+ self.hs.config.auto_join_rooms = ["#room:another"]
+ frank = UserID.from_string("@frank:test")
+ res = yield self.handler.register(frank.localpart)
+ self.assertEqual(res[0], frank.to_string())
+ rooms = yield self.store.get_rooms_for_user(res[0])
+ self.assertEqual(len(rooms), 0)
+
+ @defer.inlineCallbacks
+ def test_auto_create_auto_join_where_auto_create_is_false(self):
+ self.hs.config.autocreate_auto_join_rooms = False
+ room_alias_str = "#room:test"
+ self.hs.config.auto_join_rooms = [room_alias_str]
+ res = yield self.handler.register(localpart='jeff')
+ rooms = yield self.store.get_rooms_for_user(res[0])
+ self.assertEqual(len(rooms), 0)
diff --git a/tests/handlers/test_roomlist.py b/tests/handlers/test_roomlist.py
new file mode 100644
index 00000000..61eebb69
--- /dev/null
+++ b/tests/handlers/test_roomlist.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.handlers.room_list import RoomListNextBatch
+
+import tests.unittest
+import tests.utils
+
+
+class RoomListTestCase(tests.unittest.TestCase):
+ """ Tests RoomList's RoomListNextBatch. """
+
+ def setUp(self):
+ pass
+
+ def test_check_read_batch_tokens(self):
+ batch_token = RoomListNextBatch(
+ stream_ordering="abcdef",
+ public_room_stream_id="123",
+ current_limit=20,
+ direction_is_forward=True,
+ ).to_token()
+ next_batch = RoomListNextBatch.from_token(batch_token)
+ self.assertEquals(next_batch.stream_ordering, "abcdef")
+ self.assertEquals(next_batch.public_room_stream_id, "123")
+ self.assertEquals(next_batch.current_limit, 20)
+ self.assertEquals(next_batch.direction_is_forward, True)
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index c2d951b4..36e136cd 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -43,9 +43,7 @@ def _expect_edu_transaction(edu_type, content, origin="test"):
def _make_edu_transaction_json(edu_type, content):
- return json.dumps(_expect_edu_transaction(edu_type, content)).encode(
- 'utf8'
- )
+ return json.dumps(_expect_edu_transaction(edu_type, content)).encode('utf8')
class TypingNotificationsTestCase(unittest.TestCase):
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
new file mode 100644
index 00000000..f3cb1423
--- /dev/null
+++ b/tests/http/test_fedclient.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from twisted.internet.defer import TimeoutError
+from twisted.internet.error import ConnectingCancelledError, DNSLookupError
+from twisted.web.client import ResponseNeverReceived
+from twisted.web.http import HTTPChannel
+
+from synapse.http.matrixfederationclient import (
+ MatrixFederationHttpClient,
+ MatrixFederationRequest,
+)
+
+from tests.server import FakeTransport
+from tests.unittest import HomeserverTestCase
+
+
+class FederationClientTests(HomeserverTestCase):
+ def make_homeserver(self, reactor, clock):
+
+ hs = self.setup_test_homeserver(reactor=reactor, clock=clock)
+ hs.tls_client_options_factory = None
+ return hs
+
+ def prepare(self, reactor, clock, homeserver):
+
+ self.cl = MatrixFederationHttpClient(self.hs)
+ self.reactor.lookups["testserv"] = "1.2.3.4"
+
+ def test_dns_error(self):
+ """
+ If the DNS lookup returns an error, it will bubble up.
+ """
+ d = self.cl.get_json("testserv2:8008", "foo/bar", timeout=10000)
+ self.pump()
+
+ f = self.failureResultOf(d)
+ self.assertIsInstance(f.value, DNSLookupError)
+
+ def test_client_never_connect(self):
+ """
+ If the HTTP request is not connected and is timed out, it'll give a
+ ConnectingCancelledError or TimeoutError.
+ """
+ d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+
+ self.pump()
+
+ # Nothing happened yet
+ self.assertFalse(d.called)
+
+ # Make sure treq is trying to connect
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ self.assertEqual(clients[0][0], '1.2.3.4')
+ self.assertEqual(clients[0][1], 8008)
+
+ # Deferred is still without a result
+ self.assertFalse(d.called)
+
+ # Push by enough to time it out
+ self.reactor.advance(10.5)
+ f = self.failureResultOf(d)
+
+ self.assertIsInstance(f.value, (ConnectingCancelledError, TimeoutError))
+
+ def test_client_connect_no_response(self):
+ """
+ If the HTTP request is connected, but gets no response before being
+ timed out, it'll give a ResponseNeverReceived.
+ """
+ d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+
+ self.pump()
+
+ # Nothing happened yet
+ self.assertFalse(d.called)
+
+ # Make sure treq is trying to connect
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ self.assertEqual(clients[0][0], '1.2.3.4')
+ self.assertEqual(clients[0][1], 8008)
+
+ conn = Mock()
+ client = clients[0][2].buildProtocol(None)
+ client.makeConnection(conn)
+
+ # Deferred is still without a result
+ self.assertFalse(d.called)
+
+ # Push by enough to time it out
+ self.reactor.advance(10.5)
+ f = self.failureResultOf(d)
+
+ self.assertIsInstance(f.value, ResponseNeverReceived)
+
+ def test_client_gets_headers(self):
+ """
+ Once the client gets the headers, _request returns successfully.
+ """
+ request = MatrixFederationRequest(
+ method="GET",
+ destination="testserv:8008",
+ path="foo/bar",
+ )
+ d = self.cl._send_request(request, timeout=10000)
+
+ self.pump()
+
+ conn = Mock()
+ clients = self.reactor.tcpClients
+ client = clients[0][2].buildProtocol(None)
+ client.makeConnection(conn)
+
+ # Deferred does not have a result
+ self.assertFalse(d.called)
+
+ # Send it the HTTP response
+ client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n")
+
+ # We should get a successful response
+ r = self.successResultOf(d)
+ self.assertEqual(r.code, 200)
+
+ def test_client_headers_no_body(self):
+ """
+ If the HTTP request is connected and receives the response headers, but
+ no body arrives before it is timed out, it'll give a TimeoutError.
+ """
+ d = self.cl.post_json("testserv:8008", "foo/bar", timeout=10000)
+
+ self.pump()
+
+ conn = Mock()
+ clients = self.reactor.tcpClients
+ client = clients[0][2].buildProtocol(None)
+ client.makeConnection(conn)
+
+ # Deferred does not have a result
+ self.assertFalse(d.called)
+
+ # Send it the HTTP response
+ client.dataReceived(
+ (b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"
+ b"Server: Fake\r\n\r\n")
+ )
+
+ # Push by enough to time it out
+ self.reactor.advance(10.5)
+ f = self.failureResultOf(d)
+
+ self.assertIsInstance(f.value, TimeoutError)
+
+ def test_client_sends_body(self):
+ self.cl.post_json(
+ "testserv:8008", "foo/bar", timeout=10000,
+ data={"a": "b"}
+ )
+
+ self.pump()
+
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ client = clients[0][2].buildProtocol(None)
+ server = HTTPChannel()
+
+ client.makeConnection(FakeTransport(server, self.reactor))
+ server.makeConnection(FakeTransport(client, self.reactor))
+
+ self.pump(0.1)
+
+ self.assertEqual(len(server.requests), 1)
+ request = server.requests[0]
+ content = request.content.read()
+ self.assertEqual(content, b'{"a":"b"}')
diff --git a/tests/push/__init__.py b/tests/push/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/push/__init__.py
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
new file mode 100644
index 00000000..50ee6910
--- /dev/null
+++ b/tests/push/test_email.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import pkg_resources
+
+from twisted.internet.defer import Deferred
+
+from synapse.rest.client.v1 import admin, login, room
+
+from tests.unittest import HomeserverTestCase
+
+try:
+ from synapse.push.mailer import load_jinja2_templates
+except Exception:
+ load_jinja2_templates = None
+
+
+class EmailPusherTests(HomeserverTestCase):
+
+ skip = "No Jinja installed" if not load_jinja2_templates else None
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+ user_id = True
+ hijack_auth = False
+
+ def make_homeserver(self, reactor, clock):
+
+ # List[Tuple[Deferred, args, kwargs]]
+ self.email_attempts = []
+
+ def sendmail(*args, **kwargs):
+ d = Deferred()
+ self.email_attempts.append((d, args, kwargs))
+ return d
+
+ config = self.default_config()
+ config.email_enable_notifs = True
+ config.start_pushers = True
+
+ config.email_template_dir = os.path.abspath(
+ pkg_resources.resource_filename('synapse', 'res/templates')
+ )
+ config.email_notif_template_html = "notif_mail.html"
+ config.email_notif_template_text = "notif_mail.txt"
+ config.email_smtp_host = "127.0.0.1"
+ config.email_smtp_port = 20
+ config.require_transport_security = False
+ config.email_smtp_user = None
+ config.email_app_name = "Matrix"
+ config.email_notif_from = "test@example.com"
+
+ hs = self.setup_test_homeserver(config=config, sendmail=sendmail)
+
+ return hs
+
+ def test_sends_email(self):
+
+ # Register the user who gets notified
+ user_id = self.register_user("user", "pass")
+ access_token = self.login("user", "pass")
+
+ # Register the user who sends the message
+ other_user_id = self.register_user("otheruser", "pass")
+ other_access_token = self.login("otheruser", "pass")
+
+ # Register the pusher
+ user_tuple = self.get_success(
+ self.hs.get_datastore().get_user_by_access_token(access_token)
+ )
+ token_id = user_tuple["token_id"]
+
+ self.get_success(
+ self.hs.get_pusherpool().add_pusher(
+ user_id=user_id,
+ access_token=token_id,
+ kind="email",
+ app_id="m.email",
+ app_display_name="Email Notifications",
+ device_display_name="a@example.com",
+ pushkey="a@example.com",
+ lang=None,
+ data={},
+ )
+ )
+
+ # Create a room
+ room = self.helper.create_room_as(user_id, tok=access_token)
+
+ # Invite the other person
+ self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
+
+ # The other user joins
+ self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+ # The other user sends some messages
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.helper.send(room, body="There!", tok=other_access_token)
+
+ # Get the stream ordering before it gets sent
+ pushers = self.get_success(
+ self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+ )
+ self.assertEqual(len(pushers), 1)
+ last_stream_ordering = pushers[0]["last_stream_ordering"]
+
+ # Advance time a bit, so the pusher will register something has happened
+ self.pump(100)
+
+ # It hasn't succeeded yet, so the stream ordering shouldn't have moved
+ pushers = self.get_success(
+ self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+ )
+ self.assertEqual(len(pushers), 1)
+ self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
+
+ # One email was attempted to be sent
+ self.assertEqual(len(self.email_attempts), 1)
+
+ # Make the email succeed
+ self.email_attempts[0][0].callback(True)
+ self.pump()
+
+ # One email was attempted to be sent
+ self.assertEqual(len(self.email_attempts), 1)
+
+ # The stream ordering has increased
+ pushers = self.get_success(
+ self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+ )
+ self.assertEqual(len(pushers), 1)
+ self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
new file mode 100644
index 00000000..addc01ab
--- /dev/null
+++ b/tests/push/test_http.py
@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from twisted.internet.defer import Deferred
+
+from synapse.rest.client.v1 import admin, login, room
+
+from tests.unittest import HomeserverTestCase
+
+try:
+ from synapse.push.mailer import load_jinja2_templates
+except Exception:
+ load_jinja2_templates = None
+
+
+class HTTPPusherTests(HomeserverTestCase):
+
+ skip = "No Jinja installed" if not load_jinja2_templates else None
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+ user_id = True
+ hijack_auth = False
+
+ def make_homeserver(self, reactor, clock):
+
+ self.push_attempts = []
+
+ m = Mock()
+
+ def post_json_get_json(url, body):
+ d = Deferred()
+ self.push_attempts.append((d, url, body))
+ return d
+
+ m.post_json_get_json = post_json_get_json
+
+ config = self.default_config()
+ config.start_pushers = True
+
+ hs = self.setup_test_homeserver(config=config, simple_http_client=m)
+
+ return hs
+
+ def test_sends_http(self):
+ """
+ The HTTP pusher will send pushes for each message to a HTTP endpoint
+ when configured to do so.
+ """
+ # Register the user who gets notified
+ user_id = self.register_user("user", "pass")
+ access_token = self.login("user", "pass")
+
+ # Register the user who sends the message
+ other_user_id = self.register_user("otheruser", "pass")
+ other_access_token = self.login("otheruser", "pass")
+
+ # Register the pusher
+ user_tuple = self.get_success(
+ self.hs.get_datastore().get_user_by_access_token(access_token)
+ )
+ token_id = user_tuple["token_id"]
+
+ self.get_success(
+ self.hs.get_pusherpool().add_pusher(
+ user_id=user_id,
+ access_token=token_id,
+ kind="http",
+ app_id="m.http",
+ app_display_name="HTTP Push Notifications",
+ device_display_name="pushy push",
+ pushkey="a@example.com",
+ lang=None,
+ data={"url": "example.com"},
+ )
+ )
+
+ # Create a room
+ room = self.helper.create_room_as(user_id, tok=access_token)
+
+ # Invite the other person
+ self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
+
+ # The other user joins
+ self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+ # The other user sends some messages
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.helper.send(room, body="There!", tok=other_access_token)
+
+ # Get the stream ordering before it gets sent
+ pushers = self.get_success(
+ self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+ )
+ self.assertEqual(len(pushers), 1)
+ last_stream_ordering = pushers[0]["last_stream_ordering"]
+
+ # Advance time a bit, so the pusher will register something has happened
+ self.pump()
+
+ # It hasn't succeeded yet, so the stream ordering shouldn't have moved
+ pushers = self.get_success(
+ self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+ )
+ self.assertEqual(len(pushers), 1)
+ self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
+
+ # One push was attempted to be sent -- it'll be the first message
+ self.assertEqual(len(self.push_attempts), 1)
+ self.assertEqual(self.push_attempts[0][1], "example.com")
+ self.assertEqual(
+ self.push_attempts[0][2]["notification"]["content"]["body"], "Hi!"
+ )
+
+ # Make the push succeed
+ self.push_attempts[0][0].callback({})
+ self.pump()
+
+ # The stream ordering has increased
+ pushers = self.get_success(
+ self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+ )
+ self.assertEqual(len(pushers), 1)
+ self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
+ last_stream_ordering = pushers[0]["last_stream_ordering"]
+
+ # Now it'll try and send the second push message
+ self.assertEqual(len(self.push_attempts), 2)
+ self.assertEqual(self.push_attempts[1][1], "example.com")
+ self.assertEqual(
+ self.push_attempts[1][2]["notification"]["content"]["body"], "There!"
+ )
+
+ # Make the second push succeed
+ self.push_attempts[1][0].callback({})
+ self.pump()
+
+ # The stream ordering has increased, again
+ pushers = self.get_success(
+ self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+ )
+ self.assertEqual(len(pushers), 1)
+ self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py
index 089cecfb..9e9fbbfe 100644
--- a/tests/replication/slave/storage/_base.py
+++ b/tests/replication/slave/storage/_base.py
@@ -15,8 +15,6 @@
from mock import Mock, NonCallableMock
-import attr
-
from synapse.replication.tcp.client import (
ReplicationClientFactory,
ReplicationClientHandler,
@@ -24,6 +22,7 @@ from synapse.replication.tcp.client import (
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from tests import unittest
+from tests.server import FakeTransport
class BaseSlavedStoreTestCase(unittest.HomeserverTestCase):
@@ -56,36 +55,8 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase):
server = server_factory.buildProtocol(None)
client = client_factory.buildProtocol(None)
- @attr.s
- class FakeTransport(object):
-
- other = attr.ib()
- disconnecting = False
- buffer = attr.ib(default=b'')
-
- def registerProducer(self, producer, streaming):
-
- self.producer = producer
-
- def _produce():
- self.producer.resumeProducing()
- reactor.callLater(0.1, _produce)
-
- reactor.callLater(0.0, _produce)
-
- def write(self, byt):
- self.buffer = self.buffer + byt
-
- if getattr(self.other, "transport") is not None:
- self.other.dataReceived(self.buffer)
- self.buffer = b""
-
- def writeSequence(self, seq):
- for x in seq:
- self.write(x)
-
- client.makeConnection(FakeTransport(server))
- server.makeConnection(FakeTransport(client))
+ client.makeConnection(FakeTransport(server, reactor))
+ server.makeConnection(FakeTransport(client, reactor))
def replicate(self):
"""Tell the master side of replication that something has happened, and then
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index db44d33c..1688a741 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from canonicaljson import encode_canonical_json
+
from synapse.events import FrozenEvent, _EventInternalMetadata
from synapse.events.snapshot import EventContext
from synapse.replication.slave.storage.events import SlavedEventStore
@@ -26,7 +28,9 @@ ROOM_ID = "!room:blue"
def dict_equals(self, other):
- return self.__dict__ == other.__dict__
+ me = encode_canonical_json(self.get_pdu_json())
+ them = encode_canonical_json(other.get_pdu_json())
+ return me == them
def patch__eq__(cls):
diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py
new file mode 100644
index 00000000..4294bbec
--- /dev/null
+++ b/tests/rest/client/test_consent.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from synapse.api.urls import ConsentURIBuilder
+from synapse.rest.client.v1 import admin, login, room
+from synapse.rest.consent import consent_resource
+
+from tests import unittest
+from tests.server import render
+
+try:
+ from synapse.push.mailer import load_jinja2_templates
+except Exception:
+ load_jinja2_templates = None
+
+
+class ConsentResourceTestCase(unittest.HomeserverTestCase):
+ skip = "No Jinja installed" if not load_jinja2_templates else None
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+ user_id = True
+ hijack_auth = False
+
+ def make_homeserver(self, reactor, clock):
+
+ config = self.default_config()
+ config.user_consent_version = "1"
+ config.public_baseurl = ""
+ config.form_secret = "123abc"
+
+ # Make some temporary templates...
+ temp_consent_path = self.mktemp()
+ os.mkdir(temp_consent_path)
+ os.mkdir(os.path.join(temp_consent_path, 'en'))
+ config.user_consent_template_dir = os.path.abspath(temp_consent_path)
+
+ with open(os.path.join(temp_consent_path, "en/1.html"), 'w') as f:
+ f.write("{{version}},{{has_consented}}")
+
+ with open(os.path.join(temp_consent_path, "en/success.html"), 'w') as f:
+ f.write("yay!")
+
+ hs = self.setup_test_homeserver(config=config)
+ return hs
+
+ def test_render_public_consent(self):
+ """You can observe the terms form without specifying a user"""
+ resource = consent_resource.ConsentResource(self.hs)
+ request, channel = self.make_request("GET", "/consent?v=1", shorthand=False)
+ render(request, resource, self.reactor)
+ self.assertEqual(channel.code, 200)
+
+ def test_accept_consent(self):
+ """
+ A user can use the consent form to accept the terms.
+ """
+ uri_builder = ConsentURIBuilder(self.hs.config)
+ resource = consent_resource.ConsentResource(self.hs)
+
+ # Register a user
+ user_id = self.register_user("user", "pass")
+ access_token = self.login("user", "pass")
+
+ # Fetch the consent page, to get the consent version
+ consent_uri = (
+ uri_builder.build_user_consent_uri(user_id).replace("_matrix/", "")
+ + "&u=user"
+ )
+ request, channel = self.make_request(
+ "GET", consent_uri, access_token=access_token, shorthand=False
+ )
+ render(request, resource, self.reactor)
+ self.assertEqual(channel.code, 200)
+
+ # Get the version from the body, and whether we've consented
+ version, consented = channel.result["body"].decode('ascii').split(",")
+ self.assertEqual(consented, "False")
+
+ # POST to the consent page, saying we've agreed
+ request, channel = self.make_request(
+ "POST",
+ consent_uri + "&v=" + version,
+ access_token=access_token,
+ shorthand=False,
+ )
+ render(request, resource, self.reactor)
+ self.assertEqual(channel.code, 200)
+
+ # Fetch the consent page, to get the consent version -- it should have
+ # changed
+ request, channel = self.make_request(
+ "GET", consent_uri, access_token=access_token, shorthand=False
+ )
+ render(request, resource, self.reactor)
+ self.assertEqual(channel.code, 200)
+
+ # Get the version from the body, and check that it's the version we
+ # agreed to, and that we've consented to it.
+ version, consented = channel.result["body"].decode('ascii').split(",")
+ self.assertEqual(consented, "True")
+ self.assertEqual(version, "1")
diff --git a/tests/rest/client/v1/test_admin.py b/tests/rest/client/v1/test_admin.py
index 1a553fa3..e38eb628 100644
--- a/tests/rest/client/v1/test_admin.py
+++ b/tests/rest/client/v1/test_admin.py
@@ -19,24 +19,17 @@ import json
from mock import Mock
-from synapse.http.server import JsonResource
from synapse.rest.client.v1.admin import register_servlets
-from synapse.util import Clock
from tests import unittest
-from tests.server import (
- ThreadedMemoryReactorClock,
- make_request,
- render,
- setup_test_homeserver,
-)
-class UserRegisterTestCase(unittest.TestCase):
- def setUp(self):
+class UserRegisterTestCase(unittest.HomeserverTestCase):
+
+ servlets = [register_servlets]
+
+ def make_homeserver(self, reactor, clock):
- self.clock = ThreadedMemoryReactorClock()
- self.hs_clock = Clock(self.clock)
self.url = "/_matrix/client/r0/admin/register"
self.registration_handler = Mock()
@@ -50,17 +43,14 @@ class UserRegisterTestCase(unittest.TestCase):
self.secrets = Mock()
- self.hs = setup_test_homeserver(
- self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
- )
+ self.hs = self.setup_test_homeserver()
self.hs.config.registration_shared_secret = u"shared"
self.hs.get_media_repository = Mock()
self.hs.get_deactivate_account_handler = Mock()
- self.resource = JsonResource(self.hs)
- register_servlets(self.hs, self.resource)
+ return self.hs
def test_disabled(self):
"""
@@ -69,8 +59,8 @@ class UserRegisterTestCase(unittest.TestCase):
"""
self.hs.config.registration_shared_secret = None
- request, channel = make_request("POST", self.url, b'{}')
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, b'{}')
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(
@@ -87,8 +77,8 @@ class UserRegisterTestCase(unittest.TestCase):
self.hs.get_secrets = Mock(return_value=secrets)
- request, channel = make_request("GET", self.url)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("GET", self.url)
+ self.render(request)
self.assertEqual(channel.json_body, {"nonce": "abcd"})
@@ -97,25 +87,25 @@ class UserRegisterTestCase(unittest.TestCase):
Calling GET on the endpoint will return a randomised nonce, which will
only last for SALT_TIMEOUT (60s).
"""
- request, channel = make_request("GET", self.url)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("GET", self.url)
+ self.render(request)
nonce = channel.json_body["nonce"]
# 59 seconds
- self.clock.advance(59)
+ self.reactor.advance(59)
body = json.dumps({"nonce": nonce})
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('username must be specified', channel.json_body["error"])
# 61 seconds
- self.clock.advance(2)
+ self.reactor.advance(2)
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('unrecognised nonce', channel.json_body["error"])
@@ -124,8 +114,8 @@ class UserRegisterTestCase(unittest.TestCase):
"""
Only the provided nonce can be used, as it's checked in the MAC.
"""
- request, channel = make_request("GET", self.url)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("GET", self.url)
+ self.render(request)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
@@ -141,8 +131,8 @@ class UserRegisterTestCase(unittest.TestCase):
"mac": want_mac,
}
)
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("HMAC incorrect", channel.json_body["error"])
@@ -152,8 +142,8 @@ class UserRegisterTestCase(unittest.TestCase):
When the correct nonce is provided, and the right key is provided, the
user is registered.
"""
- request, channel = make_request("GET", self.url)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("GET", self.url)
+ self.render(request)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
@@ -169,8 +159,8 @@ class UserRegisterTestCase(unittest.TestCase):
"mac": want_mac,
}
)
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["user_id"])
@@ -179,8 +169,8 @@ class UserRegisterTestCase(unittest.TestCase):
"""
A valid unrecognised nonce.
"""
- request, channel = make_request("GET", self.url)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("GET", self.url)
+ self.render(request)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
@@ -196,15 +186,15 @@ class UserRegisterTestCase(unittest.TestCase):
"mac": want_mac,
}
)
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["user_id"])
# Now, try and reuse it
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('unrecognised nonce', channel.json_body["error"])
@@ -217,8 +207,8 @@ class UserRegisterTestCase(unittest.TestCase):
"""
def nonce():
- request, channel = make_request("GET", self.url)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("GET", self.url)
+ self.render(request)
return channel.json_body["nonce"]
#
@@ -227,8 +217,8 @@ class UserRegisterTestCase(unittest.TestCase):
# Must be present
body = json.dumps({})
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('nonce must be specified', channel.json_body["error"])
@@ -239,32 +229,32 @@ class UserRegisterTestCase(unittest.TestCase):
# Must be present
body = json.dumps({"nonce": nonce()})
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('username must be specified', channel.json_body["error"])
# Must be a string
body = json.dumps({"nonce": nonce(), "username": 1234})
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid username', channel.json_body["error"])
# Must not have null bytes
body = json.dumps({"nonce": nonce(), "username": u"abcd\u0000"})
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid username', channel.json_body["error"])
# Must not have null bytes
body = json.dumps({"nonce": nonce(), "username": "a" * 1000})
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid username', channel.json_body["error"])
@@ -275,16 +265,16 @@ class UserRegisterTestCase(unittest.TestCase):
# Must be present
body = json.dumps({"nonce": nonce(), "username": "a"})
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('password must be specified', channel.json_body["error"])
# Must be a string
body = json.dumps({"nonce": nonce(), "username": "a", "password": 1234})
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid password', channel.json_body["error"])
@@ -293,16 +283,16 @@ class UserRegisterTestCase(unittest.TestCase):
body = json.dumps(
{"nonce": nonce(), "username": "a", "password": u"abcd\u0000"}
)
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid password', channel.json_body["error"])
# Super long
body = json.dumps({"nonce": nonce(), "username": "a", "password": "A" * 1000})
- request, channel = make_request("POST", self.url, body.encode('utf8'))
- render(request, self.resource, self.clock)
+ request, channel = self.make_request("POST", self.url, body.encode('utf8'))
+ self.render(request)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid password', channel.json_body["error"])
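
The want_mac construction in the hunks above (hmac.new(key=b"shared", digestmod=hashlib.sha1)) is completed in unchanged context lines that the diff does not show. For orientation, a sketch of how such a MAC is typically assembled for this shared-secret registration endpoint, assuming the NUL-separated nonce/username/password/admin layout rather than quoting the elided lines:

import hashlib
import hmac


def admin_register_mac(shared_secret, nonce, username, password, admin=False):
    # Assumed layout: nonce, username, password and an admin flag, separated
    # by NUL bytes and keyed with the server's registration_shared_secret.
    mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
    mac.update(nonce.encode("utf8"))
    mac.update(b"\x00")
    mac.update(username.encode("utf8"))
    mac.update(b"\x00")
    mac.update(password.encode("utf8"))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    return mac.hexdigest()


# e.g. admin_register_mac(b"shared", nonce, "bob", password) would give the
# "mac" field for the "@bob:test" registration above (the password value is
# not visible in this diff).
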
diff --git a/tests/rest/client/v1/test_register.py b/tests/rest/client/v1/test_register.py
index 6b7ff813..f973eff8 100644
--- a/tests/rest/client/v1/test_register.py
+++ b/tests/rest/client/v1/test_register.py
@@ -45,11 +45,11 @@ class CreateUserServletTestCase(unittest.TestCase):
)
handlers = Mock(registration_handler=self.registration_handler)
- self.clock = MemoryReactorClock()
- self.hs_clock = Clock(self.clock)
+ self.reactor = MemoryReactorClock()
+ self.hs_clock = Clock(self.reactor)
self.hs = self.hs = setup_test_homeserver(
- self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
+ self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.reactor
)
self.hs.get_datastore = Mock(return_value=self.datastore)
self.hs.get_handlers = Mock(return_value=handlers)
@@ -76,8 +76,8 @@ class CreateUserServletTestCase(unittest.TestCase):
return_value=(user_id, token)
)
- request, channel = make_request(b"POST", url, request_data)
- render(request, res, self.clock)
+ request, channel = make_request(self.reactor, b"POST", url, request_data)
+ render(request, res, self.reactor)
self.assertEquals(channel.result["code"], b"200")
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 9fe07604..a824be9a 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -22,39 +22,24 @@ from six.moves.urllib import parse as urlparse
from twisted.internet import defer
-import synapse.rest.client.v1.room
from synapse.api.constants import Membership
-from synapse.http.server import JsonResource
-from synapse.types import UserID
-from synapse.util import Clock
+from synapse.rest.client.v1 import admin, login, room
from tests import unittest
-from tests.server import (
- ThreadedMemoryReactorClock,
- make_request,
- render,
- setup_test_homeserver,
-)
-
-from .utils import RestHelper
PATH_PREFIX = b"/_matrix/client/api/v1"
-class RoomBase(unittest.TestCase):
+class RoomBase(unittest.HomeserverTestCase):
rmcreator_id = None
- def setUp(self):
+ servlets = [room.register_servlets, room.register_deprecated_servlets]
- self.clock = ThreadedMemoryReactorClock()
- self.hs_clock = Clock(self.clock)
+ def make_homeserver(self, reactor, clock):
- self.hs = setup_test_homeserver(
- self.addCleanup,
+ self.hs = self.setup_test_homeserver(
"red",
http_client=None,
- clock=self.hs_clock,
- reactor=self.clock,
federation_client=Mock(),
ratelimiter=NonCallableMock(spec_set=["send_message"]),
)
@@ -63,42 +48,21 @@ class RoomBase(unittest.TestCase):
self.hs.get_federation_handler = Mock(return_value=Mock())
- def get_user_by_access_token(token=None, allow_guest=False):
- return {
- "user": UserID.from_string(self.helper.auth_user_id),
- "token_id": 1,
- "is_guest": False,
- }
-
- def get_user_by_req(request, allow_guest=False, rights="access"):
- return synapse.types.create_requester(
- UserID.from_string(self.helper.auth_user_id), 1, False, None
- )
-
- self.hs.get_auth().get_user_by_req = get_user_by_req
- self.hs.get_auth().get_user_by_access_token = get_user_by_access_token
- self.hs.get_auth().get_access_token_from_request = Mock(return_value=b"1234")
-
def _insert_client_ip(*args, **kwargs):
return defer.succeed(None)
self.hs.get_datastore().insert_client_ip = _insert_client_ip
- self.resource = JsonResource(self.hs)
- synapse.rest.client.v1.room.register_servlets(self.hs, self.resource)
- synapse.rest.client.v1.room.register_deprecated_servlets(self.hs, self.resource)
- self.helper = RestHelper(self.hs, self.resource, self.user_id)
+ return self.hs
class RoomPermissionsTestCase(RoomBase):
""" Tests room permissions. """
- user_id = b"@sid1:red"
- rmcreator_id = b"@notme:red"
-
- def setUp(self):
+ user_id = "@sid1:red"
+ rmcreator_id = "@notme:red"
- super(RoomPermissionsTestCase, self).setUp()
+ def prepare(self, reactor, clock, hs):
self.helper.auth_user_id = self.rmcreator_id
# create some rooms under the name rmcreator_id
@@ -114,22 +78,20 @@ class RoomPermissionsTestCase(RoomBase):
self.created_rmid_msg_path = (
"rooms/%s/send/m.room.message/a1" % (self.created_rmid)
).encode('ascii')
- request, channel = make_request(
- b"PUT",
- self.created_rmid_msg_path,
- b'{"msgtype":"m.text","body":"test msg"}',
+ request, channel = self.make_request(
+ "PUT", self.created_rmid_msg_path, b'{"msgtype":"m.text","body":"test msg"}'
)
- render(request, self.resource, self.clock)
- self.assertEquals(channel.result["code"], b"200", channel.result)
+ self.render(request)
+ self.assertEquals(200, channel.code, channel.result)
# set topic for public room
- request, channel = make_request(
- b"PUT",
+ request, channel = self.make_request(
+ "PUT",
("rooms/%s/state/m.room.topic" % self.created_public_rmid).encode('ascii'),
b'{"topic":"Public Room Topic"}',
)
- render(request, self.resource, self.clock)
- self.assertEquals(channel.result["code"], b"200", channel.result)
+ self.render(request)
+ self.assertEquals(200, channel.code, channel.result)
# auth as user_id now
self.helper.auth_user_id = self.user_id
@@ -140,128 +102,128 @@ class RoomPermissionsTestCase(RoomBase):
seq = iter(range(100))
def send_msg_path():
- return b"/rooms/%s/send/m.room.message/mid%s" % (
+ return "/rooms/%s/send/m.room.message/mid%s" % (
self.created_rmid,
- str(next(seq)).encode('ascii'),
+ str(next(seq)),
)
# send message in uncreated room, expect 403
- request, channel = make_request(
- b"PUT",
- b"/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,),
+ request, channel = self.make_request(
+ "PUT",
+ "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,),
msg_content,
)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
# send message in created room not joined (no state), expect 403
- request, channel = make_request(b"PUT", send_msg_path(), msg_content)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", send_msg_path(), msg_content)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
# send message in created room and invited, expect 403
self.helper.invite(
room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id
)
- request, channel = make_request(b"PUT", send_msg_path(), msg_content)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", send_msg_path(), msg_content)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
# send message in created room and joined, expect 200
self.helper.join(room=self.created_rmid, user=self.user_id)
- request, channel = make_request(b"PUT", send_msg_path(), msg_content)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", send_msg_path(), msg_content)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
# send message in created room and left, expect 403
self.helper.leave(room=self.created_rmid, user=self.user_id)
- request, channel = make_request(b"PUT", send_msg_path(), msg_content)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", send_msg_path(), msg_content)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
def test_topic_perms(self):
topic_content = b'{"topic":"My Topic Name"}'
- topic_path = b"/rooms/%s/state/m.room.topic" % self.created_rmid
+ topic_path = "/rooms/%s/state/m.room.topic" % self.created_rmid
# set/get topic in uncreated room, expect 403
- request, channel = make_request(
- b"PUT", b"/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content
+ request, channel = self.make_request(
+ "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content
)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
- request, channel = make_request(
- b"GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
+ request, channel = self.make_request(
+ "GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid
)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
# set/get topic in created PRIVATE room not joined, expect 403
- request, channel = make_request(b"PUT", topic_path, topic_content)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
- request, channel = make_request(b"GET", topic_path)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", topic_path, topic_content)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
+ request, channel = self.make_request("GET", topic_path)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
# set topic in created PRIVATE room and invited, expect 403
self.helper.invite(
room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id
)
- request, channel = make_request(b"PUT", topic_path, topic_content)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", topic_path, topic_content)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
# get topic in created PRIVATE room and invited, expect 403
- request, channel = make_request(b"GET", topic_path)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", topic_path)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
# set/get topic in created PRIVATE room and joined, expect 200
self.helper.join(room=self.created_rmid, user=self.user_id)
# Only room ops can set topic by default
self.helper.auth_user_id = self.rmcreator_id
- request, channel = make_request(b"PUT", topic_path, topic_content)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", topic_path, topic_content)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
self.helper.auth_user_id = self.user_id
- request, channel = make_request(b"GET", topic_path)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
- self.assert_dict(json.loads(topic_content), channel.json_body)
+ request, channel = self.make_request("GET", topic_path)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
+ self.assert_dict(json.loads(topic_content.decode('utf8')), channel.json_body)
# set/get topic in created PRIVATE room and left, expect 403
self.helper.leave(room=self.created_rmid, user=self.user_id)
- request, channel = make_request(b"PUT", topic_path, topic_content)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
- request, channel = make_request(b"GET", topic_path)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", topic_path, topic_content)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
+ request, channel = self.make_request("GET", topic_path)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
# get topic in PUBLIC room, not joined, expect 403
- request, channel = make_request(
- b"GET", b"/rooms/%s/state/m.room.topic" % self.created_public_rmid
+ request, channel = self.make_request(
+ "GET", "/rooms/%s/state/m.room.topic" % self.created_public_rmid
)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
# set topic in PUBLIC room, not joined, expect 403
- request, channel = make_request(
- b"PUT",
- b"/rooms/%s/state/m.room.topic" % self.created_public_rmid,
+ request, channel = self.make_request(
+ "PUT",
+ "/rooms/%s/state/m.room.topic" % self.created_public_rmid,
topic_content,
)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
def _test_get_membership(self, room=None, members=[], expect_code=None):
for member in members:
- path = b"/rooms/%s/state/m.room.member/%s" % (room, member)
- request, channel = make_request(b"GET", path)
- render(request, self.resource, self.clock)
- self.assertEquals(expect_code, int(channel.result["code"]))
+ path = "/rooms/%s/state/m.room.member/%s" % (room, member)
+ request, channel = self.make_request("GET", path)
+ self.render(request)
+ self.assertEquals(expect_code, channel.code)
def test_membership_basic_room_perms(self):
# === room does not exist ===
@@ -428,217 +390,211 @@ class RoomPermissionsTestCase(RoomBase):
class RoomsMemberListTestCase(RoomBase):
""" Tests /rooms/$room_id/members/list REST events."""
- user_id = b"@sid1:red"
+ user_id = "@sid1:red"
def test_get_member_list(self):
room_id = self.helper.create_room_as(self.user_id)
- request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", "/rooms/%s/members" % room_id)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
def test_get_member_list_no_room(self):
- request, channel = make_request(b"GET", b"/rooms/roomdoesnotexist/members")
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", "/rooms/roomdoesnotexist/members")
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
def test_get_member_list_no_permission(self):
- room_id = self.helper.create_room_as(b"@some_other_guy:red")
- request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ room_id = self.helper.create_room_as("@some_other_guy:red")
+ request, channel = self.make_request("GET", "/rooms/%s/members" % room_id)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
def test_get_member_list_mixed_memberships(self):
- room_creator = b"@some_other_guy:red"
+ room_creator = "@some_other_guy:red"
room_id = self.helper.create_room_as(room_creator)
- room_path = b"/rooms/%s/members" % room_id
+ room_path = "/rooms/%s/members" % room_id
self.helper.invite(room=room_id, src=room_creator, targ=self.user_id)
# can't see list if you're just invited.
- request, channel = make_request(b"GET", room_path)
- render(request, self.resource, self.clock)
- self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", room_path)
+ self.render(request)
+ self.assertEquals(403, channel.code, msg=channel.result["body"])
self.helper.join(room=room_id, user=self.user_id)
# can see list now joined
- request, channel = make_request(b"GET", room_path)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", room_path)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
self.helper.leave(room=room_id, user=self.user_id)
# can see old list once left
- request, channel = make_request(b"GET", room_path)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", room_path)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
class RoomsCreateTestCase(RoomBase):
""" Tests /rooms and /rooms/$room_id REST events. """
- user_id = b"@sid1:red"
+ user_id = "@sid1:red"
def test_post_room_no_keys(self):
# POST with no config keys, expect new room id
- request, channel = make_request(b"POST", b"/createRoom", b"{}")
+ request, channel = self.make_request("POST", "/createRoom", "{}")
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), channel.result)
+ self.render(request)
+ self.assertEquals(200, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
def test_post_room_visibility_key(self):
# POST with visibility config key, expect new room id
- request, channel = make_request(
- b"POST", b"/createRoom", b'{"visibility":"private"}'
+ request, channel = self.make_request(
+ "POST", "/createRoom", b'{"visibility":"private"}'
)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]))
+ self.render(request)
+ self.assertEquals(200, channel.code)
self.assertTrue("room_id" in channel.json_body)
def test_post_room_custom_key(self):
# POST with custom config keys, expect new room id
- request, channel = make_request(b"POST", b"/createRoom", b'{"custom":"stuff"}')
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]))
+ request, channel = self.make_request(
+ "POST", "/createRoom", b'{"custom":"stuff"}'
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code)
self.assertTrue("room_id" in channel.json_body)
def test_post_room_known_and_unknown_keys(self):
# POST with custom + known config keys, expect new room id
- request, channel = make_request(
- b"POST", b"/createRoom", b'{"visibility":"private","custom":"things"}'
+ request, channel = self.make_request(
+ "POST", "/createRoom", b'{"visibility":"private","custom":"things"}'
)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]))
+ self.render(request)
+ self.assertEquals(200, channel.code)
self.assertTrue("room_id" in channel.json_body)
def test_post_room_invalid_content(self):
# POST with invalid content / paths, expect 400
- request, channel = make_request(b"POST", b"/createRoom", b'{"visibili')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]))
+ request, channel = self.make_request("POST", "/createRoom", b'{"visibili')
+ self.render(request)
+ self.assertEquals(400, channel.code)
- request, channel = make_request(b"POST", b"/createRoom", b'["hello"]')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]))
+ request, channel = self.make_request("POST", "/createRoom", b'["hello"]')
+ self.render(request)
+ self.assertEquals(400, channel.code)
class RoomTopicTestCase(RoomBase):
""" Tests /rooms/$room_id/topic REST events. """
- user_id = b"@sid1:red"
-
- def setUp(self):
-
- super(RoomTopicTestCase, self).setUp()
+ user_id = "@sid1:red"
+ def prepare(self, reactor, clock, hs):
# create the room
self.room_id = self.helper.create_room_as(self.user_id)
- self.path = b"/rooms/%s/state/m.room.topic" % (self.room_id,)
+ self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,)
def test_invalid_puts(self):
# missing keys or invalid json
- request, channel = make_request(b"PUT", self.path, '{}')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", self.path, '{}')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", self.path, '{"_name":"bob"}')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", self.path, '{"_name":"bo"}')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", self.path, '{"nao')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", self.path, '{"nao')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(
- b"PUT", self.path, '[{"_name":"bob"},{"_name":"jill"}]'
+ request, channel = self.make_request(
+ "PUT", self.path, '[{"_name":"bo"},{"_name":"jill"}]'
)
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", self.path, 'text only')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", self.path, 'text only')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", self.path, '')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", self.path, '')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
# valid key, wrong type
content = '{"topic":["Topic name"]}'
- request, channel = make_request(b"PUT", self.path, content)
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", self.path, content)
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
def test_rooms_topic(self):
# nothing should be there
- request, channel = make_request(b"GET", self.path)
- render(request, self.resource, self.clock)
- self.assertEquals(404, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", self.path)
+ self.render(request)
+ self.assertEquals(404, channel.code, msg=channel.result["body"])
# valid put
content = '{"topic":"Topic name"}'
- request, channel = make_request(b"PUT", self.path, content)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", self.path, content)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
# valid get
- request, channel = make_request(b"GET", self.path)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", self.path)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
self.assert_dict(json.loads(content), channel.json_body)
def test_rooms_topic_with_extra_keys(self):
# valid put with extra keys
content = '{"topic":"Seasons","subtopic":"Summer"}'
- request, channel = make_request(b"PUT", self.path, content)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", self.path, content)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
# valid get
- request, channel = make_request(b"GET", self.path)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", self.path)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
self.assert_dict(json.loads(content), channel.json_body)
class RoomMemberStateTestCase(RoomBase):
""" Tests /rooms/$room_id/members/$user_id/state REST events. """
- user_id = b"@sid1:red"
-
- def setUp(self):
+ user_id = "@sid1:red"
- super(RoomMemberStateTestCase, self).setUp()
+ def prepare(self, reactor, clock, hs):
self.room_id = self.helper.create_room_as(self.user_id)
- def tearDown(self):
- pass
-
def test_invalid_puts(self):
path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id)
# missing keys or invalid json
- request, channel = make_request(b"PUT", path, '{}')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, '{}')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", path, '{"_name":"bob"}')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, '{"_name":"bo"}')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", path, '{"nao')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, '{"nao')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(
- b"PUT", path, b'[{"_name":"bob"},{"_name":"jill"}]'
+ request, channel = self.make_request(
+ "PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]'
)
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", path, 'text only')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, 'text only')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", path, '')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, '')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
# valid keys, wrong types
content = '{"membership":["%s","%s","%s"]}' % (
@@ -646,9 +602,9 @@ class RoomMemberStateTestCase(RoomBase):
Membership.JOIN,
Membership.LEAVE,
)
- request, channel = make_request(b"PUT", path, content.encode('ascii'))
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, content.encode('ascii'))
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
def test_rooms_members_self(self):
path = "/rooms/%s/state/m.room.member/%s" % (
@@ -658,13 +614,13 @@ class RoomMemberStateTestCase(RoomBase):
# valid join message (NOOP since we made the room)
content = '{"membership":"%s"}' % Membership.JOIN
- request, channel = make_request(b"PUT", path, content.encode('ascii'))
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, content.encode('ascii'))
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"GET", path, None)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", path, None)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
expected_response = {"membership": Membership.JOIN}
self.assertEquals(expected_response, channel.json_body)
@@ -678,13 +634,13 @@ class RoomMemberStateTestCase(RoomBase):
# valid invite message
content = '{"membership":"%s"}' % Membership.INVITE
- request, channel = make_request(b"PUT", path, content)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, content)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"GET", path, None)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", path, None)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
self.assertEquals(json.loads(content), channel.json_body)
def test_rooms_members_other_custom_keys(self):
@@ -699,13 +655,13 @@ class RoomMemberStateTestCase(RoomBase):
Membership.INVITE,
"Join us!",
)
- request, channel = make_request(b"PUT", path, content)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, content)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"GET", path, None)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("GET", path, None)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
self.assertEquals(json.loads(content), channel.json_body)
@@ -714,60 +670,58 @@ class RoomMessagesTestCase(RoomBase):
user_id = "@sid1:red"
- def setUp(self):
- super(RoomMessagesTestCase, self).setUp()
-
+ def prepare(self, reactor, clock, hs):
self.room_id = self.helper.create_room_as(self.user_id)
def test_invalid_puts(self):
path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id))
# missing keys or invalid json
- request, channel = make_request(b"PUT", path, '{}')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, b'{}')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", path, '{"_name":"bob"}')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, b'{"_name":"bo"}')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", path, '{"nao')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, b'{"nao')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(
- b"PUT", path, '[{"_name":"bob"},{"_name":"jill"}]'
+ request, channel = self.make_request(
+ "PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]'
)
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", path, 'text only')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, b'text only')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
- request, channel = make_request(b"PUT", path, '')
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ request, channel = self.make_request("PUT", path, b'')
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
def test_rooms_messages_sent(self):
path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id))
- content = '{"body":"test","msgtype":{"type":"a"}}'
- request, channel = make_request(b"PUT", path, content)
- render(request, self.resource, self.clock)
- self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"])
+ content = b'{"body":"test","msgtype":{"type":"a"}}'
+ request, channel = self.make_request("PUT", path, content)
+ self.render(request)
+ self.assertEquals(400, channel.code, msg=channel.result["body"])
# custom message types
- content = '{"body":"test","msgtype":"test.custom.text"}'
- request, channel = make_request(b"PUT", path, content)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ content = b'{"body":"test","msgtype":"test.custom.text"}'
+ request, channel = self.make_request("PUT", path, content)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
# m.text message type
path = "/rooms/%s/send/m.room.message/mid2" % (urlparse.quote(self.room_id))
- content = '{"body":"test2","msgtype":"m.text"}'
- request, channel = make_request(b"PUT", path, content)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+ content = b'{"body":"test2","msgtype":"m.text"}'
+ request, channel = self.make_request("PUT", path, content)
+ self.render(request)
+ self.assertEquals(200, channel.code, msg=channel.result["body"])
class RoomInitialSyncTestCase(RoomBase):
@@ -775,16 +729,16 @@ class RoomInitialSyncTestCase(RoomBase):
user_id = "@sid1:red"
- def setUp(self):
- super(RoomInitialSyncTestCase, self).setUp()
-
+ def prepare(self, reactor, clock, hs):
# create the room
self.room_id = self.helper.create_room_as(self.user_id)
def test_initial_sync(self):
- request, channel = make_request(b"GET", "/rooms/%s/initialSync" % self.room_id)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]))
+ request, channel = self.make_request(
+ "GET", "/rooms/%s/initialSync" % self.room_id
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code)
self.assertEquals(self.room_id, channel.json_body["room_id"])
self.assertEquals("join", channel.json_body["membership"])
@@ -819,17 +773,16 @@ class RoomMessageListTestCase(RoomBase):
user_id = "@sid1:red"
- def setUp(self):
- super(RoomMessageListTestCase, self).setUp()
+ def prepare(self, reactor, clock, hs):
self.room_id = self.helper.create_room_as(self.user_id)
def test_topo_token_is_accepted(self):
token = "t1-0_0_0_0_0_0_0_0_0"
- request, channel = make_request(
- b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
+ request, channel = self.make_request(
+ "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]))
+ self.render(request)
+ self.assertEquals(200, channel.code)
self.assertTrue("start" in channel.json_body)
self.assertEquals(token, channel.json_body['start'])
self.assertTrue("chunk" in channel.json_body)
@@ -837,12 +790,116 @@ class RoomMessageListTestCase(RoomBase):
def test_stream_token_is_accepted_for_fwd_pagianation(self):
token = "s0_0_0_0_0_0_0_0_0"
- request, channel = make_request(
- b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
+ request, channel = self.make_request(
+ "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
)
- render(request, self.resource, self.clock)
- self.assertEquals(200, int(channel.result["code"]))
+ self.render(request)
+ self.assertEquals(200, channel.code)
self.assertTrue("start" in channel.json_body)
self.assertEquals(token, channel.json_body['start'])
self.assertTrue("chunk" in channel.json_body)
self.assertTrue("end" in channel.json_body)
+
+
+class RoomSearchTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+ user_id = True
+ hijack_auth = False
+
+ def prepare(self, reactor, clock, hs):
+
+ # Register the user who does the searching
+ self.user_id = self.register_user("user", "pass")
+ self.access_token = self.login("user", "pass")
+
+ # Register the user who sends the message
+ self.other_user_id = self.register_user("otheruser", "pass")
+ self.other_access_token = self.login("otheruser", "pass")
+
+ # Create a room
+ self.room = self.helper.create_room_as(self.user_id, tok=self.access_token)
+
+ # Invite the other person
+ self.helper.invite(
+ room=self.room,
+ src=self.user_id,
+ tok=self.access_token,
+ targ=self.other_user_id,
+ )
+
+ # The other user joins
+ self.helper.join(
+ room=self.room, user=self.other_user_id, tok=self.other_access_token
+ )
+
+ def test_finds_message(self):
+ """
+ The search functionality will search for content in messages if asked to
+ do so.
+ """
+ # The other user sends some messages
+ self.helper.send(self.room, body="Hi!", tok=self.other_access_token)
+ self.helper.send(self.room, body="There!", tok=self.other_access_token)
+
+ request, channel = self.make_request(
+ "POST",
+ "/search?access_token=%s" % (self.access_token,),
+ {
+ "search_categories": {
+ "room_events": {"keys": ["content.body"], "search_term": "Hi"}
+ }
+ },
+ )
+ self.render(request)
+
+ # Check we get the results we expect -- one search result, of the sent
+ # messages
+ self.assertEqual(channel.code, 200)
+ results = channel.json_body["search_categories"]["room_events"]
+ self.assertEqual(results["count"], 1)
+ self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!")
+
+ # No context was requested, so we should get none.
+ self.assertEqual(results["results"][0]["context"], {})
+
+ def test_include_context(self):
+ """
+ When event_context includes include_profile, profile information will be
+ included in the search response.
+ """
+ # The other user sends some messages
+ self.helper.send(self.room, body="Hi!", tok=self.other_access_token)
+ self.helper.send(self.room, body="There!", tok=self.other_access_token)
+
+ request, channel = self.make_request(
+ "POST",
+ "/search?access_token=%s" % (self.access_token,),
+ {
+ "search_categories": {
+ "room_events": {
+ "keys": ["content.body"],
+ "search_term": "Hi",
+ "event_context": {"include_profile": True},
+ }
+ }
+ },
+ )
+ self.render(request)
+
+ # Check we get the results we expect -- one search result, of the sent
+ # messages
+ self.assertEqual(channel.code, 200)
+ results = channel.json_body["search_categories"]["room_events"]
+ self.assertEqual(results["count"], 1)
+ self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!")
+
+ # We should get context info, like the two users, and the display names.
+ context = results["results"][0]["context"]
+ self.assertEqual(len(context["profile_info"].keys()), 2)
+ self.assertEqual(
+ context["profile_info"][self.other_user_id]["displayname"], "otheruser"
+ )
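
Taken together, the rooms-test conversion above swaps the hand-built reactor, JsonResource and auth monkey-patching for the HomeserverTestCase hooks: a servlets list, optional make_homeserver/prepare overrides, and self.make_request/self.render with channel.code. A minimal sketch of the resulting test shape, using the same helper names as the hunks above (illustrative only, not a test added by this patch):

from synapse.rest.client.v1 import room

from tests import unittest


class ExampleTopicTestCase(unittest.HomeserverTestCase):
    servlets = [room.register_servlets]
    user_id = "@sid1:red"

    def prepare(self, reactor, clock, hs):
        # Runs once the homeserver exists; replaces the old setUp() plumbing.
        self.room_id = self.helper.create_room_as(self.user_id)
        self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,)

    def test_topic_roundtrip(self):
        request, channel = self.make_request("PUT", self.path, b'{"topic":"Topic name"}')
        self.render(request)
        self.assertEqual(200, channel.code, channel.result["body"])

        request, channel = self.make_request("GET", self.path)
        self.render(request)
        self.assertEqual(200, channel.code, channel.result["body"])
        self.assertEqual("Topic name", channel.json_body["topic"])
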
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index 530dc8ba..9c401bf3 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -169,7 +169,7 @@ class RestHelper(object):
path = path + "?access_token=%s" % tok
request, channel = make_request(
- "POST", path, json.dumps(content).encode('utf8')
+ self.hs.get_reactor(), "POST", path, json.dumps(content).encode('utf8')
)
render(request, self.resource, self.hs.get_reactor())
@@ -217,7 +217,9 @@ class RestHelper(object):
data = {"membership": membership}
- request, channel = make_request("PUT", path, json.dumps(data).encode('utf8'))
+ request, channel = make_request(
+ self.hs.get_reactor(), "PUT", path, json.dumps(data).encode('utf8')
+ )
render(request, self.resource, self.hs.get_reactor())
@@ -228,18 +230,6 @@ class RestHelper(object):
self.auth_user_id = temp_id
- @defer.inlineCallbacks
- def register(self, user_id):
- (code, response) = yield self.mock_resource.trigger(
- "POST",
- "/_matrix/client/r0/register",
- json.dumps(
- {"user": user_id, "password": "test", "type": "m.login.password"}
- ),
- )
- self.assertEquals(200, code)
- defer.returnValue(response)
-
def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
if txn_id is None:
txn_id = "m%s" % (str(time.time()))
@@ -251,7 +241,9 @@ class RestHelper(object):
if tok:
path = path + "?access_token=%s" % tok
- request, channel = make_request("PUT", path, json.dumps(content).encode('utf8'))
+ request, channel = make_request(
+ self.hs.get_reactor(), "PUT", path, json.dumps(content).encode('utf8')
+ )
render(request, self.resource, self.hs.get_reactor())
assert int(channel.result["code"]) == expect_code, (
diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py
index 6a886ee3..f42a8efb 100644
--- a/tests/rest/client/v2_alpha/test_filter.py
+++ b/tests/rest/client/v2_alpha/test_filter.py
@@ -13,84 +13,47 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import synapse.types
from synapse.api.errors import Codes
-from synapse.http.server import JsonResource
from synapse.rest.client.v2_alpha import filter
-from synapse.types import UserID
-from synapse.util import Clock
from tests import unittest
-from tests.server import (
- ThreadedMemoryReactorClock as MemoryReactorClock,
- make_request,
- render,
- setup_test_homeserver,
-)
PATH_PREFIX = "/_matrix/client/v2_alpha"
-class FilterTestCase(unittest.TestCase):
+class FilterTestCase(unittest.HomeserverTestCase):
- USER_ID = "@apple:test"
+ user_id = "@apple:test"
+ hijack_auth = True
EXAMPLE_FILTER = {"room": {"timeline": {"types": ["m.room.message"]}}}
EXAMPLE_FILTER_JSON = b'{"room": {"timeline": {"types": ["m.room.message"]}}}'
- TO_REGISTER = [filter]
+ servlets = [filter.register_servlets]
- def setUp(self):
- self.clock = MemoryReactorClock()
- self.hs_clock = Clock(self.clock)
-
- self.hs = setup_test_homeserver(
- self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
- )
-
- self.auth = self.hs.get_auth()
-
- def get_user_by_access_token(token=None, allow_guest=False):
- return {
- "user": UserID.from_string(self.USER_ID),
- "token_id": 1,
- "is_guest": False,
- }
-
- def get_user_by_req(request, allow_guest=False, rights="access"):
- return synapse.types.create_requester(
- UserID.from_string(self.USER_ID), 1, False, None
- )
-
- self.auth.get_user_by_access_token = get_user_by_access_token
- self.auth.get_user_by_req = get_user_by_req
-
- self.store = self.hs.get_datastore()
- self.filtering = self.hs.get_filtering()
- self.resource = JsonResource(self.hs)
-
- for r in self.TO_REGISTER:
- r.register_servlets(self.hs, self.resource)
+ def prepare(self, reactor, clock, hs):
+ self.filtering = hs.get_filtering()
+ self.store = hs.get_datastore()
def test_add_filter(self):
- request, channel = make_request(
+ request, channel = self.make_request(
"POST",
- "/_matrix/client/r0/user/%s/filter" % (self.USER_ID),
+ "/_matrix/client/r0/user/%s/filter" % (self.user_id),
self.EXAMPLE_FILTER_JSON,
)
- render(request, self.resource, self.clock)
+ self.render(request)
self.assertEqual(channel.result["code"], b"200")
self.assertEqual(channel.json_body, {"filter_id": "0"})
filter = self.store.get_user_filter(user_localpart="apple", filter_id=0)
- self.clock.advance(0)
+ self.pump()
self.assertEquals(filter.result, self.EXAMPLE_FILTER)
def test_add_filter_for_other_user(self):
- request, channel = make_request(
+ request, channel = self.make_request(
"POST",
"/_matrix/client/r0/user/%s/filter" % ("@watermelon:test"),
self.EXAMPLE_FILTER_JSON,
)
- render(request, self.resource, self.clock)
+ self.render(request)
self.assertEqual(channel.result["code"], b"403")
self.assertEquals(channel.json_body["errcode"], Codes.FORBIDDEN)
@@ -98,12 +61,12 @@ class FilterTestCase(unittest.TestCase):
def test_add_filter_non_local_user(self):
_is_mine = self.hs.is_mine
self.hs.is_mine = lambda target_user: False
- request, channel = make_request(
+ request, channel = self.make_request(
"POST",
- "/_matrix/client/r0/user/%s/filter" % (self.USER_ID),
+ "/_matrix/client/r0/user/%s/filter" % (self.user_id),
self.EXAMPLE_FILTER_JSON,
)
- render(request, self.resource, self.clock)
+ self.render(request)
self.hs.is_mine = _is_mine
self.assertEqual(channel.result["code"], b"403")
@@ -113,21 +76,21 @@ class FilterTestCase(unittest.TestCase):
filter_id = self.filtering.add_user_filter(
user_localpart="apple", user_filter=self.EXAMPLE_FILTER
)
- self.clock.advance(1)
+ self.reactor.advance(1)
filter_id = filter_id.result
- request, channel = make_request(
- "GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.USER_ID, filter_id)
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.user_id, filter_id)
)
- render(request, self.resource, self.clock)
+ self.render(request)
self.assertEqual(channel.result["code"], b"200")
self.assertEquals(channel.json_body, self.EXAMPLE_FILTER)
def test_get_filter_non_existant(self):
- request, channel = make_request(
- "GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.USER_ID)
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.user_id)
)
- render(request, self.resource, self.clock)
+ self.render(request)
self.assertEqual(channel.result["code"], b"400")
self.assertEquals(channel.json_body["errcode"], Codes.NOT_FOUND)
@@ -135,18 +98,18 @@ class FilterTestCase(unittest.TestCase):
# Currently invalid params do not have an appropriate errcode
# in errors.py
def test_get_filter_invalid_id(self):
- request, channel = make_request(
- "GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.USER_ID)
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.user_id)
)
- render(request, self.resource, self.clock)
+ self.render(request)
self.assertEqual(channel.result["code"], b"400")
# No ID also returns an invalid_id error
def test_get_filter_no_id(self):
- request, channel = make_request(
- "GET", "/_matrix/client/r0/user/%s/filter/" % (self.USER_ID)
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/user/%s/filter/" % (self.user_id)
)
- render(request, self.resource, self.clock)
+ self.render(request)
self.assertEqual(channel.result["code"], b"400")
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index 1c128e81..753d5c3e 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -3,22 +3,19 @@ import json
from mock import Mock
from twisted.python import failure
-from twisted.test.proto_helpers import MemoryReactorClock
from synapse.api.errors import InteractiveAuthIncompleteError
-from synapse.http.server import JsonResource
from synapse.rest.client.v2_alpha.register import register_servlets
-from synapse.util import Clock
from tests import unittest
-from tests.server import make_request, render, setup_test_homeserver
-class RegisterRestServletTestCase(unittest.TestCase):
- def setUp(self):
+class RegisterRestServletTestCase(unittest.HomeserverTestCase):
+
+ servlets = [register_servlets]
+
+ def make_homeserver(self, reactor, clock):
- self.clock = MemoryReactorClock()
- self.hs_clock = Clock(self.clock)
self.url = b"/_matrix/client/r0/register"
self.appservice = None
@@ -46,9 +43,7 @@ class RegisterRestServletTestCase(unittest.TestCase):
identity_handler=self.identity_handler,
login_handler=self.login_handler,
)
- self.hs = setup_test_homeserver(
- self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
- )
+ self.hs = self.setup_test_homeserver()
self.hs.get_auth = Mock(return_value=self.auth)
self.hs.get_handlers = Mock(return_value=self.handlers)
self.hs.get_auth_handler = Mock(return_value=self.auth_handler)
@@ -58,8 +53,7 @@ class RegisterRestServletTestCase(unittest.TestCase):
self.hs.config.registrations_require_3pid = []
self.hs.config.auto_join_rooms = []
- self.resource = JsonResource(self.hs)
- register_servlets(self.hs, self.resource)
+ return self.hs
def test_POST_appservice_registration_valid(self):
user_id = "@kermit:muppet"
@@ -69,10 +63,10 @@ class RegisterRestServletTestCase(unittest.TestCase):
self.auth_handler.get_access_token_for_user_id = Mock(return_value=token)
request_data = json.dumps({"username": "kermit"})
- request, channel = make_request(
+ request, channel = self.make_request(
b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
)
- render(request, self.resource, self.clock)
+ self.render(request)
self.assertEquals(channel.result["code"], b"200", channel.result)
det_data = {
@@ -85,25 +79,25 @@ class RegisterRestServletTestCase(unittest.TestCase):
def test_POST_appservice_registration_invalid(self):
self.appservice = None # no application service exists
request_data = json.dumps({"username": "kermit"})
- request, channel = make_request(
+ request, channel = self.make_request(
b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
)
- render(request, self.resource, self.clock)
+ self.render(request)
self.assertEquals(channel.result["code"], b"401", channel.result)
def test_POST_bad_password(self):
request_data = json.dumps({"username": "kermit", "password": 666})
- request, channel = make_request(b"POST", self.url, request_data)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request(b"POST", self.url, request_data)
+ self.render(request)
self.assertEquals(channel.result["code"], b"400", channel.result)
self.assertEquals(channel.json_body["error"], "Invalid password")
def test_POST_bad_username(self):
request_data = json.dumps({"username": 777, "password": "monkey"})
- request, channel = make_request(b"POST", self.url, request_data)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request(b"POST", self.url, request_data)
+ self.render(request)
self.assertEquals(channel.result["code"], b"400", channel.result)
self.assertEquals(channel.json_body["error"], "Invalid username")
@@ -121,8 +115,8 @@ class RegisterRestServletTestCase(unittest.TestCase):
self.auth_handler.get_access_token_for_user_id = Mock(return_value=token)
self.device_handler.check_device_registered = Mock(return_value=device_id)
- request, channel = make_request(b"POST", self.url, request_data)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request(b"POST", self.url, request_data)
+ self.render(request)
det_data = {
"user_id": user_id,
@@ -143,8 +137,8 @@ class RegisterRestServletTestCase(unittest.TestCase):
self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None)
self.registration_handler.register = Mock(return_value=("@user:id", "t"))
- request, channel = make_request(b"POST", self.url, request_data)
- render(request, self.resource, self.clock)
+ request, channel = self.make_request(b"POST", self.url, request_data)
+ self.render(request)
self.assertEquals(channel.result["code"], b"403", channel.result)
self.assertEquals(channel.json_body["error"], "Registration has been disabled")
@@ -155,8 +149,8 @@ class RegisterRestServletTestCase(unittest.TestCase):
self.hs.config.allow_guest_access = True
self.registration_handler.register = Mock(return_value=(user_id, None))
- request, channel = make_request(b"POST", self.url + b"?kind=guest", b"{}")
- render(request, self.resource, self.clock)
+ request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
+ self.render(request)
det_data = {
"user_id": user_id,
@@ -169,8 +163,8 @@ class RegisterRestServletTestCase(unittest.TestCase):
def test_POST_disabled_guest_registration(self):
self.hs.config.allow_guest_access = False
- request, channel = make_request(b"POST", self.url + b"?kind=guest", b"{}")
- render(request, self.resource, self.clock)
+ request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
+ self.render(request)
self.assertEquals(channel.result["code"], b"403", channel.result)
self.assertEquals(channel.json_body["error"], "Guest access is disabled")
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index 560b1fba..99b716f0 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -15,9 +15,11 @@
from mock import Mock
+from synapse.rest.client.v1 import admin, login, room
from synapse.rest.client.v2_alpha import sync
from tests import unittest
+from tests.server import TimedOutException
class FilterTestCase(unittest.HomeserverTestCase):
@@ -62,12 +64,127 @@ class FilterTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, 200)
self.assertTrue(
set(
- [
- "next_batch",
- "rooms",
- "account_data",
- "to_device",
- "device_lists",
- ]
+ ["next_batch", "rooms", "account_data", "to_device", "device_lists"]
).issubset(set(channel.json_body.keys()))
)
+
+
+class SyncTypingTests(unittest.HomeserverTestCase):
+
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ sync.register_servlets,
+ ]
+ user_id = True
+ hijack_auth = False
+
+ def test_sync_backwards_typing(self):
+ """
+ If the typing serial goes backwards and the typing handler is then reset
+ (such as when the master restarts and sets the typing serial to 0), we
+ do not incorrectly return typing information that had a serial greater
+ than the now-reset serial.
+ """
+ typing_url = "/rooms/%s/typing/%s?access_token=%s"
+ sync_url = "/sync?timeout=3000000&access_token=%s&since=%s"
+
+ # Register the user who gets notified
+ user_id = self.register_user("user", "pass")
+ access_token = self.login("user", "pass")
+
+ # Register the user who sends the message
+ other_user_id = self.register_user("otheruser", "pass")
+ other_access_token = self.login("otheruser", "pass")
+
+ # Create a room
+ room = self.helper.create_room_as(user_id, tok=access_token)
+
+ # Invite the other person
+ self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
+
+ # The other user joins
+ self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+ # The other user sends some messages
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.helper.send(room, body="There!", tok=other_access_token)
+
+ # Start typing.
+ request, channel = self.make_request(
+ "PUT",
+ typing_url % (room, other_user_id, other_access_token),
+ b'{"typing": true, "timeout": 30000}',
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code)
+
+ request, channel = self.make_request(
+ "GET", "/sync?access_token=%s" % (access_token,)
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code)
+ next_batch = channel.json_body["next_batch"]
+
+ # Stop typing.
+ request, channel = self.make_request(
+ "PUT",
+ typing_url % (room, other_user_id, other_access_token),
+ b'{"typing": false}',
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code)
+
+ # Start typing.
+ request, channel = self.make_request(
+ "PUT",
+ typing_url % (room, other_user_id, other_access_token),
+ b'{"typing": true, "timeout": 30000}',
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code)
+
+ # Should return immediately
+ request, channel = self.make_request(
+ "GET", sync_url % (access_token, next_batch)
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code)
+ next_batch = channel.json_body["next_batch"]
+
+ # Reset typing serial back to 0, as if the master had.
+ typing = self.hs.get_typing_handler()
+ typing._latest_room_serial = 0
+
+ # Since it checks the state token, we need some state to update to
+ # invalidate the stream token.
+ self.helper.send(room, body="There!", tok=other_access_token)
+
+ request, channel = self.make_request(
+ "GET", sync_url % (access_token, next_batch)
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code)
+ next_batch = channel.json_body["next_batch"]
+
+        # This should time out! But it does not, because our stream token is
+        # ahead: the typing notification we have already seen carries a serial
+        # above our new, now-reset stream token, so it is reported as new.
+ request, channel = self.make_request(
+ "GET", sync_url % (access_token, next_batch)
+ )
+ self.render(request)
+ self.assertEquals(200, channel.code)
+ next_batch = channel.json_body["next_batch"]
+
+ # Clear the typing information, so that it doesn't think everything is
+ # in the future.
+ typing._reset()
+
+ # Now it SHOULD fail as it never completes!
+ request, channel = self.make_request(
+ "GET", sync_url % (access_token, next_batch)
+ )
+ self.assertRaises(TimedOutException, self.render, request)
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
new file mode 100644
index 00000000..29579cf0
--- /dev/null
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from mock import Mock
+
+from twisted.internet.defer import Deferred
+
+from synapse.config.repository import MediaStorageProviderConfig
+from synapse.util.module_loader import load_module
+
+from tests import unittest
+
+
+class URLPreviewTests(unittest.HomeserverTestCase):
+
+ hijack_auth = True
+ user_id = "@test:user"
+
+ def make_homeserver(self, reactor, clock):
+
+ self.storage_path = self.mktemp()
+ os.mkdir(self.storage_path)
+
+ config = self.default_config()
+ config.url_preview_enabled = True
+ config.max_spider_size = 9999999
+ config.url_preview_url_blacklist = []
+ config.media_store_path = self.storage_path
+
+ provider_config = {
+ "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
+ "store_local": True,
+ "store_synchronous": False,
+ "store_remote": True,
+ "config": {"directory": self.storage_path},
+ }
+
+ loaded = list(load_module(provider_config)) + [
+ MediaStorageProviderConfig(False, False, False)
+ ]
+
+ config.media_storage_providers = [loaded]
+
+ hs = self.setup_test_homeserver(config=config)
+
+ return hs
+
+ def prepare(self, reactor, clock, hs):
+
+ self.fetches = []
+
+ def get_file(url, output_stream, max_size):
+ """
+ Returns tuple[int,dict,str,int] of file length, response headers,
+ absolute URI, and response code.
+ """
+
+ def write_to(r):
+ data, response = r
+ output_stream.write(data)
+ return response
+
+ d = Deferred()
+ d.addCallback(write_to)
+ self.fetches.append((d, url))
+ return d
+
+ client = Mock()
+ client.get_file = get_file
+
+ self.media_repo = hs.get_media_repository_resource()
+ preview_url = self.media_repo.children[b'preview_url']
+ preview_url.client = client
+ self.preview_url = preview_url
+
+ def test_cache_returns_correct_type(self):
+
+ request, channel = self.make_request(
+ "GET", "url_preview?url=matrix.org", shorthand=False
+ )
+ request.render(self.preview_url)
+ self.pump()
+
+ # We've made one fetch
+ self.assertEqual(len(self.fetches), 1)
+
+ end_content = (
+ b'<html><head>'
+ b'<meta property="og:title" content="~matrix~" />'
+ b'<meta property="og:description" content="hi" />'
+ b'</head></html>'
+ )
+
+ self.fetches[0][0].callback(
+ (
+ end_content,
+ (
+ len(end_content),
+ {
+ b"Content-Length": [b"%d" % (len(end_content))],
+ b"Content-Type": [b'text/html; charset="utf8"'],
+ },
+ "https://example.com",
+ 200,
+ ),
+ )
+ )
+
+ self.pump()
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+ )
+
+ # Check the cache returns the correct response
+ request, channel = self.make_request(
+ "GET", "url_preview?url=matrix.org", shorthand=False
+ )
+ request.render(self.preview_url)
+ self.pump()
+
+ # Only one fetch, still, since we'll lean on the cache
+ self.assertEqual(len(self.fetches), 1)
+
+ # Check the cache response has the same content
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+ )
+
+ # Clear the in-memory cache
+ self.assertIn("matrix.org", self.preview_url._cache)
+ self.preview_url._cache.pop("matrix.org")
+ self.assertNotIn("matrix.org", self.preview_url._cache)
+
+ # Check the database cache returns the correct response
+ request, channel = self.make_request(
+ "GET", "url_preview?url=matrix.org", shorthand=False
+ )
+ request.render(self.preview_url)
+ self.pump()
+
+ # Only one fetch, still, since we'll lean on the cache
+ self.assertEqual(len(self.fetches), 1)
+
+ # Check the cache response has the same content
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
+ )
diff --git a/tests/scripts/__init__.py b/tests/scripts/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/scripts/__init__.py
diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py
new file mode 100644
index 00000000..6f56893f
--- /dev/null
+++ b/tests/scripts/test_new_matrix_user.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from synapse._scripts.register_new_matrix_user import request_registration
+
+from tests.unittest import TestCase
+
+
+class RegisterTestCase(TestCase):
+ def test_success(self):
+ """
+        The script fetches a nonce, generates a MAC with it, and then posts
+        that MAC.
+ """
+
+ def get(url, verify=None):
+ r = Mock()
+ r.status_code = 200
+ r.json = lambda: {"nonce": "a"}
+ return r
+
+ def post(url, json=None, verify=None):
+ # Make sure we are sent the correct info
+ self.assertEqual(json["username"], "user")
+ self.assertEqual(json["password"], "pass")
+ self.assertEqual(json["nonce"], "a")
+ # We want a 40-char hex MAC
+ self.assertEqual(len(json["mac"]), 40)
+
+ r = Mock()
+ r.status_code = 200
+ return r
+
+ requests = Mock()
+ requests.get = get
+ requests.post = post
+
+ # The fake stdout will be written here
+ out = []
+ err_code = []
+
+ request_registration(
+ "user",
+ "pass",
+ "matrix.org",
+ "shared",
+ admin=False,
+ requests=requests,
+ _print=out.append,
+ exit=err_code.append,
+ )
+
+ # We should get the success message making sure everything is OK.
+ self.assertIn("Success!", out)
+
+ # sys.exit shouldn't have been called.
+ self.assertEqual(err_code, [])
+
+ def test_failure_nonce(self):
+ """
+ If the script fails to fetch a nonce, it throws an error and quits.
+ """
+
+ def get(url, verify=None):
+ r = Mock()
+ r.status_code = 404
+ r.reason = "Not Found"
+ r.json = lambda: {"not": "error"}
+ return r
+
+ requests = Mock()
+ requests.get = get
+
+ # The fake stdout will be written here
+ out = []
+ err_code = []
+
+ request_registration(
+ "user",
+ "pass",
+ "matrix.org",
+ "shared",
+ admin=False,
+ requests=requests,
+ _print=out.append,
+ exit=err_code.append,
+ )
+
+ # Exit was called
+ self.assertEqual(err_code, [1])
+
+ # We got an error message
+ self.assertIn("ERROR! Received 404 Not Found", out)
+ self.assertNotIn("Success!", out)
+
+ def test_failure_post(self):
+ """
+ The script will fetch a nonce, and then if the final POST fails, will
+ report an error and quit.
+ """
+
+ def get(url, verify=None):
+ r = Mock()
+ r.status_code = 200
+ r.json = lambda: {"nonce": "a"}
+ return r
+
+ def post(url, json=None, verify=None):
+ # Make sure we are sent the correct info
+ self.assertEqual(json["username"], "user")
+ self.assertEqual(json["password"], "pass")
+ self.assertEqual(json["nonce"], "a")
+ # We want a 40-char hex MAC
+ self.assertEqual(len(json["mac"]), 40)
+
+ r = Mock()
+ # Then 500 because we're jerks
+ r.status_code = 500
+ r.reason = "Broken"
+ return r
+
+ requests = Mock()
+ requests.get = get
+ requests.post = post
+
+ # The fake stdout will be written here
+ out = []
+ err_code = []
+
+ request_registration(
+ "user",
+ "pass",
+ "matrix.org",
+ "shared",
+ admin=False,
+ requests=requests,
+ _print=out.append,
+ exit=err_code.append,
+ )
+
+ # Exit was called
+ self.assertEqual(err_code, [1])
+
+ # We got an error message
+ self.assertIn("ERROR! Received 500 Broken", out)
+ self.assertNotIn("Success!", out)
diff --git a/tests/server.py b/tests/server.py
index 615bba1b..7919a1f1 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -4,9 +4,14 @@ from io import BytesIO
from six import text_type
import attr
+from zope.interface import implementer
-from twisted.internet import address, threads
+from twisted.internet import address, threads, udp
+from twisted.internet._resolver import HostResolution
+from twisted.internet.address import IPv4Address
from twisted.internet.defer import Deferred
+from twisted.internet.error import DNSLookupError
+from twisted.internet.interfaces import IReactorPluggableNameResolver
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactorClock
@@ -16,6 +21,12 @@ from synapse.util import Clock
from tests.utils import setup_test_homeserver as _sth
+class TimedOutException(Exception):
+ """
+ A web query timed out.
+ """
+
+
@attr.s
class FakeChannel(object):
"""
@@ -23,6 +34,7 @@ class FakeChannel(object):
wire).
"""
+ _reactor = attr.ib()
result = attr.ib(default=attr.Factory(dict))
_producer = None
@@ -45,6 +57,8 @@ class FakeChannel(object):
self.result["headers"] = headers
def write(self, content):
+ assert isinstance(content, bytes), "Should be bytes! " + repr(content)
+
if "body" not in self.result:
self.result["body"] = b""
@@ -52,6 +66,15 @@ class FakeChannel(object):
def registerProducer(self, producer, streaming):
self._producer = producer
+ self.producerStreaming = streaming
+
+ def _produce():
+ if self._producer:
+ self._producer.resumeProducing()
+ self._reactor.callLater(0.1, _produce)
+
+ if not streaming:
+ self._reactor.callLater(0.0, _produce)
def unregisterProducer(self):
if self._producer is None:
@@ -65,7 +88,7 @@ class FakeChannel(object):
def getPeer(self):
# We give an address so that getClientIP returns a non null entry,
# causing us to record the MAU
- return address.IPv4Address(b"TCP", "127.0.0.1", 3423)
+ return address.IPv4Address("TCP", "127.0.0.1", 3423)
def getHost(self):
return None
@@ -93,10 +116,30 @@ class FakeSite:
return FakeLogger()
-def make_request(method, path, content=b"", access_token=None):
+def make_request(
+ reactor,
+ method,
+ path,
+ content=b"",
+ access_token=None,
+ request=SynapseRequest,
+ shorthand=True,
+):
"""
Make a web request using the given method and path, feed it the
content, and return the Request and the Channel underneath.
+
+ Args:
+ method (bytes/unicode): The HTTP request method ("verb").
+ path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
+ escaped UTF-8 & spaces and such).
+ content (bytes or dict): The body of the request. JSON-encoded, if
+ a dict.
+        shorthand: Whether to helpfully prefix the given URL with the usual
+            REST API path if it doesn't already contain it.
+
+    Returns:
+        Tuple[synapse.http.site.SynapseRequest, FakeChannel]: the request
+        object and the channel it was written to.
"""
if not isinstance(method, bytes):
method = method.encode('ascii')
@@ -104,8 +147,8 @@ def make_request(method, path, content=b"", access_token=None):
if not isinstance(path, bytes):
path = path.encode('ascii')
- # Decorate it to be the full path
- if not path.startswith(b"/_matrix"):
+ # Decorate it to be the full path, if we're using shorthand
+ if shorthand and not path.startswith(b"/_matrix"):
path = b"/_matrix/client/r0/" + path
path = path.replace(b"//", b"/")
@@ -113,16 +156,20 @@ def make_request(method, path, content=b"", access_token=None):
content = content.encode('utf8')
site = FakeSite()
- channel = FakeChannel()
+ channel = FakeChannel(reactor)
- req = SynapseRequest(site, channel)
+ req = request(site, channel)
req.process = lambda: b""
req.content = BytesIO(content)
if access_token:
- req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token)
+ req.requestHeaders.addRawHeader(
+ b"Authorization", b"Bearer " + access_token.encode('ascii')
+ )
+
+ if content:
+ req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
- req.requestHeaders.addRawHeader(b"X-Forwarded-For", b"127.0.0.1")
req.requestReceived(method, path, b"1.1")
return req, channel
@@ -144,7 +191,7 @@ def wait_until_result(clock, request, timeout=100):
x += 1
if x > timeout:
- raise Exception("Timed out waiting for request to finish.")
+ raise TimedOutException("Timed out waiting for request to finish.")
clock.advance(0.1)
@@ -154,11 +201,46 @@ def render(request, resource, clock):
wait_until_result(clock, request)
+@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
A MemoryReactorClock that supports callFromThread.
"""
+ def __init__(self):
+ self._udp = []
+ self.lookups = {}
+
+ class Resolver(object):
+ def resolveHostName(
+ _self,
+ resolutionReceiver,
+ hostName,
+ portNumber=0,
+ addressTypes=None,
+ transportSemantics='TCP',
+ ):
+
+ resolution = HostResolution(hostName)
+ resolutionReceiver.resolutionBegan(resolution)
+ if hostName not in self.lookups:
+ raise DNSLookupError("OH NO")
+
+ resolutionReceiver.addressResolved(
+ IPv4Address('TCP', self.lookups[hostName], portNumber)
+ )
+ resolutionReceiver.resolutionComplete()
+ return resolution
+
+ self.nameResolver = Resolver()
+ super(ThreadedMemoryReactorClock, self).__init__()
+
+ def listenUDP(self, port, protocol, interface='', maxPacketSize=8196):
+ p = udp.Port(port, protocol, interface, maxPacketSize, self)
+ p.startListening()
+ self._udp.append(p)
+ return p
+
def callFromThread(self, callback, *args, **kwargs):
"""
Make the callback fire in the next reactor iteration.
@@ -240,3 +322,84 @@ def get_clock():
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return (clock, hs_clock)
+
+
+@attr.s
+class FakeTransport(object):
+ """
+ A twisted.internet.interfaces.ITransport implementation which sends all its data
+ straight into an IProtocol object: it exists to connect two IProtocols together.
+
+ To use it, instantiate it with the receiving IProtocol, and then pass it to the
+ sending IProtocol's makeConnection method:
+
+ server = HTTPChannel()
+ client.makeConnection(FakeTransport(server, self.reactor))
+
+ If you want bidirectional communication, you'll need two instances.
+ """
+
+ other = attr.ib()
+ """The Protocol object which will receive any data written to this transport.
+
+ :type: twisted.internet.interfaces.IProtocol
+ """
+
+ _reactor = attr.ib()
+ """Test reactor
+
+ :type: twisted.internet.interfaces.IReactorTime
+ """
+
+ disconnecting = False
+ buffer = attr.ib(default=b'')
+ producer = attr.ib(default=None)
+
+ def getPeer(self):
+ return None
+
+ def getHost(self):
+ return None
+
+ def loseConnection(self):
+ self.disconnecting = True
+
+ def abortConnection(self):
+ self.disconnecting = True
+
+ def pauseProducing(self):
+ self.producer.pauseProducing()
+
+ def unregisterProducer(self):
+ if not self.producer:
+ return
+
+ self.producer = None
+
+ def registerProducer(self, producer, streaming):
+ self.producer = producer
+ self.producerStreaming = streaming
+
+ def _produce():
+ d = self.producer.resumeProducing()
+ d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))
+
+ if not streaming:
+ self._reactor.callLater(0.0, _produce)
+
+ def write(self, byt):
+ self.buffer = self.buffer + byt
+
+ def _write():
+ if getattr(self.other, "transport") is not None:
+ self.other.dataReceived(self.buffer)
+ self.buffer = b""
+ return
+
+ self._reactor.callLater(0.0, _write)
+
+ _write()
+
+ def writeSequence(self, seq):
+ for x in seq:
+ self.write(x)
diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py
new file mode 100644
index 00000000..95badc98
--- /dev/null
+++ b/tests/server_notices/test_consent.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.rest.client.v1 import admin, login, room
+from synapse.rest.client.v2_alpha import sync
+
+from tests import unittest
+
+
+class ConsentNoticesTests(unittest.HomeserverTestCase):
+
+ servlets = [
+ sync.register_servlets,
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+
+ self.consent_notice_message = "consent %(consent_uri)s"
+ config = self.default_config()
+ config.user_consent_version = "1"
+ config.user_consent_server_notice_content = {
+ "msgtype": "m.text",
+ "body": self.consent_notice_message,
+ }
+ config.public_baseurl = "https://example.com/"
+ config.form_secret = "123abc"
+
+ config.server_notices_mxid = "@notices:test"
+ config.server_notices_mxid_display_name = "test display name"
+ config.server_notices_mxid_avatar_url = None
+ config.server_notices_room_name = "Server Notices"
+
+ hs = self.setup_test_homeserver(config=config)
+
+ return hs
+
+ def prepare(self, reactor, clock, hs):
+ self.user_id = self.register_user("bob", "abc123")
+ self.access_token = self.login("bob", "abc123")
+
+ def test_get_sync_message(self):
+ """
+ When user consent server notices are enabled, a sync will cause a notice
+ to fire (in a room which the user is invited to). The notice contains
+ the notice URL + an authentication code.
+ """
+ # Initial sync, to get the user consent room invite
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/sync", access_token=self.access_token
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ # Get the Room ID to join
+ room_id = list(channel.json_body["rooms"]["invite"].keys())[0]
+
+ # Join the room
+ request, channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/rooms/" + room_id + "/join",
+ access_token=self.access_token,
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ # Sync again, to get the message in the room
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/sync", access_token=self.access_token
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ # Get the message
+ room = channel.json_body["rooms"]["join"][room_id]
+ messages = [
+ x for x in room["timeline"]["events"] if x["type"] == "m.room.message"
+ ]
+
+ # One message, with the consent URL
+ self.assertEqual(len(messages), 1)
+ self.assertTrue(
+ messages[0]["content"]["body"].startswith(
+ "consent https://example.com/_matrix/consent"
+ )
+ )
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 5cc7fff3..b1551df7 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -4,7 +4,6 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, ServerNoticeMsgType
from synapse.api.errors import ResourceLimitError
-from synapse.handlers.auth import AuthHandler
from synapse.server_notices.resource_limits_server_notices import (
ResourceLimitsServerNotices,
)
@@ -13,17 +12,10 @@ from tests import unittest
from tests.utils import setup_test_homeserver
-class AuthHandlers(object):
- def __init__(self, hs):
- self.auth_handler = AuthHandler(hs)
-
-
class TestResourceLimitsServerNotices(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
- self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None)
- self.hs.handlers = AuthHandlers(self.hs)
- self.auth_handler = self.hs.handlers.auth_handler
+ self.hs = yield setup_test_homeserver(self.addCleanup)
self.server_notices_sender = self.hs.get_server_notices_sender()
# relying on [1] is far from ideal, but the only case where
@@ -80,12 +72,11 @@ class TestResourceLimitsServerNotices(unittest.TestCase):
self._rlsn._auth.check_auth_blocking = Mock()
mock_event = Mock(
- type=EventTypes.Message,
- content={"msgtype": ServerNoticeMsgType},
+ type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType}
+ )
+ self._rlsn._store.get_events = Mock(
+ return_value=defer.succeed({"123": mock_event})
)
- self._rlsn._store.get_events = Mock(return_value=defer.succeed(
- {"123": mock_event}
- ))
yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
# Would be better to check the content, but once == remove blocking event
@@ -99,12 +90,11 @@ class TestResourceLimitsServerNotices(unittest.TestCase):
)
mock_event = Mock(
- type=EventTypes.Message,
- content={"msgtype": ServerNoticeMsgType},
+ type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType}
+ )
+ self._rlsn._store.get_events = Mock(
+ return_value=defer.succeed({"123": mock_event})
)
- self._rlsn._store.get_events = Mock(return_value=defer.succeed(
- {"123": mock_event}
- ))
yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
self._send_notice.assert_not_called()
@@ -177,13 +167,9 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase):
@defer.inlineCallbacks
def test_server_notice_only_sent_once(self):
- self.store.get_monthly_active_count = Mock(
- return_value=1000,
- )
+ self.store.get_monthly_active_count = Mock(return_value=1000)
- self.store.user_last_seen_monthly_active = Mock(
- return_value=1000,
- )
+ self.store.user_last_seen_monthly_active = Mock(return_value=1000)
# Call the function multiple times to ensure we only send the notice once
yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
@@ -193,12 +179,12 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase):
# Now lets get the last load of messages in the service notice room and
# check that there is only one server notice
room_id = yield self.server_notices_manager.get_notice_room_for_user(
- self.user_id,
+ self.user_id
)
token = yield self.event_source.get_current_token()
events, _ = yield self.store.get_recent_events_for_room(
- room_id, limit=100, end_token=token.room_key,
+ room_id, limit=100, end_token=token.room_key
)
count = 0
diff --git a/tests/state/__init__.py b/tests/state/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/state/__init__.py
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
new file mode 100644
index 00000000..2e073a3a
--- /dev/null
+++ b/tests/state/test_v2.py
@@ -0,0 +1,759 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import itertools
+
+from six.moves import zip
+
+import attr
+
+from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.event_auth import auth_types_for_event
+from synapse.events import FrozenEvent
+from synapse.state.v2 import lexicographical_topological_sort, resolve_events_with_store
+from synapse.types import EventID
+
+from tests import unittest
+
+ALICE = "@alice:example.com"
+BOB = "@bob:example.com"
+CHARLIE = "@charlie:example.com"
+EVELYN = "@evelyn:example.com"
+ZARA = "@zara:example.com"
+
+ROOM_ID = "!test:example.com"
+
+MEMBERSHIP_CONTENT_JOIN = {"membership": Membership.JOIN}
+MEMBERSHIP_CONTENT_BAN = {"membership": Membership.BAN}
+
+
+ORIGIN_SERVER_TS = 0
+
+
+class FakeEvent(object):
+ """A fake event we use as a convenience.
+
+ NOTE: Again as a convenience we use "node_ids" rather than event_ids to
+ refer to events. The event_id has node_id as localpart and example.com
+ as domain.
+ """
+ def __init__(self, id, sender, type, state_key, content):
+ self.node_id = id
+ self.event_id = EventID(id, "example.com").to_string()
+ self.sender = sender
+ self.type = type
+ self.state_key = state_key
+ self.content = content
+
+ def to_event(self, auth_events, prev_events):
+ """Given the auth_events and prev_events, convert to a Frozen Event
+
+ Args:
+ auth_events (list[str]): list of event_ids
+ prev_events (list[str]): list of event_ids
+
+ Returns:
+ FrozenEvent
+ """
+ global ORIGIN_SERVER_TS
+
+ ts = ORIGIN_SERVER_TS
+ ORIGIN_SERVER_TS = ORIGIN_SERVER_TS + 1
+
+ event_dict = {
+ "auth_events": [(a, {}) for a in auth_events],
+ "prev_events": [(p, {}) for p in prev_events],
+ "event_id": self.node_id,
+ "sender": self.sender,
+ "type": self.type,
+ "content": self.content,
+ "origin_server_ts": ts,
+ "room_id": ROOM_ID,
+ }
+
+ if self.state_key is not None:
+ event_dict["state_key"] = self.state_key
+
+ return FrozenEvent(event_dict)
+
+
+# All graphs start with this set of events
+INITIAL_EVENTS = [
+ FakeEvent(
+ id="CREATE",
+ sender=ALICE,
+ type=EventTypes.Create,
+ state_key="",
+ content={"creator": ALICE},
+ ),
+ FakeEvent(
+ id="IMA",
+ sender=ALICE,
+ type=EventTypes.Member,
+ state_key=ALICE,
+ content=MEMBERSHIP_CONTENT_JOIN,
+ ),
+ FakeEvent(
+ id="IPOWER",
+ sender=ALICE,
+ type=EventTypes.PowerLevels,
+ state_key="",
+ content={"users": {ALICE: 100}},
+ ),
+ FakeEvent(
+ id="IJR",
+ sender=ALICE,
+ type=EventTypes.JoinRules,
+ state_key="",
+ content={"join_rule": JoinRules.PUBLIC},
+ ),
+ FakeEvent(
+ id="IMB",
+ sender=BOB,
+ type=EventTypes.Member,
+ state_key=BOB,
+ content=MEMBERSHIP_CONTENT_JOIN,
+ ),
+ FakeEvent(
+ id="IMC",
+ sender=CHARLIE,
+ type=EventTypes.Member,
+ state_key=CHARLIE,
+ content=MEMBERSHIP_CONTENT_JOIN,
+ ),
+ FakeEvent(
+ id="IMZ",
+ sender=ZARA,
+ type=EventTypes.Member,
+ state_key=ZARA,
+ content=MEMBERSHIP_CONTENT_JOIN,
+ ),
+ FakeEvent(
+ id="START",
+ sender=ZARA,
+ type=EventTypes.Message,
+ state_key=None,
+ content={},
+ ),
+ FakeEvent(
+ id="END",
+ sender=ZARA,
+ type=EventTypes.Message,
+ state_key=None,
+ content={},
+ ),
+]
+
+INITIAL_EDGES = [
+ "START", "IMZ", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE",
+]
+
+
+class StateTestCase(unittest.TestCase):
+ def test_ban_vs_pl(self):
+ events = [
+ FakeEvent(
+ id="PA",
+ sender=ALICE,
+ type=EventTypes.PowerLevels,
+ state_key="",
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ }
+ },
+ ),
+ FakeEvent(
+ id="MA",
+ sender=ALICE,
+ type=EventTypes.Member,
+ state_key=ALICE,
+ content={"membership": Membership.JOIN},
+ ),
+ FakeEvent(
+ id="MB",
+ sender=ALICE,
+ type=EventTypes.Member,
+ state_key=BOB,
+ content={"membership": Membership.BAN},
+ ),
+ FakeEvent(
+ id="PB",
+ sender=BOB,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ },
+ },
+ ),
+ ]
+
+ edges = [
+ ["END", "MB", "MA", "PA", "START"],
+ ["END", "PB", "PA"],
+ ]
+
+ expected_state_ids = ["PA", "MA", "MB"]
+
+ self.do_check(events, edges, expected_state_ids)
+
+ def test_join_rule_evasion(self):
+ events = [
+ FakeEvent(
+ id="JR",
+ sender=ALICE,
+ type=EventTypes.JoinRules,
+ state_key="",
+ content={"join_rules": JoinRules.PRIVATE},
+ ),
+ FakeEvent(
+ id="ME",
+ sender=EVELYN,
+ type=EventTypes.Member,
+ state_key=EVELYN,
+ content={"membership": Membership.JOIN},
+ ),
+ ]
+
+ edges = [
+ ["END", "JR", "START"],
+ ["END", "ME", "START"],
+ ]
+
+ expected_state_ids = ["JR"]
+
+ self.do_check(events, edges, expected_state_ids)
+
+ def test_offtopic_pl(self):
+ events = [
+ FakeEvent(
+ id="PA",
+ sender=ALICE,
+ type=EventTypes.PowerLevels,
+ state_key="",
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ }
+ },
+ ),
+ FakeEvent(
+ id="PB",
+ sender=BOB,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ CHARLIE: 50,
+ },
+ },
+ ),
+ FakeEvent(
+ id="PC",
+ sender=CHARLIE,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ CHARLIE: 0,
+ },
+ },
+ ),
+ ]
+
+ edges = [
+ ["END", "PC", "PB", "PA", "START"],
+ ["END", "PA"],
+ ]
+
+ expected_state_ids = ["PC"]
+
+ self.do_check(events, edges, expected_state_ids)
+
+ def test_topic_basic(self):
+ events = [
+ FakeEvent(
+ id="T1",
+ sender=ALICE,
+ type=EventTypes.Topic,
+ state_key="",
+ content={},
+ ),
+ FakeEvent(
+ id="PA1",
+ sender=ALICE,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ },
+ },
+ ),
+ FakeEvent(
+ id="T2",
+ sender=ALICE,
+ type=EventTypes.Topic,
+ state_key="",
+ content={},
+ ),
+ FakeEvent(
+ id="PA2",
+ sender=ALICE,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 0,
+ },
+ },
+ ),
+ FakeEvent(
+ id="PB",
+ sender=BOB,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ },
+ },
+ ),
+ FakeEvent(
+ id="T3",
+ sender=BOB,
+ type=EventTypes.Topic,
+ state_key="",
+ content={},
+ ),
+ ]
+
+ edges = [
+ ["END", "PA2", "T2", "PA1", "T1", "START"],
+ ["END", "T3", "PB", "PA1"],
+ ]
+
+ expected_state_ids = ["PA2", "T2"]
+
+ self.do_check(events, edges, expected_state_ids)
+
+ def test_topic_reset(self):
+ events = [
+ FakeEvent(
+ id="T1",
+ sender=ALICE,
+ type=EventTypes.Topic,
+ state_key="",
+ content={},
+ ),
+ FakeEvent(
+ id="PA",
+ sender=ALICE,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ },
+ },
+ ),
+ FakeEvent(
+ id="T2",
+ sender=BOB,
+ type=EventTypes.Topic,
+ state_key="",
+ content={},
+ ),
+ FakeEvent(
+ id="MB",
+ sender=ALICE,
+ type=EventTypes.Member,
+ state_key=BOB,
+ content={"membership": Membership.BAN},
+ ),
+ ]
+
+ edges = [
+ ["END", "MB", "T2", "PA", "T1", "START"],
+ ["END", "T1"],
+ ]
+
+ expected_state_ids = ["T1", "MB", "PA"]
+
+ self.do_check(events, edges, expected_state_ids)
+
+ def test_topic(self):
+ events = [
+ FakeEvent(
+ id="T1",
+ sender=ALICE,
+ type=EventTypes.Topic,
+ state_key="",
+ content={},
+ ),
+ FakeEvent(
+ id="PA1",
+ sender=ALICE,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ },
+ },
+ ),
+ FakeEvent(
+ id="T2",
+ sender=ALICE,
+ type=EventTypes.Topic,
+ state_key="",
+ content={},
+ ),
+ FakeEvent(
+ id="PA2",
+ sender=ALICE,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 0,
+ },
+ },
+ ),
+ FakeEvent(
+ id="PB",
+ sender=BOB,
+ type=EventTypes.PowerLevels,
+ state_key='',
+ content={
+ "users": {
+ ALICE: 100,
+ BOB: 50,
+ },
+ },
+ ),
+ FakeEvent(
+ id="T3",
+ sender=BOB,
+ type=EventTypes.Topic,
+ state_key="",
+ content={},
+ ),
+ FakeEvent(
+ id="MZ1",
+ sender=ZARA,
+ type=EventTypes.Message,
+ state_key=None,
+ content={},
+ ),
+ FakeEvent(
+ id="T4",
+ sender=ALICE,
+ type=EventTypes.Topic,
+ state_key="",
+ content={},
+ ),
+ ]
+
+ edges = [
+ ["END", "T4", "MZ1", "PA2", "T2", "PA1", "T1", "START"],
+ ["END", "MZ1", "T3", "PB", "PA1"],
+ ]
+
+ expected_state_ids = ["T4", "PA2"]
+
+ self.do_check(events, edges, expected_state_ids)
+
+ def do_check(self, events, edges, expected_state_ids):
+ """Take a list of events and edges and calculate the state of the
+ graph at END, and asserts it matches `expected_state_ids`
+
+ Args:
+ events (list[FakeEvent])
+ edges (list[list[str]]): A list of chains of event edges, e.g.
+ `[[A, B, C]]` are edges A->B and B->C.
+ expected_state_ids (list[str]): The expected state at END, (excluding
+ the keys that haven't changed since START).
+ """
+ # We want to sort the events into topological order for processing.
+ graph = {}
+
+ # node_id -> FakeEvent
+ fake_event_map = {}
+
+ for ev in itertools.chain(INITIAL_EVENTS, events):
+ graph[ev.node_id] = set()
+ fake_event_map[ev.node_id] = ev
+
+ for a, b in pairwise(INITIAL_EDGES):
+ graph[a].add(b)
+
+ for edge_list in edges:
+ for a, b in pairwise(edge_list):
+ graph[a].add(b)
+
+ # event_id -> FrozenEvent
+ event_map = {}
+ # node_id -> state
+ state_at_event = {}
+
+ # We copy the map as the sort consumes the graph
+ graph_copy = {k: set(v) for k, v in graph.items()}
+
+ for node_id in lexicographical_topological_sort(graph_copy, key=lambda e: e):
+ fake_event = fake_event_map[node_id]
+ event_id = fake_event.event_id
+
+ prev_events = list(graph[node_id])
+
+ if len(prev_events) == 0:
+ state_before = {}
+ elif len(prev_events) == 1:
+ state_before = dict(state_at_event[prev_events[0]])
+ else:
+ state_d = resolve_events_with_store(
+ [state_at_event[n] for n in prev_events],
+ event_map=event_map,
+ state_res_store=TestStateResolutionStore(event_map),
+ )
+
+ state_before = self.successResultOf(state_d)
+
+ state_after = dict(state_before)
+ if fake_event.state_key is not None:
+ state_after[(fake_event.type, fake_event.state_key)] = event_id
+
+ auth_types = set(auth_types_for_event(fake_event))
+
+ auth_events = []
+ for key in auth_types:
+ if key in state_before:
+ auth_events.append(state_before[key])
+
+ event = fake_event.to_event(auth_events, prev_events)
+
+ state_at_event[node_id] = state_after
+ event_map[event_id] = event
+
+ expected_state = {}
+ for node_id in expected_state_ids:
+ # expected_state_ids are node IDs rather than event IDs,
+ # so we have to convert
+ event_id = EventID(node_id, "example.com").to_string()
+ event = event_map[event_id]
+
+ key = (event.type, event.state_key)
+
+ expected_state[key] = event_id
+
+ start_state = state_at_event["START"]
+ end_state = {
+ key: value
+ for key, value in state_at_event["END"].items()
+ if key in expected_state or start_state.get(key) != value
+ }
+
+ self.assertEqual(expected_state, end_state)
+
+
+class LexicographicalTestCase(unittest.TestCase):
+ def test_simple(self):
+ graph = {
+ "l": {"o"},
+ "m": {"n", "o"},
+ "n": {"o"},
+ "o": set(),
+ "p": {"o"},
+ }
+
+ res = list(lexicographical_topological_sort(graph, key=lambda x: x))
+
+ self.assertEqual(["o", "l", "n", "m", "p"], res)
+
+
+class SimpleParamStateTestCase(unittest.TestCase):
+ def setUp(self):
+ # We build up a simple DAG.
+
+ event_map = {}
+
+ create_event = FakeEvent(
+ id="CREATE",
+ sender=ALICE,
+ type=EventTypes.Create,
+ state_key="",
+ content={"creator": ALICE},
+ ).to_event([], [])
+ event_map[create_event.event_id] = create_event
+
+ alice_member = FakeEvent(
+ id="IMA",
+ sender=ALICE,
+ type=EventTypes.Member,
+ state_key=ALICE,
+ content=MEMBERSHIP_CONTENT_JOIN,
+ ).to_event([create_event.event_id], [create_event.event_id])
+ event_map[alice_member.event_id] = alice_member
+
+ join_rules = FakeEvent(
+ id="IJR",
+ sender=ALICE,
+ type=EventTypes.JoinRules,
+ state_key="",
+ content={"join_rule": JoinRules.PUBLIC},
+ ).to_event(
+ auth_events=[create_event.event_id, alice_member.event_id],
+ prev_events=[alice_member.event_id],
+ )
+ event_map[join_rules.event_id] = join_rules
+
+ # Bob and Charlie join at the same time, so there is a fork
+ bob_member = FakeEvent(
+ id="IMB",
+ sender=BOB,
+ type=EventTypes.Member,
+ state_key=BOB,
+ content=MEMBERSHIP_CONTENT_JOIN,
+ ).to_event(
+ auth_events=[create_event.event_id, join_rules.event_id],
+ prev_events=[join_rules.event_id],
+ )
+ event_map[bob_member.event_id] = bob_member
+
+ charlie_member = FakeEvent(
+ id="IMC",
+ sender=CHARLIE,
+ type=EventTypes.Member,
+ state_key=CHARLIE,
+ content=MEMBERSHIP_CONTENT_JOIN,
+ ).to_event(
+ auth_events=[create_event.event_id, join_rules.event_id],
+ prev_events=[join_rules.event_id],
+ )
+ event_map[charlie_member.event_id] = charlie_member
+
+ self.event_map = event_map
+ self.create_event = create_event
+ self.alice_member = alice_member
+ self.join_rules = join_rules
+ self.bob_member = bob_member
+ self.charlie_member = charlie_member
+
+ self.state_at_bob = {
+ (e.type, e.state_key): e.event_id
+ for e in [create_event, alice_member, join_rules, bob_member]
+ }
+
+ self.state_at_charlie = {
+ (e.type, e.state_key): e.event_id
+ for e in [create_event, alice_member, join_rules, charlie_member]
+ }
+
+ self.expected_combined_state = {
+ (e.type, e.state_key): e.event_id
+ for e in [create_event, alice_member, join_rules, bob_member, charlie_member]
+ }
+
+ def test_event_map_none(self):
+ # Test that we correctly handle passing `None` as the event_map
+
+ state_d = resolve_events_with_store(
+ [self.state_at_bob, self.state_at_charlie],
+ event_map=None,
+ state_res_store=TestStateResolutionStore(self.event_map),
+ )
+
+ state = self.successResultOf(state_d)
+
+ self.assert_dict(self.expected_combined_state, state)
+
+
+def pairwise(iterable):
+ "s -> (s0,s1), (s1,s2), (s2, s3), ..."
+ a, b = itertools.tee(iterable)
+ next(b, None)
+ return zip(a, b)
+
+
+@attr.s
+class TestStateResolutionStore(object):
+ event_map = attr.ib()
+
+ def get_events(self, event_ids, allow_rejected=False):
+ """Get events from the database
+
+ Args:
+ event_ids (list): The event_ids of the events to fetch
+ allow_rejected (bool): If True return rejected events.
+
+ Returns:
+ Deferred[dict[str, FrozenEvent]]: Dict from event_id to event.
+ """
+
+ return {
+ eid: self.event_map[eid]
+ for eid in event_ids
+ if eid in self.event_map
+ }
+
+ def get_auth_chain(self, event_ids):
+ """Gets the full auth chain for a set of events (including rejected
+ events).
+
+ Includes the given event IDs in the result.
+
+ Note that:
+ 1. All events must be state events.
+ 2. For v1 rooms this may not have the full auth chain in the
+ presence of rejected events
+
+ Args:
+ event_ids (list): The event IDs of the events to fetch the auth
+ chain for. Must be state events.
+
+ Returns:
+ Deferred[list[str]]: List of event IDs of the auth chain.
+ """
+
+ # Simple DFS for auth chain
+ result = set()
+ stack = list(event_ids)
+ while stack:
+ event_id = stack.pop()
+ if event_id in result:
+ continue
+
+ result.add(event_id)
+
+ event = self.event_map[event_id]
+ for aid in event.auth_event_ids():
+ stack.append(aid)
+
+ return list(result)
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index c2e88bdb..4577e942 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,35 +13,41 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
from mock import Mock
from twisted.internet import defer
-import tests.unittest
-import tests.utils
+from synapse.http.site import XForwardedForRequest
+from synapse.rest.client.v1 import admin, login
+
+from tests import unittest
-class ClientIpStoreTestCase(tests.unittest.TestCase):
- def __init__(self, *args, **kwargs):
- super(ClientIpStoreTestCase, self).__init__(*args, **kwargs)
- self.store = None # type: synapse.storage.DataStore
- self.clock = None # type: tests.utils.MockClock
+class ClientIpStoreTestCase(unittest.HomeserverTestCase):
+ def make_homeserver(self, reactor, clock):
+ hs = self.setup_test_homeserver()
+ return hs
- @defer.inlineCallbacks
- def setUp(self):
- self.hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
+ def prepare(self, hs, reactor, clock):
self.store = self.hs.get_datastore()
- self.clock = self.hs.get_clock()
- @defer.inlineCallbacks
def test_insert_new_client_ip(self):
- self.clock.now = 12345678
+ self.reactor.advance(12345678)
+
user_id = "@user:id"
- yield self.store.insert_client_ip(
- user_id, "access_token", "ip", "user_agent", "device_id"
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip", "user_agent", "device_id"
+ )
)
- result = yield self.store.get_last_client_ip_by_device(user_id, "device_id")
+ # Trigger the storage loop
+ self.reactor.advance(10)
+
+ result = self.get_success(
+ self.store.get_last_client_ip_by_device(user_id, "device_id")
+ )
r = result[(user_id, "device_id")]
self.assertDictContainsSubset(
@@ -55,18 +62,18 @@ class ClientIpStoreTestCase(tests.unittest.TestCase):
r,
)
- @defer.inlineCallbacks
def test_disabled_monthly_active_user(self):
self.hs.config.limit_usage_by_mau = False
self.hs.config.max_mau_value = 50
user_id = "@user:server"
- yield self.store.insert_client_ip(
- user_id, "access_token", "ip", "user_agent", "device_id"
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip", "user_agent", "device_id"
+ )
)
- active = yield self.store.user_last_seen_monthly_active(user_id)
+ active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
self.assertFalse(active)
- @defer.inlineCallbacks
def test_adding_monthly_active_user_when_full(self):
self.hs.config.limit_usage_by_mau = True
self.hs.config.max_mau_value = 50
@@ -76,40 +83,119 @@ class ClientIpStoreTestCase(tests.unittest.TestCase):
self.store.get_monthly_active_count = Mock(
return_value=defer.succeed(lots_of_users)
)
- yield self.store.insert_client_ip(
- user_id, "access_token", "ip", "user_agent", "device_id"
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip", "user_agent", "device_id"
+ )
)
- active = yield self.store.user_last_seen_monthly_active(user_id)
+ active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
self.assertFalse(active)
- @defer.inlineCallbacks
def test_adding_monthly_active_user_when_space(self):
self.hs.config.limit_usage_by_mau = True
self.hs.config.max_mau_value = 50
user_id = "@user:server"
- active = yield self.store.user_last_seen_monthly_active(user_id)
+ active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
self.assertFalse(active)
- yield self.store.insert_client_ip(
- user_id, "access_token", "ip", "user_agent", "device_id"
+ # Trigger the saving loop
+ self.reactor.advance(10)
+
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip", "user_agent", "device_id"
+ )
)
- active = yield self.store.user_last_seen_monthly_active(user_id)
+ active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
self.assertTrue(active)
- @defer.inlineCallbacks
def test_updating_monthly_active_user_when_space(self):
self.hs.config.limit_usage_by_mau = True
self.hs.config.max_mau_value = 50
user_id = "@user:server"
+ self.get_success(
+ self.store.register(user_id=user_id, token="123", password_hash=None)
+ )
- active = yield self.store.user_last_seen_monthly_active(user_id)
+ active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
self.assertFalse(active)
- yield self.store.insert_client_ip(
- user_id, "access_token", "ip", "user_agent", "device_id"
- )
- yield self.store.insert_client_ip(
- user_id, "access_token", "ip", "user_agent", "device_id"
+ # Trigger the saving loop
+ self.reactor.advance(10)
+
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip", "user_agent", "device_id"
+ )
)
- active = yield self.store.user_last_seen_monthly_active(user_id)
+ active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
self.assertTrue(active)
+
+
+class ClientIpAuthTestCase(unittest.HomeserverTestCase):
+
+ servlets = [admin.register_servlets, login.register_servlets]
+
+ def make_homeserver(self, reactor, clock):
+ hs = self.setup_test_homeserver()
+ return hs
+
+ def prepare(self, hs, reactor, clock):
+ self.store = self.hs.get_datastore()
+ self.user_id = self.register_user("bob", "abc123", True)
+
+ def test_request_with_xforwarded(self):
+ """
+ The IP in X-Forwarded-For is entered into the client IPs table.
+ """
+ self._runtest(
+ {b"X-Forwarded-For": b"127.9.0.1"},
+ "127.9.0.1",
+ {"request": XForwardedForRequest},
+ )
+
+ def test_request_from_getPeer(self):
+ """
+ The IP returned by getPeer is entered into the client IPs table, if
+ there's no X-Forwarded-For header.
+ """
+ self._runtest({}, "127.0.0.1", {})
+
+ def _runtest(self, headers, expected_ip, make_request_args):
+ device_id = "bleb"
+
+ access_token = self.login("bob", "abc123", device_id=device_id)
+
+ # Advance to a known time
+ self.reactor.advance(123456 - self.reactor.seconds())
+
+ request, channel = self.make_request(
+ "GET",
+ "/_matrix/client/r0/admin/users/" + self.user_id,
+ access_token=access_token,
+ **make_request_args
+ )
+ request.requestHeaders.addRawHeader(b"User-Agent", b"Mozzila pizza")
+
+ # Add the optional headers
+ for h, v in headers.items():
+ request.requestHeaders.addRawHeader(h, v)
+ self.render(request)
+
+ # Advance so the save loop occurs
+ self.reactor.advance(100)
+
+ result = self.get_success(
+ self.store.get_last_client_ip_by_device(self.user_id, device_id)
+ )
+ r = result[(self.user_id, device_id)]
+ self.assertDictContainsSubset(
+ {
+ "user_id": self.user_id,
+ "device_id": device_id,
+ "ip": expected_ip,
+ "user_agent": "Mozzila pizza",
+ "last_seen": 123456100,
+ },
+ r,
+ )
diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py
index 8f0aaece..b83f7336 100644
--- a/tests/storage/test_end_to_end_keys.py
+++ b/tests/storage/test_end_to_end_keys.py
@@ -45,6 +45,21 @@ class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
self.assertDictContainsSubset({"keys": json, "device_display_name": None}, dev)
@defer.inlineCallbacks
+ def test_reupload_key(self):
+ now = 1470174257070
+ json = {"key": "value"}
+
+ yield self.store.store_device("user", "device", None)
+
+ changed = yield self.store.set_e2e_device_keys("user", "device", now, json)
+ self.assertTrue(changed)
+
+ # If we try to upload the same key then we should be told nothing
+ # changed
+ changed = yield self.store.set_e2e_device_keys("user", "device", now, json)
+ self.assertFalse(changed)
+
+ @defer.inlineCallbacks
def test_get_key_with_device_name(self):
now = 1470174257070
json = {"key": "value"}
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 20362872..832e379a 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -12,6 +12,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from mock import Mock
+
+from twisted.internet import defer
from tests.unittest import HomeserverTestCase
@@ -23,7 +26,8 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
hs = self.setup_test_homeserver()
self.store = hs.get_datastore()
-
+ hs.config.limit_usage_by_mau = True
+ hs.config.max_mau_value = 50
# Advance the clock a bit
reactor.advance(FORTY_DAYS)
@@ -48,7 +52,10 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
now = int(self.hs.get_clock().time_msec())
self.store.user_add_threepid(user1, "email", user1_email, now, now)
self.store.user_add_threepid(user2, "email", user2_email, now, now)
- self.store.initialise_reserved_users(threepids)
+
+ self.store.runInteraction(
+ "initialise", self.store._initialise_reserved_users, threepids
+ )
self.pump()
active_count = self.store.get_monthly_active_count()
@@ -73,7 +80,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
active_count = self.store.get_monthly_active_count()
self.assertEquals(self.get_success(active_count), user_num)
- # Test that regalar users are removed from the db
+ # Test that regular users are removed from the db
ru_count = 2
self.store.upsert_monthly_active_user("@ru1:server")
self.store.upsert_monthly_active_user("@ru2:server")
@@ -139,3 +146,77 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase):
count = self.store.get_monthly_active_count()
self.assertEquals(self.get_success(count), 0)
+
+ def test_populate_monthly_users_is_guest(self):
+ # Test that guest users are not added to mau list
+ user_id = "user_id"
+ self.store.register(
+ user_id=user_id, token="123", password_hash=None, make_guest=True
+ )
+ self.store.upsert_monthly_active_user = Mock()
+ self.store.populate_monthly_active_users(user_id)
+ self.pump()
+ self.store.upsert_monthly_active_user.assert_not_called()
+
+ def test_populate_monthly_users_should_update(self):
+ self.store.upsert_monthly_active_user = Mock()
+
+ self.store.is_trial_user = Mock(
+ return_value=defer.succeed(False)
+ )
+
+ self.store.user_last_seen_monthly_active = Mock(
+ return_value=defer.succeed(None)
+ )
+ self.store.populate_monthly_active_users('user_id')
+ self.pump()
+ self.store.upsert_monthly_active_user.assert_called_once()
+
+ def test_populate_monthly_users_should_not_update(self):
+ self.store.upsert_monthly_active_user = Mock()
+
+ self.store.is_trial_user = Mock(
+ return_value=defer.succeed(False)
+ )
+ self.store.user_last_seen_monthly_active = Mock(
+ return_value=defer.succeed(
+ self.hs.get_clock().time_msec()
+ )
+ )
+ self.store.populate_monthly_active_users('user_id')
+ self.pump()
+ self.store.upsert_monthly_active_user.assert_not_called()
+
+ def test_get_reserved_real_user_account(self):
+ # Test no reserved users, or reserved threepids
+ count = self.store.get_registered_reserved_users_count()
+ self.assertEquals(self.get_success(count), 0)
+ # Test reserved users but no registered users
+
+ user1 = '@user1:example.com'
+ user2 = '@user2:example.com'
+ user1_email = 'user1@example.com'
+ user2_email = 'user2@example.com'
+ threepids = [
+ {'medium': 'email', 'address': user1_email},
+ {'medium': 'email', 'address': user2_email},
+ ]
+ self.hs.config.mau_limits_reserved_threepids = threepids
+ self.store.runInteraction(
+ "initialise", self.store._initialise_reserved_users, threepids
+ )
+
+ self.pump()
+ count = self.store.get_registered_reserved_users_count()
+ self.assertEquals(self.get_success(count), 0)
+
+ # Test reserved registered users
+ self.store.register(user_id=user1, token="123", password_hash=None)
+ self.store.register(user_id=user2, token="456", password_hash=None)
+ self.pump()
+
+ now = int(self.hs.get_clock().time_msec())
+ self.store.user_add_threepid(user1, "email", user1_email, now, now)
+ self.store.user_add_threepid(user2, "email", user2_email, now, now)
+ count = self.store.get_registered_reserved_users_count()
+ self.assertEquals(self.get_success(count), len(threepids))
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index d717b9f9..086a39d8 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -18,6 +18,7 @@ import logging
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
+from synapse.storage.state import StateFilter
from synapse.types import RoomID, UserID
import tests.unittest
@@ -75,6 +76,45 @@ class StateStoreTestCase(tests.unittest.TestCase):
self.assertEqual(len(s1), len(s2))
@defer.inlineCallbacks
+ def test_get_state_groups_ids(self):
+ e1 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Create, '', {}
+ )
+ e2 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"}
+ )
+
+ state_group_map = yield self.store.get_state_groups_ids(self.room, [e2.event_id])
+ self.assertEqual(len(state_group_map), 1)
+ state_map = list(state_group_map.values())[0]
+ self.assertDictEqual(
+ state_map,
+ {
+ (EventTypes.Create, ''): e1.event_id,
+ (EventTypes.Name, ''): e2.event_id,
+ },
+ )
+
+ @defer.inlineCallbacks
+ def test_get_state_groups(self):
+ e1 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Create, '', {}
+ )
+ e2 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"}
+ )
+
+ state_group_map = yield self.store.get_state_groups(
+ self.room, [e2.event_id])
+ self.assertEqual(len(state_group_map), 1)
+ state_list = list(state_group_map.values())[0]
+
+ self.assertEqual(
+ {ev.event_id for ev in state_list},
+ {e1.event_id, e2.event_id},
+ )
+
+ @defer.inlineCallbacks
def test_get_state_for_event(self):
# this defaults to a linear DAG as each new injection defaults to whatever
@@ -109,7 +149,7 @@ class StateStoreTestCase(tests.unittest.TestCase):
# check we get the full state as of the final event
state = yield self.store.get_state_for_event(
- e5.event_id, None, filtered_types=None
+ e5.event_id,
)
self.assertIsNotNone(e4)
@@ -127,33 +167,35 @@ class StateStoreTestCase(tests.unittest.TestCase):
# check we can filter to the m.room.name event (with a '' state key)
state = yield self.store.get_state_for_event(
- e5.event_id, [(EventTypes.Name, '')], filtered_types=None
+ e5.event_id, StateFilter.from_types([(EventTypes.Name, '')])
)
self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
# check we can filter to the m.room.name event (with a wildcard None state key)
state = yield self.store.get_state_for_event(
- e5.event_id, [(EventTypes.Name, None)], filtered_types=None
+ e5.event_id, StateFilter.from_types([(EventTypes.Name, None)])
)
self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
# check we can grab the m.room.member events (with a wildcard None state key)
state = yield self.store.get_state_for_event(
- e5.event_id, [(EventTypes.Member, None)], filtered_types=None
+ e5.event_id, StateFilter.from_types([(EventTypes.Member, None)])
)
self.assertStateMapEqual(
{(e3.type, e3.state_key): e3, (e5.type, e5.state_key): e5}, state
)
- # check we can use filtered_types to grab a specific room member
- # without filtering out the other event types
+ # check we can grab a specific room member without filtering out the
+ # other event types
state = yield self.store.get_state_for_event(
e5.event_id,
- [(EventTypes.Member, self.u_alice.to_string())],
- filtered_types=[EventTypes.Member],
+ state_filter=StateFilter(
+ types={EventTypes.Member: {self.u_alice.to_string()}},
+ include_others=True,
+ )
)
self.assertStateMapEqual(
@@ -165,10 +207,12 @@ class StateStoreTestCase(tests.unittest.TestCase):
state,
)
- # check that types=[], filtered_types=[EventTypes.Member]
- # doesn't return all members
+ # check that we can grab everything except members
state = yield self.store.get_state_for_event(
- e5.event_id, [], filtered_types=[EventTypes.Member]
+ e5.event_id, state_filter=StateFilter(
+ types={EventTypes.Member: set()},
+ include_others=True,
+ ),
)
self.assertStateMapEqual(
@@ -176,17 +220,21 @@ class StateStoreTestCase(tests.unittest.TestCase):
)
#######################################################
- # _get_some_state_from_cache tests against a full cache
+ # _get_state_for_group_using_cache tests against a full cache
#######################################################
room_id = self.room.to_string()
group_ids = yield self.store.get_state_groups_ids(room_id, [e5.event_id])
group = list(group_ids.keys())[0]
- # test _get_some_state_from_cache correctly filters out members with types=[]
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
- self.store._state_group_cache,
- group, [], filtered_types=[EventTypes.Member]
+ # test _get_state_for_group_using_cache correctly filters out members
+ # with types=[]
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
+ self.store._state_group_cache, group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: set()},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, True)
@@ -198,21 +246,27 @@ class StateStoreTestCase(tests.unittest.TestCase):
state_dict,
)
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_members_cache,
- group, [], filtered_types=[EventTypes.Member]
+ group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: set()},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, True)
- self.assertDictEqual(
- {},
- state_dict,
- )
+ self.assertDictEqual({}, state_dict)
- # test _get_some_state_from_cache correctly filters in members with wildcard types
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ # test _get_state_for_group_using_cache correctly filters in members
+ # with wildcard types
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_cache,
- group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+ group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: None},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, True)
@@ -224,9 +278,13 @@ class StateStoreTestCase(tests.unittest.TestCase):
state_dict,
)
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_members_cache,
- group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+ group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: None},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, True)
@@ -239,12 +297,15 @@ class StateStoreTestCase(tests.unittest.TestCase):
state_dict,
)
- # test _get_some_state_from_cache correctly filters in members with specific types
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ # test _get_state_for_group_using_cache correctly filters in members
+ # with specific types
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_cache,
group,
- [(EventTypes.Member, e5.state_key)],
- filtered_types=[EventTypes.Member],
+ state_filter=StateFilter(
+ types={EventTypes.Member: {e5.state_key}},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, True)
@@ -256,26 +317,27 @@ class StateStoreTestCase(tests.unittest.TestCase):
state_dict,
)
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_members_cache,
group,
- [(EventTypes.Member, e5.state_key)],
- filtered_types=[EventTypes.Member],
+ state_filter=StateFilter(
+ types={EventTypes.Member: {e5.state_key}},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, True)
- self.assertDictEqual(
- {
- (e5.type, e5.state_key): e5.event_id,
- },
- state_dict,
- )
+ self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
- # test _get_some_state_from_cache correctly filters in members with specific types
- # and no filtered_types
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ # test _get_state_for_group_using_cache correctly filters in members
+ # with specific types
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_members_cache,
- group, [(EventTypes.Member, e5.state_key)], filtered_types=None
+ group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: {e5.state_key}},
+ include_others=False,
+ ),
)
self.assertEqual(is_all, True)
@@ -305,9 +367,7 @@ class StateStoreTestCase(tests.unittest.TestCase):
key=group,
value=state_dict_ids,
# list fetched keys so it knows it's partial
- fetched_keys=(
- (e1.type, e1.state_key),
- ),
+ fetched_keys=((e1.type, e1.state_key),),
)
(is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get(
@@ -315,60 +375,60 @@ class StateStoreTestCase(tests.unittest.TestCase):
)
self.assertEqual(is_all, False)
- self.assertEqual(
- known_absent,
- set(
- [
- (e1.type, e1.state_key),
- ]
- ),
- )
- self.assertDictEqual(
- state_dict_ids,
- {
- (e1.type, e1.state_key): e1.event_id,
- },
- )
+ self.assertEqual(known_absent, set([(e1.type, e1.state_key)]))
+ self.assertDictEqual(state_dict_ids, {(e1.type, e1.state_key): e1.event_id})
############################################
# test that things work with a partial cache
- # test _get_some_state_from_cache correctly filters out members with types=[]
+ # test _get_state_for_group_using_cache correctly filters out members
+ # with types=[]
room_id = self.room.to_string()
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
- self.store._state_group_cache,
- group, [], filtered_types=[EventTypes.Member]
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
+ self.store._state_group_cache, group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: set()},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, False)
self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
room_id = self.room.to_string()
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_members_cache,
- group, [], filtered_types=[EventTypes.Member]
+ group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: set()},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, True)
self.assertDictEqual({}, state_dict)
- # test _get_some_state_from_cache correctly filters in members wildcard types
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ # test _get_state_for_group_using_cache correctly filters in members
+ # wildcard types
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_cache,
- group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+ group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: None},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, False)
- self.assertDictEqual(
- {
- (e1.type, e1.state_key): e1.event_id,
- },
- state_dict,
- )
+ self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_members_cache,
- group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+ group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: None},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, True)
@@ -380,56 +440,54 @@ class StateStoreTestCase(tests.unittest.TestCase):
state_dict,
)
- # test _get_some_state_from_cache correctly filters in members with specific types
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ # test _get_state_for_group_using_cache correctly filters in members
+ # with specific types
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_cache,
group,
- [(EventTypes.Member, e5.state_key)],
- filtered_types=[EventTypes.Member],
+ state_filter=StateFilter(
+ types={EventTypes.Member: {e5.state_key}},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, False)
- self.assertDictEqual(
- {
- (e1.type, e1.state_key): e1.event_id,
- },
- state_dict,
- )
+ self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_members_cache,
group,
- [(EventTypes.Member, e5.state_key)],
- filtered_types=[EventTypes.Member],
+ state_filter=StateFilter(
+ types={EventTypes.Member: {e5.state_key}},
+ include_others=True,
+ ),
)
self.assertEqual(is_all, True)
- self.assertDictEqual(
- {
- (e5.type, e5.state_key): e5.event_id,
- },
- state_dict,
- )
+ self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
- # test _get_some_state_from_cache correctly filters in members with specific types
- # and no filtered_types
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ # test _get_state_for_group_using_cache correctly filters in members
+ # with specific types
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_cache,
- group, [(EventTypes.Member, e5.state_key)], filtered_types=None
+ group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: {e5.state_key}},
+ include_others=False,
+ ),
)
self.assertEqual(is_all, False)
self.assertDictEqual({}, state_dict)
- (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
self.store._state_group_members_cache,
- group, [(EventTypes.Member, e5.state_key)], filtered_types=None
+ group,
+ state_filter=StateFilter(
+ types={EventTypes.Member: {e5.state_key}},
+ include_others=False,
+ ),
)
self.assertEqual(is_all, True)
- self.assertDictEqual(
- {
- (e5.type, e5.state_key): e5.event_id,
- },
- state_dict,
- )
+ self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py
new file mode 100644
index 00000000..14169afa
--- /dev/null
+++ b/tests/storage/test_transactions.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests.unittest import HomeserverTestCase
+
+
+class TransactionStoreTestCase(HomeserverTestCase):
+ def prepare(self, reactor, clock, homeserver):
+ self.store = homeserver.get_datastore()
+
+ def test_get_set_transactions(self):
+ """Tests that we can successfully get a non-existent entry for
+ destination retries, as well as testing that we can set and get
+ correctly.
+ """
+ d = self.store.get_destination_retry_timings("example.com")
+ r = self.get_success(d)
+ self.assertIsNone(r)
+
+ d = self.store.set_destination_retry_timings("example.com", 50, 100)
+ self.get_success(d)
+
+ d = self.store.get_destination_retry_timings("example.com")
+ r = self.get_success(d)
+
+ self.assert_dict({"retry_last_ts": 50, "retry_interval": 100}, r)
+
+ def test_initial_set_transactions(self):
+ """Tests that we can successfully set the destination retries (there
+ was a bug around invalidating the cache that broke this)
+ """
+ d = self.store.set_destination_retry_timings("example.com", 50, 100)
+ self.get_success(d)
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 2540604f..e1a34ccf 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -6,6 +6,7 @@ from twisted.internet.defer import maybeDeferred, succeed
from synapse.events import FrozenEvent
from synapse.types import Requester, UserID
from synapse.util import Clock
+from synapse.util.logcontext import LoggingContext
from tests import unittest
from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver
@@ -111,15 +112,16 @@ class MessageAcceptTests(unittest.TestCase):
"origin_server_ts": 1,
"type": "m.room.message",
"origin": "test.serv",
- "content": "hewwo?",
+ "content": {"body": "hewwo?"},
"auth_events": [],
"prev_events": [("two:test.serv", {}), (most_recent, {})],
}
)
- d = self.handler.on_receive_pdu(
- "test.serv", lying_event, sent_to_us_directly=True
- )
+ with LoggingContext(request="lying_event"):
+ d = self.handler.on_receive_pdu(
+ "test.serv", lying_event, sent_to_us_directly=True
+ )
# Step the reactor, so the database fetches come back
self.reactor.advance(1)
@@ -139,107 +141,3 @@ class MessageAcceptTests(unittest.TestCase):
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
-
- def test_cant_hide_past_history(self):
- """
- If you send a message, you must be able to provide the direct
- prev_events that said event references.
- """
-
- def post_json(destination, path, data, headers=None, timeout=0):
- if path.startswith("/_matrix/federation/v1/get_missing_events/"):
- return {
- "events": [
- {
- "room_id": self.room_id,
- "sender": "@baduser:test.serv",
- "event_id": "three:test.serv",
- "depth": 1000,
- "origin_server_ts": 1,
- "type": "m.room.message",
- "origin": "test.serv",
- "content": "hewwo?",
- "auth_events": [],
- "prev_events": [("four:test.serv", {})],
- }
- ]
- }
-
- self.http_client.post_json = post_json
-
- def get_json(destination, path, args, headers=None):
- if path.startswith("/_matrix/federation/v1/state_ids/"):
- d = self.successResultOf(
- self.homeserver.datastore.get_state_ids_for_event("one:test.serv")
- )
-
- return succeed(
- {
- "pdu_ids": [
- y
- for x, y in d.items()
- if x == ("m.room.member", "@us:test")
- ],
- "auth_chain_ids": list(d.values()),
- }
- )
-
- self.http_client.get_json = get_json
-
- # Figure out what the most recent event is
- most_recent = self.successResultOf(
- maybeDeferred(
- self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
- )
- )[0]
-
- # Make a good event
- good_event = FrozenEvent(
- {
- "room_id": self.room_id,
- "sender": "@baduser:test.serv",
- "event_id": "one:test.serv",
- "depth": 1000,
- "origin_server_ts": 1,
- "type": "m.room.message",
- "origin": "test.serv",
- "content": "hewwo?",
- "auth_events": [],
- "prev_events": [(most_recent, {})],
- }
- )
-
- d = self.handler.on_receive_pdu(
- "test.serv", good_event, sent_to_us_directly=True
- )
- self.reactor.advance(1)
- self.assertEqual(self.successResultOf(d), None)
-
- bad_event = FrozenEvent(
- {
- "room_id": self.room_id,
- "sender": "@baduser:test.serv",
- "event_id": "two:test.serv",
- "depth": 1000,
- "origin_server_ts": 1,
- "type": "m.room.message",
- "origin": "test.serv",
- "content": "hewwo?",
- "auth_events": [],
- "prev_events": [("one:test.serv", {}), ("three:test.serv", {})],
- }
- )
-
- d = self.handler.on_receive_pdu(
- "test.serv", bad_event, sent_to_us_directly=True
- )
- self.reactor.advance(1)
-
- extrem = maybeDeferred(
- self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
- )
- self.assertEqual(self.successResultOf(extrem)[0], "two:test.serv")
-
- state = self.homeserver.get_state_handler().get_current_state_ids(self.room_id)
- self.reactor.advance(1)
- self.assertIn(("m.room.member", "@us:test"), self.successResultOf(state).keys())
diff --git a/tests/test_mau.py b/tests/test_mau.py
index 07326154..0afdeb08 100644
--- a/tests/test_mau.py
+++ b/tests/test_mau.py
@@ -21,30 +21,20 @@ from mock import Mock, NonCallableMock
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, HttpResponseException, SynapseError
-from synapse.http.server import JsonResource
from synapse.rest.client.v2_alpha import register, sync
-from synapse.util import Clock
from tests import unittest
-from tests.server import (
- ThreadedMemoryReactorClock,
- make_request,
- render,
- setup_test_homeserver,
-)
-class TestMauLimit(unittest.TestCase):
- def setUp(self):
- self.reactor = ThreadedMemoryReactorClock()
- self.clock = Clock(self.reactor)
+class TestMauLimit(unittest.HomeserverTestCase):
- self.hs = setup_test_homeserver(
- self.addCleanup,
+ servlets = [register.register_servlets, sync.register_servlets]
+
+ def make_homeserver(self, reactor, clock):
+
+ self.hs = self.setup_test_homeserver(
"red",
http_client=None,
- clock=self.clock,
- reactor=self.reactor,
federation_client=Mock(),
ratelimiter=NonCallableMock(spec_set=["send_message"]),
)
@@ -63,10 +53,7 @@ class TestMauLimit(unittest.TestCase):
self.hs.config.server_notices_mxid_display_name = None
self.hs.config.server_notices_mxid_avatar_url = None
self.hs.config.server_notices_room_name = "Test Server Notice Room"
-
- self.resource = JsonResource(self.hs)
- register.register_servlets(self.hs, self.resource)
- sync.register_servlets(self.hs, self.resource)
+ return self.hs
def test_simple_deny_mau(self):
# Create and sync so that the MAU counts get updated
@@ -185,20 +172,20 @@ class TestMauLimit(unittest.TestCase):
self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
def create_user(self, localpart):
- request_data = json.dumps({
- "username": localpart,
- "password": "monkey",
- "auth": {"type": LoginType.DUMMY},
- })
+ request_data = json.dumps(
+ {
+ "username": localpart,
+ "password": "monkey",
+ "auth": {"type": LoginType.DUMMY},
+ }
+ )
- request, channel = make_request(b"POST", b"/register", request_data)
- render(request, self.resource, self.reactor)
+ request, channel = self.make_request("POST", "/register", request_data)
+ self.render(request)
- if channel.result["code"] != b"200":
+ if channel.code != 200:
raise HttpResponseException(
- int(channel.result["code"]),
- channel.result["reason"],
- channel.result["body"],
+ channel.code, channel.result["reason"], channel.result["body"]
).to_synapse_error()
access_token = channel.json_body["access_token"]
@@ -206,12 +193,12 @@ class TestMauLimit(unittest.TestCase):
return access_token
def do_sync_for_user(self, token):
- request, channel = make_request(b"GET", b"/sync", access_token=token)
- render(request, self.resource, self.reactor)
+ request, channel = self.make_request(
+ "GET", "/sync", access_token=token
+ )
+ self.render(request)
- if channel.result["code"] != b"200":
+ if channel.code != 200:
raise HttpResponseException(
- int(channel.result["code"]),
- channel.result["reason"],
- channel.result["body"],
+ channel.code, channel.result["reason"], channel.result["body"]
).to_synapse_error()
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
new file mode 100644
index 00000000..17897711
--- /dev/null
+++ b/tests/test_metrics.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from synapse.metrics import InFlightGauge
+
+from tests import unittest
+
+
+class TestMauLimit(unittest.TestCase):
+ def test_basic(self):
+ gauge = InFlightGauge(
+ "test1", "",
+ labels=["test_label"],
+ sub_metrics=["foo", "bar"],
+ )
+
+ def handle1(metrics):
+ metrics.foo += 2
+ metrics.bar = max(metrics.bar, 5)
+
+ def handle2(metrics):
+ metrics.foo += 3
+ metrics.bar = max(metrics.bar, 7)
+
+ gauge.register(("key1",), handle1)
+
+ self.assert_dict({
+ "test1_total": {("key1",): 1},
+ "test1_foo": {("key1",): 2},
+ "test1_bar": {("key1",): 5},
+ }, self.get_metrics_from_gauge(gauge))
+
+ gauge.unregister(("key1",), handle1)
+
+ self.assert_dict({
+ "test1_total": {("key1",): 0},
+ "test1_foo": {("key1",): 0},
+ "test1_bar": {("key1",): 0},
+ }, self.get_metrics_from_gauge(gauge))
+
+ gauge.register(("key1",), handle1)
+ gauge.register(("key2",), handle2)
+
+ self.assert_dict({
+ "test1_total": {("key1",): 1, ("key2",): 1},
+ "test1_foo": {("key1",): 2, ("key2",): 3},
+ "test1_bar": {("key1",): 5, ("key2",): 7},
+ }, self.get_metrics_from_gauge(gauge))
+
+ gauge.unregister(("key2",), handle2)
+ gauge.register(("key1",), handle2)
+
+ self.assert_dict({
+ "test1_total": {("key1",): 2, ("key2",): 0},
+ "test1_foo": {("key1",): 5, ("key2",): 0},
+ "test1_bar": {("key1",): 7, ("key2",): 0},
+ }, self.get_metrics_from_gauge(gauge))
+
+ def get_metrics_from_gauge(self, gauge):
+ results = {}
+
+ for r in gauge.collect():
+ results[r.name] = {
+ tuple(labels[x] for x in gauge.labels): value
+ for _, labels, value in r.samples
+ }
+
+ return results
diff --git a/tests/test_server.py b/tests/test_server.py
index ef74544e..f0e6291b 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -1,14 +1,35 @@
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
import re
+from six import StringIO
+
from twisted.internet.defer import Deferred
-from twisted.test.proto_helpers import MemoryReactorClock
+from twisted.python.failure import Failure
+from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
+from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
from synapse.api.errors import Codes, SynapseError
from synapse.http.server import JsonResource
+from synapse.http.site import SynapseSite, logger
from synapse.util import Clock
from tests import unittest
-from tests.server import make_request, render, setup_test_homeserver
+from tests.server import FakeTransport, make_request, render, setup_test_homeserver
class JsonResourceTests(unittest.TestCase):
@@ -36,7 +57,9 @@ class JsonResourceTests(unittest.TestCase):
"GET", [re.compile("^/_matrix/foo/(?P<room_id>[^/]*)$")], _callback
)
- request, channel = make_request(b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83")
+ request, channel = make_request(
+ self.reactor, b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83"
+ )
render(request, res, self.reactor)
self.assertEqual(request.args, {b'a': [u"\N{SNOWMAN}".encode('utf8')]})
@@ -54,7 +77,7 @@ class JsonResourceTests(unittest.TestCase):
res = JsonResource(self.homeserver)
res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
- request, channel = make_request(b"GET", b"/_matrix/foo")
+ request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
render(request, res, self.reactor)
self.assertEqual(channel.result["code"], b'500')
@@ -77,7 +100,7 @@ class JsonResourceTests(unittest.TestCase):
res = JsonResource(self.homeserver)
res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
- request, channel = make_request(b"GET", b"/_matrix/foo")
+ request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
render(request, res, self.reactor)
self.assertEqual(channel.result["code"], b'500')
@@ -94,7 +117,7 @@ class JsonResourceTests(unittest.TestCase):
res = JsonResource(self.homeserver)
res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
- request, channel = make_request(b"GET", b"/_matrix/foo")
+ request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
render(request, res, self.reactor)
self.assertEqual(channel.result["code"], b'403')
@@ -115,9 +138,58 @@ class JsonResourceTests(unittest.TestCase):
res = JsonResource(self.homeserver)
res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)
- request, channel = make_request(b"GET", b"/_matrix/foobar")
+ request, channel = make_request(self.reactor, b"GET", b"/_matrix/foobar")
render(request, res, self.reactor)
self.assertEqual(channel.result["code"], b'400')
self.assertEqual(channel.json_body["error"], "Unrecognized request")
self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
+
+
+class SiteTestCase(unittest.HomeserverTestCase):
+ def test_lose_connection(self):
+ """
+ The request URI is logged with the access token redacted when we lose the connection.
+ """
+
+ class HangingResource(Resource):
+ """
+ A Resource that strategically hangs, as if it were processing an
+ answer.
+ """
+
+ def render(self, request):
+ return NOT_DONE_YET
+
+ # Set up a logging handler that we can inspect afterwards
+ output = StringIO()
+ handler = logging.StreamHandler(output)
+ logger.addHandler(handler)
+ old_level = logger.level
+ logger.setLevel(10)
+ self.addCleanup(logger.setLevel, old_level)
+ self.addCleanup(logger.removeHandler, handler)
+
+ # Make a resource and a Site; the resource will hang and allow us to
+ # time out the request while it's 'processing'
+ base_resource = Resource()
+ base_resource.putChild(b'', HangingResource())
+ site = SynapseSite("test", "site_tag", {}, base_resource, "1.0")
+
+ server = site.buildProtocol(None)
+ client = AccumulatingProtocol()
+ client.makeConnection(FakeTransport(server, self.reactor))
+ server.makeConnection(FakeTransport(client, self.reactor))
+
+ # Send a request with an access token that will get redacted
+ server.dataReceived(b"GET /?access_token=bar HTTP/1.0\r\n\r\n")
+ self.pump()
+
+ # Lose the connection
+ e = Failure(Exception("Failed123"))
+ server.connectionLost(e)
+ handler.flush()
+
+ # Our access token is redacted and the failure reason is logged.
+ self.assertIn("/?access_token=<redacted>", output.getvalue())
+ self.assertIn("Failed123", output.getvalue())
diff --git a/tests/test_state.py b/tests/test_state.py
index 452a123c..e20c3332 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -180,7 +180,7 @@ class StateTestCase(unittest.TestCase):
graph = Graph(
nodes={
"START": DictObj(
- type=EventTypes.Create, state_key="", content={}, depth=1,
+ type=EventTypes.Create, state_key="", content={}, depth=1
),
"A": DictObj(type=EventTypes.Message, depth=2),
"B": DictObj(type=EventTypes.Message, depth=3),
diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py
new file mode 100644
index 00000000..9ecc3ef1
--- /dev/null
+++ b/tests/test_terms_auth.py
@@ -0,0 +1,123 @@
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import six
+from mock import Mock
+
+from twisted.test.proto_helpers import MemoryReactorClock
+
+from synapse.rest.client.v2_alpha.register import register_servlets
+from synapse.util import Clock
+
+from tests import unittest
+
+
+class TermsTestCase(unittest.HomeserverTestCase):
+ servlets = [register_servlets]
+
+ def prepare(self, reactor, clock, hs):
+ self.clock = MemoryReactorClock()
+ self.hs_clock = Clock(self.clock)
+ self.url = "/_matrix/client/r0/register"
+ self.registration_handler = Mock()
+ self.auth_handler = Mock()
+ self.device_handler = Mock()
+ hs.config.enable_registration = True
+ hs.config.registrations_require_3pid = []
+ hs.config.auto_join_rooms = []
+ hs.config.enable_registration_captcha = False
+
+ def test_ui_auth(self):
+ self.hs.config.user_consent_at_registration = True
+ self.hs.config.user_consent_policy_name = "My Cool Privacy Policy"
+ self.hs.config.public_baseurl = "https://example.org"
+ self.hs.config.user_consent_version = "1.0"
+
+ # Do a UI auth request
+ request, channel = self.make_request(b"POST", self.url, b"{}")
+ self.render(request)
+
+ self.assertEquals(channel.result["code"], b"401", channel.result)
+
+ self.assertTrue(channel.json_body is not None)
+ self.assertIsInstance(channel.json_body["session"], six.text_type)
+
+ self.assertIsInstance(channel.json_body["flows"], list)
+ for flow in channel.json_body["flows"]:
+ self.assertIsInstance(flow["stages"], list)
+ self.assertTrue(len(flow["stages"]) > 0)
+ self.assertEquals(flow["stages"][-1], "m.login.terms")
+
+ expected_params = {
+ "m.login.terms": {
+ "policies": {
+ "privacy_policy": {
+ "en": {
+ "name": "My Cool Privacy Policy",
+ "url": "https://example.org/_matrix/consent?v=1.0",
+ },
+ "version": "1.0"
+ },
+ },
+ },
+ }
+ self.assertIsInstance(channel.json_body["params"], dict)
+ self.assertDictContainsSubset(channel.json_body["params"], expected_params)
+
+ # We have to complete the dummy auth stage before completing the terms stage
+ request_data = json.dumps(
+ {
+ "username": "kermit",
+ "password": "monkey",
+ "auth": {
+ "session": channel.json_body["session"],
+ "type": "m.login.dummy",
+ },
+ }
+ )
+
+ self.registration_handler.check_username = Mock(return_value=True)
+
+ request, channel = self.make_request(b"POST", self.url, request_data)
+ self.render(request)
+
+ # We don't bother checking that the response is correct - we'll leave that to
+ # other tests. We just want to make sure we're on the right path.
+ self.assertEquals(channel.result["code"], b"401", channel.result)
+
+ # Finish the UI auth for terms
+ request_data = json.dumps(
+ {
+ "username": "kermit",
+ "password": "monkey",
+ "auth": {
+ "session": channel.json_body["session"],
+ "type": "m.login.terms",
+ },
+ }
+ )
+ request, channel = self.make_request(b"POST", self.url, request_data)
+ self.render(request)
+
+ # We're interested in getting a response that looks like a successful
+ # registration, not so much that the details are exactly what we want.
+
+ self.assertEquals(channel.result["code"], b"200", channel.result)
+
+ self.assertTrue(channel.json_body is not None)
+ self.assertIsInstance(channel.json_body["user_id"], six.text_type)
+ self.assertIsInstance(channel.json_body["access_token"], six.text_type)
+ self.assertIsInstance(channel.json_body["device_id"], six.text_type)
diff --git a/tests/unittest.py b/tests/unittest.py
index a3d39920..a9ce57da 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import hashlib
+import hmac
import logging
from mock import Mock
@@ -26,11 +28,13 @@ from twisted.internet.defer import Deferred
from twisted.trial import unittest
from synapse.http.server import JsonResource
+from synapse.http.site import SynapseRequest
from synapse.server import HomeServer
from synapse.types import UserID, create_requester
from synapse.util.logcontext import LoggingContextFilter
from tests.server import get_clock, make_request, render, setup_test_homeserver
+from tests.utils import default_config
# Set up putting Synapse's logs into Trial's.
rootLogger = logging.getLogger()
@@ -142,6 +146,13 @@ def DEBUG(target):
return target
+def INFO(target):
+ """A decorator to set the .loglevel attribute to logging.INFO.
+ Can apply to either a TestCase or an individual test method."""
+ target.loglevel = logging.INFO
+ return target
+
+
class HomeserverTestCase(TestCase):
"""
A base TestCase that reduces boilerplate for HomeServer-using test cases.
@@ -178,11 +189,11 @@ class HomeserverTestCase(TestCase):
for servlet in self.servlets:
servlet(self.hs, self.resource)
- if hasattr(self, "user_id"):
- from tests.rest.client.v1.utils import RestHelper
+ from tests.rest.client.v1.utils import RestHelper
- self.helper = RestHelper(self.hs, self.resource, self.user_id)
+ self.helper = RestHelper(self.hs, self.resource, getattr(self, "user_id", None))
+ if hasattr(self, "user_id"):
if self.hijack_auth:
def get_user_by_access_token(token=None, allow_guest=False):
@@ -219,7 +230,17 @@ class HomeserverTestCase(TestCase):
Function to be overridden in subclasses.
"""
- raise NotImplementedError()
+ hs = self.setup_test_homeserver()
+ return hs
+
+ def default_config(self, name="test"):
+ """
+ Get a default HomeServer config object.
+
+ Args:
+ name (str): The homeserver name/domain.
+ """
+ return default_config(name)
def prepare(self, reactor, clock, homeserver):
"""
@@ -236,7 +257,15 @@ class HomeserverTestCase(TestCase):
Function to optionally be overridden in subclasses.
"""
- def make_request(self, method, path, content=b""):
+ def make_request(
+ self,
+ method,
+ path,
+ content=b"",
+ access_token=None,
+ request=SynapseRequest,
+ shorthand=True,
+ ):
"""
Create a SynapseRequest at the path using the method and containing the
given content.
@@ -247,6 +276,8 @@ class HomeserverTestCase(TestCase):
escaped UTF-8 & spaces and such).
content (bytes or dict): The body of the request. JSON-encoded, if
a dict.
+ shorthand: Whether to try and be helpful and prefix the given URL
+ with the usual REST API path, if it doesn't contain it.
Returns:
A synapse.http.site.SynapseRequest.
@@ -254,7 +285,9 @@ class HomeserverTestCase(TestCase):
if isinstance(content, dict):
content = json.dumps(content).encode('utf8')
- return make_request(method, path, content)
+ return make_request(
+ self.reactor, method, path, content, access_token, request, shorthand
+ )
def render(self, request):
"""
@@ -293,3 +326,69 @@ class HomeserverTestCase(TestCase):
return d
self.pump()
return self.successResultOf(d)
+
+ def register_user(self, username, password, admin=False):
+ """
+ Register a user. Requires the Admin API be registered.
+
+ Args:
+ username (bytes/unicode): The user part of the new user.
+ password (bytes/unicode): The password of the new user.
+ admin (bool): Whether the user should be created as an admin
+ or not.
+
+ Returns:
+ The MXID of the new user (unicode).
+ """
+ self.hs.config.registration_shared_secret = u"shared"
+
+ # Create the user
+ request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register")
+ self.render(request)
+ nonce = channel.json_body["nonce"]
+
+ want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+ nonce_str = b"\x00".join([username.encode('utf8'), password.encode('utf8')])
+ if admin:
+ nonce_str += b"\x00admin"
+ else:
+ nonce_str += b"\x00notadmin"
+ want_mac.update(nonce.encode('ascii') + b"\x00" + nonce_str)
+ want_mac = want_mac.hexdigest()
+
+ body = json.dumps(
+ {
+ "nonce": nonce,
+ "username": username,
+ "password": password,
+ "admin": admin,
+ "mac": want_mac,
+ }
+ )
+ request, channel = self.make_request(
+ "POST", "/_matrix/client/r0/admin/register", body.encode('utf8')
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ user_id = channel.json_body["user_id"]
+ return user_id
+
+ def login(self, username, password, device_id=None):
+ """
+ Log in a user, and get an access token. Requires the Login API be
+ registered.
+
+ """
+ body = {"type": "m.login.password", "user": username, "password": password}
+ if device_id:
+ body["device_id"] = device_id
+
+ request, channel = self.make_request(
+ "POST", "/_matrix/client/r0/login", json.dumps(body).encode('utf8')
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ access_token = channel.json_body["access_token"]
+ return access_token
diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py
index 5cbada4e..50bc7702 100644
--- a/tests/util/test_expiring_cache.py
+++ b/tests/util/test_expiring_cache.py
@@ -65,7 +65,6 @@ class ExpiringCacheTestCase(unittest.TestCase):
def test_time_eviction(self):
clock = MockClock()
cache = ExpiringCache("test", clock, expiry_ms=1000)
- cache.start()
cache["key"] = 1
clock.advance_time(0.5)
diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py
index 4633db77..8adaee3c 100644
--- a/tests/util/test_logcontext.py
+++ b/tests/util/test_logcontext.py
@@ -159,6 +159,11 @@ class LoggingContextTestCase(unittest.TestCase):
self.assertEqual(r, "bum")
self._check_test_key("one")
+ def test_nested_logging_context(self):
+ with LoggingContext(request="foo"):
+ nested_context = logcontext.nested_logging_context(suffix="bar")
+ self.assertEqual(nested_context.request, "foo-bar")
+
# a function which returns a deferred which has been "called", but
# which had a function which returned another incomplete deferred on
diff --git a/tests/utils.py b/tests/utils.py
index b85017d2..67ab916f 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -16,7 +16,9 @@
import atexit
import hashlib
import os
+import time
import uuid
+import warnings
from inspect import getcallargs
from mock import Mock, patch
@@ -94,14 +96,80 @@ def setupdb():
atexit.register(_cleanup)
+def default_config(name):
+ """
+ Create a reasonable test config.
+ """
+ config = Mock()
+ config.signing_key = [MockKey()]
+ config.event_cache_size = 1
+ config.enable_registration = True
+ config.macaroon_secret_key = "not even a little secret"
+ config.expire_access_token = False
+ config.server_name = name
+ config.trusted_third_party_id_servers = []
+ config.room_invite_state_types = []
+ config.password_providers = []
+ config.worker_replication_url = ""
+ config.worker_app = None
+ config.email_enable_notifs = False
+ config.block_non_admin_invites = False
+ config.federation_domain_whitelist = None
+ config.federation_rc_reject_limit = 10
+ config.federation_rc_sleep_limit = 10
+ config.federation_rc_sleep_delay = 100
+ config.federation_rc_concurrent = 10
+ config.filter_timeline_limit = 5000
+ config.user_directory_search_all_users = False
+ config.user_consent_server_notice_content = None
+ config.block_events_without_consent_error = None
+ config.user_consent_at_registration = False
+ config.user_consent_policy_name = "Privacy Policy"
+ config.media_storage_providers = []
+ config.autocreate_auto_join_rooms = True
+ config.auto_join_rooms = []
+ config.limit_usage_by_mau = False
+ config.hs_disabled = False
+ config.hs_disabled_message = ""
+ config.hs_disabled_limit_type = ""
+ config.max_mau_value = 50
+ config.mau_trial_days = 0
+ config.mau_limits_reserved_threepids = []
+ config.admin_contact = None
+ config.rc_messages_per_second = 10000
+ config.rc_message_burst_count = 10000
+
+ config.use_frozen_dicts = False
+
+ # we need a sane default_room_version, otherwise attempts to create rooms will
+ # fail.
+ config.default_room_version = "1"
+
+ # disable user directory updates, because they get done in the
+ # background, which upsets the test runner.
+ config.update_user_directory = False
+
+ def is_threepid_reserved(threepid):
+ return ServerConfig.is_threepid_reserved(config, threepid)
+
+ config.is_threepid_reserved.side_effect = is_threepid_reserved
+
+ return config
+
+
class TestHomeServer(HomeServer):
DATASTORE_CLASS = DataStore
@defer.inlineCallbacks
def setup_test_homeserver(
- cleanup_func, name="test", datastore=None, config=None, reactor=None,
- homeserverToUse=TestHomeServer, **kargs
+ cleanup_func,
+ name="test",
+ datastore=None,
+ config=None,
+ reactor=None,
+ homeserverToUse=TestHomeServer,
+ **kargs
):
"""
Setup a homeserver suitable for running tests against. Keyword arguments
@@ -117,55 +185,8 @@ def setup_test_homeserver(
from twisted.internet import reactor
if config is None:
- config = Mock()
- config.signing_key = [MockKey()]
- config.event_cache_size = 1
- config.enable_registration = True
- config.macaroon_secret_key = "not even a little secret"
- config.expire_access_token = False
- config.server_name = name
- config.trusted_third_party_id_servers = []
- config.room_invite_state_types = []
- config.password_providers = []
- config.worker_replication_url = ""
- config.worker_app = None
- config.email_enable_notifs = False
- config.block_non_admin_invites = False
- config.federation_domain_whitelist = None
- config.federation_rc_reject_limit = 10
- config.federation_rc_sleep_limit = 10
- config.federation_rc_sleep_delay = 100
- config.federation_rc_concurrent = 10
- config.filter_timeline_limit = 5000
- config.user_directory_search_all_users = False
- config.user_consent_server_notice_content = None
- config.block_events_without_consent_error = None
- config.media_storage_providers = []
- config.auto_join_rooms = []
- config.limit_usage_by_mau = False
- config.hs_disabled = False
- config.hs_disabled_message = ""
- config.hs_disabled_limit_type = ""
- config.max_mau_value = 50
- config.mau_limits_reserved_threepids = []
- config.admin_contact = None
- config.rc_messages_per_second = 10000
- config.rc_message_burst_count = 10000
-
- # we need a sane default_room_version, otherwise attempts to create rooms will
- # fail.
- config.default_room_version = "1"
-
- # disable user directory updates, because they get done in the
- # background, which upsets the test runner.
- config.update_user_directory = False
-
- def is_threepid_reserved(threepid):
- return ServerConfig.is_threepid_reserved(config, threepid)
-
- config.is_threepid_reserved.side_effect = is_threepid_reserved
-
- config.use_frozen_dicts = True
+ config = default_config(name)
+
config.ldap_enabled = False
if "clock" not in kargs:
@@ -231,20 +252,41 @@ def setup_test_homeserver(
else:
# We need to do cleanup on PostgreSQL
def cleanup():
+ import psycopg2
+
# Close all the db pools
hs.get_db_pool().close()
+ dropped = False
+
# Drop the test database
db_conn = db_engine.module.connect(
database=POSTGRES_BASE_DB, user=POSTGRES_USER
)
db_conn.autocommit = True
cur = db_conn.cursor()
- cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
- db_conn.commit()
+
+ # Try a few times to drop the DB. Some things may hold on to the
+ # database for a few more seconds due to flakiness, preventing
+ # us from dropping it when the test is over. If we can't drop
+ # it, warn and move on.
+ for x in range(5):
+ try:
+ cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
+ db_conn.commit()
+ dropped = True
+ except psycopg2.OperationalError as e:
+ warnings.warn(
+ "Couldn't drop old db: " + str(e), category=UserWarning
+ )
+ time.sleep(0.5)
+
cur.close()
db_conn.close()
+ if not dropped:
+ warnings.warn("Failed to drop old DB.", category=UserWarning)
+
if not LEAVE_DB:
# Register the cleanup hook
cleanup_func(cleanup)
@@ -322,8 +364,7 @@ class MockHttpResource(HttpServer):
@patch('twisted.web.http.Request')
@defer.inlineCallbacks
def trigger(
- self, http_method, path, content, mock_request,
- federation_auth_origin=None,
+ self, http_method, path, content, mock_request, federation_auth_origin=None
):
""" Fire an HTTP event.
@@ -356,7 +397,7 @@ class MockHttpResource(HttpServer):
headers = {}
if federation_auth_origin is not None:
headers[b"Authorization"] = [
- b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin, )
+ b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,)
]
mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers)
@@ -576,16 +617,16 @@ def create_room(hs, room_id, creator_id):
event_builder_factory = hs.get_event_builder_factory()
event_creation_handler = hs.get_event_creation_handler()
- builder = event_builder_factory.new({
- "type": EventTypes.Create,
- "state_key": "",
- "sender": creator_id,
- "room_id": room_id,
- "content": {},
- })
-
- event, context = yield event_creation_handler.create_new_client_event(
- builder
+ builder = event_builder_factory.new(
+ {
+ "type": EventTypes.Create,
+ "state_key": "",
+ "sender": creator_id,
+ "room_id": room_id,
+ "content": {},
+ }
)
+ event, context = yield event_creation_handler.create_new_client_event(builder)
+
yield store.persist_event(event, context)
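The reformatted create_room helper above builds an m.room.create event and persists it. A hedged usage sketch, assuming it is driven from an inlineCallbacks test against a homeserver returned by setup_test_homeserver (the room and user IDs are illustrative):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def make_fixture_room(hs):
        room_id = "!fixture:test"      # illustrative room ID
        creator_id = "@creator:test"   # illustrative user ID
        yield create_room(hs, room_id, creator_id)
        defer.returnValue(room_id)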
diff --git a/tox.ini b/tox.ini
index 085f4389..03ddaeb0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,8 +3,7 @@ envlist = packaging, py27, py36, pep8, check_isort
[base]
deps =
- coverage
- Twisted>=15.1
+ Twisted>=17.1
mock
python-subunit
junitxml
@@ -12,6 +11,20 @@ deps =
# needed by some of the tests
lxml
+    # cryptography 2.2 requires setuptools >= 18.5
+ #
+ # older versions of virtualenv (?) give us a virtualenv with the same
+ # version of setuptools as is installed on the system python (and tox runs
+ # virtualenv under python3, so we get the version of setuptools that is
+ # installed on that).
+ #
+ # anyway, make sure that we have a recent enough setuptools.
+ setuptools>=18.5
+
+ # we also need a semi-recent version of pip, because old ones fail to
+ # install the "enum34" dependency of cryptography.
+ pip>=10
+
setenv =
PYTHONDONTWRITEBYTECODE = no_byte_code
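The tox comments above pin setuptools>=18.5 and pip>=10 so that cryptography 2.2 and its enum34 dependency install cleanly. A small sketch of how a test run could assert it actually got a new-enough setuptools (a hypothetical check, not part of the tox change):

    import pkg_resources

    def assert_recent_setuptools(minimum="18.5"):
        # cryptography 2.2 requires setuptools >= 18.5 (see the comment above).
        installed = pkg_resources.get_distribution("setuptools").version
        if pkg_resources.parse_version(installed) < pkg_resources.parse_version(minimum):
            raise RuntimeError("setuptools %s is too old, need >= %s" % (installed, minimum))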
@@ -26,9 +39,7 @@ passenv = *
commands =
/usr/bin/find "{toxinidir}" -name '*.pyc' -delete
- coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \
- "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
- {env:DUMP_COVERAGE_COMMAND:coverage report -m}
+ "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
[testenv:py27]
@@ -64,49 +75,41 @@ setenv =
{[base]setenv}
SYNAPSE_POSTGRES = 1
-[testenv:py36]
-usedevelop=true
+# A test suite for the oldest supported versions of Python libraries, to catch
+# any uses of APIs not available in them.
+[testenv:py27-old]
+skip_install=True
+deps =
+ # Old automat version for Twisted
+ Automat == 0.3.0
+
+ mock
+ lxml
commands =
/usr/bin/find "{toxinidir}" -name '*.pyc' -delete
- coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \
- "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests/config \
- tests/api/test_filtering.py \
- tests/api/test_ratelimiting.py \
- tests/appservice \
- tests/crypto \
- tests/events \
- tests/handlers/test_appservice.py \
- tests/handlers/test_auth.py \
- tests/handlers/test_device.py \
- tests/handlers/test_directory.py \
- tests/handlers/test_e2e_keys.py \
- tests/handlers/test_presence.py \
- tests/handlers/test_profile.py \
- tests/handlers/test_register.py \
- tests/replication/slave/storage/test_account_data.py \
- tests/replication/slave/storage/test_receipts.py \
- tests/storage/test_appservice.py \
- tests/storage/test_background_update.py \
- tests/storage/test_base.py \
- tests/storage/test__base.py \
- tests/storage/test_client_ips.py \
- tests/storage/test_devices.py \
- tests/storage/test_end_to_end_keys.py \
- tests/storage/test_event_push_actions.py \
- tests/storage/test_keys.py \
- tests/storage/test_presence.py \
- tests/storage/test_profile.py \
- tests/storage/test_registration.py \
- tests/storage/test_room.py \
- tests/storage/test_user_directory.py \
- tests/test_distributor.py \
- tests/test_dns.py \
- tests/test_preview.py \
- tests/test_test_utils.py \
- tests/test_types.py \
- tests/util} \
- {env:TOXSUFFIX:}
- {env:DUMP_COVERAGE_COMMAND:coverage report -m}
+    # Turn every ">=" into "==" so we test the oldest versions of our direct
+    # dependencies, but pin pyopenssl to 17.0, which can work against an
+    # OpenSSL 1.1 compiled cryptography (older versions don't compile on Travis).
+ /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs pip install'
+ # Install Synapse itself. This won't update any libraries.
+ pip install -e .
+ {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
+
+[testenv:py35]
+usedevelop=true
+
+[testenv:py36]
+usedevelop=true
+
+[testenv:py36-postgres]
+usedevelop=true
+deps =
+ {[base]deps}
+ psycopg2
+setenv =
+ {[base]setenv}
+ SYNAPSE_POSTGRES = 1
+
[testenv:packaging]
deps =
@@ -116,10 +119,10 @@ commands =
[testenv:pep8]
skip_install = True
-basepython = python2.7
+basepython = python3.6
deps =
flake8
-commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}"
+commands = /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/hash_password scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}"
[testenv:check_isort]
skip_install = True