author    Richard van der Hoff <richard@matrix.org>  2018-08-09 10:38:34 +0100
committer Richard van der Hoff <richard@matrix.org>  2018-08-09 10:38:34 +0100
commit    9ac9a9ab1020ec4bf395ccf3e07ba6192f6ac558 (patch)
tree      bb95bcef8f38a23f9b53f5e9707035c628229ab2
parent    ef2188c1bfd532ddc2e4c1acb533b422afde6c57 (diff)
Imported Upstream version 0.33.2
-rw-r--r--  .github/ISSUE_TEMPLATE.md | 5
-rw-r--r--  AUTHORS.rst | 5
-rw-r--r--  CHANGES.md | 2552
-rw-r--r--  CHANGES.rst | 2867
-rw-r--r--  CONTRIBUTING.rst | 2
-rw-r--r--  Dockerfile | 19
-rw-r--r--  MANIFEST.in | 2
-rw-r--r--  README.rst | 42
-rw-r--r--  contrib/docker/README.md | 116
-rw-r--r--  contrib/docker/docker-compose.yml | 1
-rw-r--r--  contrib/grafana/README.md | 6
-rw-r--r--  contrib/grafana/synapse.json | 4961
-rw-r--r--  docker/Dockerfile | 35
-rw-r--r--  docker/README.md | 124
-rw-r--r--  docker/conf/homeserver.yaml (renamed from contrib/docker/conf/homeserver.yaml) | 0
-rw-r--r--  docker/conf/log.config (renamed from contrib/docker/conf/log.config) | 0
-rwxr-xr-x  docker/start.py (renamed from contrib/docker/start.py) | 0
-rw-r--r--  docs/admin_api/register_api.rst | 63
-rw-r--r--  docs/workers.rst | 4
-rw-r--r--  pyproject.toml | 29
-rwxr-xr-x  scripts/register_new_matrix_user | 32
-rw-r--r--  setup.cfg | 12
-rw-r--r--  synapse/__init__.py | 2
-rw-r--r--  synapse/api/auth.py | 44
-rw-r--r--  synapse/api/errors.py | 107
-rw-r--r--  synapse/api/filtering.py | 20
-rw-r--r--  synapse/app/client_reader.py | 16
-rwxr-xr-x  synapse/app/homeserver.py | 38
-rw-r--r--  synapse/app/synchrotron.py | 5
-rwxr-xr-x  synapse/app/synctl.py | 4
-rw-r--r--  synapse/config/server.py | 10
-rw-r--r--  synapse/config/voip.py | 4
-rw-r--r--  synapse/events/snapshot.py | 200
-rw-r--r--  synapse/federation/federation_client.py | 300
-rw-r--r--  synapse/federation/federation_server.py | 12
-rw-r--r--  synapse/federation/send_queue.py | 63
-rw-r--r--  synapse/federation/transaction_queue.py | 57
-rw-r--r--  synapse/federation/transport/server.py | 9
-rw-r--r--  synapse/federation/units.py | 1
-rw-r--r--  synapse/groups/attestations.py | 6
-rw-r--r--  synapse/handlers/__init__.py | 4
-rw-r--r--  synapse/handlers/_base.py | 3
-rw-r--r--  synapse/handlers/appservice.py | 5
-rw-r--r--  synapse/handlers/auth.py | 48
-rw-r--r--  synapse/handlers/events.py | 25
-rw-r--r--  synapse/handlers/federation.py | 353
-rw-r--r--  synapse/handlers/identity.py | 25
-rw-r--r--  synapse/handlers/initial_sync.py | 38
-rw-r--r--  synapse/handlers/message.py | 304
-rw-r--r--  synapse/handlers/pagination.py | 265
-rw-r--r--  synapse/handlers/profile.py | 98
-rw-r--r--  synapse/handlers/register.py | 21
-rw-r--r--  synapse/handlers/room.py | 42
-rw-r--r--  synapse/handlers/room_member.py | 13
-rw-r--r--  synapse/handlers/search.py | 2
-rw-r--r--  synapse/handlers/sync.py | 173
-rw-r--r--  synapse/http/client.py | 67
-rw-r--r--  synapse/http/request_metrics.py | 9
-rw-r--r--  synapse/http/server.py | 49
-rw-r--r--  synapse/http/servlet.py | 10
-rw-r--r--  synapse/metrics/background_process_metrics.py | 185
-rw-r--r--  synapse/notifier.py | 2
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py | 7
-rw-r--r--  synapse/replication/http/membership.py | 18
-rw-r--r--  synapse/replication/http/send_event.py | 17
-rw-r--r--  synapse/replication/tcp/client.py | 2
-rw-r--r--  synapse/replication/tcp/resource.py | 14
-rw-r--r--  synapse/rest/__init__.py | 43
-rw-r--r--  synapse/rest/client/v1/admin.py | 138
-rw-r--r--  synapse/rest/client/v1/directory.py | 4
-rw-r--r--  synapse/rest/client/v1/events.py | 2
-rw-r--r--  synapse/rest/client/v1/room.py | 40
-rw-r--r--  synapse/rest/client/v1_only/__init__.py | 3
-rw-r--r--  synapse/rest/client/v1_only/base.py | 39
-rw-r--r--  synapse/rest/client/v1_only/register.py (renamed from synapse/rest/client/v1/register.py) | 7
-rw-r--r--  synapse/rest/client/v2_alpha/register.py | 12
-rw-r--r--  synapse/rest/media/v1/media_repository.py | 10
-rw-r--r--  synapse/rest/media/v1/media_storage.py | 2
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 8
-rw-r--r--  synapse/secrets.py | 41
-rw-r--r--  synapse/server.py | 22
-rw-r--r--  synapse/state.py | 248
-rw-r--r--  synapse/storage/__init__.py | 28
-rw-r--r--  synapse/storage/_base.py | 13
-rw-r--r--  synapse/storage/appservice.py | 2
-rw-r--r--  synapse/storage/background_updates.py | 10
-rw-r--r--  synapse/storage/client_ips.py | 15
-rw-r--r--  synapse/storage/devices.py | 61
-rw-r--r--  synapse/storage/end_to_end_keys.py | 27
-rw-r--r--  synapse/storage/event_federation.py | 25
-rw-r--r--  synapse/storage/event_push_actions.py | 13
-rw-r--r--  synapse/storage/events.py | 268
-rw-r--r--  synapse/storage/events_worker.py | 30
-rw-r--r--  synapse/storage/push_rule.py | 20
-rw-r--r--  synapse/storage/pusher.py | 2
-rw-r--r--  synapse/storage/roommember.py | 70
-rw-r--r--  synapse/storage/schema/delta/50/make_event_content_nullable.py | 92
-rw-r--r--  synapse/storage/schema/full_schemas/16/event_edges.sql | 3
-rw-r--r--  synapse/storage/schema/full_schemas/16/im.sql | 7
-rw-r--r--  synapse/storage/signatures.py | 2
-rw-r--r--  synapse/storage/state.py | 147
-rw-r--r--  synapse/storage/stream.py | 16
-rw-r--r--  synapse/storage/transactions.py | 8
-rw-r--r--  synapse/types.py | 2
-rw-r--r--  synapse/util/async.py | 145
-rw-r--r--  synapse/util/caches/descriptors.py | 131
-rw-r--r--  synapse/util/caches/expiringcache.py | 6
-rw-r--r--  synapse/util/distributor.py | 48
-rw-r--r--  synapse/util/frozenutils.py | 6
-rw-r--r--  synapse/util/logcontext.py | 11
-rw-r--r--  synapse/util/metrics.py | 19
-rw-r--r--  synapse/visibility.py | 38
-rw-r--r--  tests/api/test_auth.py | 35
-rw-r--r--  tests/handlers/test_auth.py | 80
-rw-r--r--  tests/handlers/test_register.py | 51
-rw-r--r--  tests/handlers/test_typing.py | 1
-rw-r--r--  tests/replication/slave/storage/_base.py | 37
-rw-r--r--  tests/replication/slave/storage/test_events.py | 8
-rw-r--r--  tests/rest/client/v1/test_admin.py | 305
-rw-r--r--  tests/rest/client/v1/test_events.py | 90
-rw-r--r--  tests/rest/client/v1/test_register.py | 5
-rw-r--r--  tests/rest/client/v1/test_rooms.py | 2
-rw-r--r--  tests/rest/client/v2_alpha/test_filter.py | 8
-rw-r--r--  tests/rest/client/v2_alpha/test_sync.py | 8
-rw-r--r--  tests/storage/test__init__.py | 65
-rw-r--r--  tests/storage/test_state.py | 319
-rw-r--r--  tests/test_distributor.py | 56
-rw-r--r--  tests/test_federation.py | 3
-rw-r--r--  tests/test_state.py | 47
-rw-r--r--  tests/util/caches/test_descriptors.py | 101
-rw-r--r--  tests/util/test_limiter.py | 70
-rw-r--r--  tests/util/test_linearizer.py | 78
-rw-r--r--  tests/utils.py | 12
133 files changed, 11894 insertions, 4945 deletions
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index d2050a3e..21acb320 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -27,8 +27,9 @@ Describe here the problem that you are experiencing, or the feature you are requ
Describe how what happens differs from what you expected.
-If you can identify any relevant log snippets from _homeserver.log_, please include
-those here (please be careful to remove any personal or private data):
+<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
+those (please be careful to remove any personal or private data). Please surround them with
+``` (three backticks, on a line on their own), so that they are formatted legibly. -->
### Version information
diff --git a/AUTHORS.rst b/AUTHORS.rst
index e13ac5ad..9a83d901 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -62,4 +62,7 @@ Christoph Witzany <christoph at web.crofting.com>
* Add LDAP support for authentication
Pierre Jaury <pierre at jaury.eu>
-* Docker packaging
\ No newline at end of file
+* Docker packaging
+
+Serban Constantin <serban.constantin at gmail dot com>
+ * Small bug fix
\ No newline at end of file
diff --git a/CHANGES.md b/CHANGES.md
new file mode 100644
index 00000000..a299110a
--- /dev/null
+++ b/CHANGES.md
@@ -0,0 +1,2552 @@
+Synapse 0.33.2 (2018-08-09)
+===========================
+
+No significant changes.
+
+
+Synapse 0.33.2rc1 (2018-08-07)
+==============================
+
+Features
+--------
+
+- Add support for the lazy_loaded_members filter as per MSC1227 (see the example request after this list) ([\#2970](https://github.com/matrix-org/synapse/issues/2970))
+- Add support for the include_redundant_members filter param as per MSC1227 ([\#3331](https://github.com/matrix-org/synapse/issues/3331))
+- Add metrics to track resource usage by background processes ([\#3553](https://github.com/matrix-org/synapse/issues/3553), [\#3556](https://github.com/matrix-org/synapse/issues/3556), [\#3604](https://github.com/matrix-org/synapse/issues/3604), [\#3610](https://github.com/matrix-org/synapse/issues/3610))
+- Add `code` label to `synapse_http_server_response_time_seconds` prometheus metric ([\#3554](https://github.com/matrix-org/synapse/issues/3554))
+- Add support for client_reader to handle more APIs ([\#3555](https://github.com/matrix-org/synapse/issues/3555), [\#3597](https://github.com/matrix-org/synapse/issues/3597))
+- make the /context API filter & lazy-load aware as per MSC1227 ([\#3567](https://github.com/matrix-org/synapse/issues/3567))
+- Add ability to limit number of monthly active users on the server ([\#3630](https://github.com/matrix-org/synapse/issues/3630))
+- When we fail to join a room over federation, pass the error code back to the client. ([\#3639](https://github.com/matrix-org/synapse/issues/3639))
+- Add a new /admin/register API for non-interactively creating users. ([\#3415](https://github.com/matrix-org/synapse/issues/3415))
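+As an illustration of the new lazy-loading filter params, here is a minimal
+sketch of a client enabling lazy-loaded members on `/sync`. The homeserver
+URL and access token are placeholders, and the exact filter field names
+should be checked against MSC1227; this is a sketch, not an official example.
+
+```python
+import json
+
+import requests
+
+# Hypothetical homeserver and token, for illustration only.
+filter_json = json.dumps({
+    "room": {
+        "state": {
+            "lazy_load_members": True,
+            # "include_redundant_members": True,  # the companion param
+        }
+    }
+})
+resp = requests.get(
+    "https://matrix.example.com/_matrix/client/r0/sync",
+    params={"filter": filter_json, "access_token": "ACCESS_TOKEN"},
+)
+resp.raise_for_status()
+```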
+
+
+Bugfixes
+--------
+
+- Make /directory/list API return 404 for room not found instead of 400 ([\#2952](https://github.com/matrix-org/synapse/issues/2952))
+- Default inviter_display_name to mxid for email invites ([\#3391](https://github.com/matrix-org/synapse/issues/3391))
+- Don't generate TURN credentials if no TURN config options are set ([\#3514](https://github.com/matrix-org/synapse/issues/3514))
+- Correctly announce deleted devices over federation ([\#3520](https://github.com/matrix-org/synapse/issues/3520))
+- Catch failures saving metrics captured by Measure, and instead log the faulty metrics information for further analysis. ([\#3548](https://github.com/matrix-org/synapse/issues/3548))
+- Unicode passwords are now normalised before hashing, preventing cases where two different devices or browsers send different UTF-8 sequences for the same password (see the sketch after this list). ([\#3569](https://github.com/matrix-org/synapse/issues/3569))
+- Fix potential stack overflow and deadlock under heavy load ([\#3570](https://github.com/matrix-org/synapse/issues/3570))
+- Respond with M_NOT_FOUND when profiles are not found locally or over federation. Fixes #3585 ([\#3585](https://github.com/matrix-org/synapse/issues/3585))
+- Fix failure to persist events over federation under load ([\#3601](https://github.com/matrix-org/synapse/issues/3601))
+- Fix updating of cached remote profiles ([\#3605](https://github.com/matrix-org/synapse/issues/3605))
+- Fix 'tuple index out of range' error ([\#3607](https://github.com/matrix-org/synapse/issues/3607))
+- Only import secrets when available (fix for py < 3.6) ([\#3626](https://github.com/matrix-org/synapse/issues/3626))
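+A minimal sketch of the normalise-then-hash approach for the password fix
+above. The exact normalisation form is an assumption here (NFKC is the common
+choice); check the PR for what Synapse actually uses.
+
+```python
+import unicodedata
+
+import bcrypt
+
+def hash_password(password):
+    # Normalise first, so that visually identical passwords typed on
+    # different devices produce the same byte sequence before hashing.
+    normalised = unicodedata.normalize("NFKC", password)  # assumed form
+    return bcrypt.hashpw(normalised.encode("utf-8"), bcrypt.gensalt())
+```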
+
+
+Internal Changes
+----------------
+
+- Remove redundant checks on who_forgot_in_room ([\#3350](https://github.com/matrix-org/synapse/issues/3350))
+- Remove unnecessary event re-signing hacks ([\#3367](https://github.com/matrix-org/synapse/issues/3367))
+- Rewrite cache list decorator ([\#3384](https://github.com/matrix-org/synapse/issues/3384))
+- Move v1-only REST APIs into their own module. ([\#3460](https://github.com/matrix-org/synapse/issues/3460))
+- Replace more instances of Python 2-only iteritems and itervalues uses. ([\#3562](https://github.com/matrix-org/synapse/issues/3562))
+- Refactor EventContext to accept state during init ([\#3577](https://github.com/matrix-org/synapse/issues/3577))
+- Improve Dockerfile and docker-compose instructions ([\#3543](https://github.com/matrix-org/synapse/issues/3543))
+- Release notes are now in the Markdown format. ([\#3552](https://github.com/matrix-org/synapse/issues/3552))
+- add config for pep8 ([\#3559](https://github.com/matrix-org/synapse/issues/3559))
+- Merge Linearizer and Limiter ([\#3571](https://github.com/matrix-org/synapse/issues/3571), [\#3572](https://github.com/matrix-org/synapse/issues/3572))
+- Lazily load state on master process when using workers to reduce DB consumption ([\#3579](https://github.com/matrix-org/synapse/issues/3579), [\#3581](https://github.com/matrix-org/synapse/issues/3581), [\#3582](https://github.com/matrix-org/synapse/issues/3582), [\#3584](https://github.com/matrix-org/synapse/issues/3584))
+- Fixes and optimisations for resolve_state_groups ([\#3586](https://github.com/matrix-org/synapse/issues/3586))
+- Improve logging for exceptions when handling PDUs ([\#3587](https://github.com/matrix-org/synapse/issues/3587))
+- Add some measure blocks to persist_events ([\#3590](https://github.com/matrix-org/synapse/issues/3590))
+- Fix some random logcontext leaks. ([\#3591](https://github.com/matrix-org/synapse/issues/3591), [\#3606](https://github.com/matrix-org/synapse/issues/3606))
+- Speed up calculating state deltas in persist_event loop ([\#3592](https://github.com/matrix-org/synapse/issues/3592))
+- Attempt to reduce amount of state pulled out of DB during persist_events ([\#3595](https://github.com/matrix-org/synapse/issues/3595))
+- Fix a documentation typo in on_make_leave_request ([\#3609](https://github.com/matrix-org/synapse/issues/3609))
+- Make EventStore inherit from EventFederationStore ([\#3612](https://github.com/matrix-org/synapse/issues/3612))
+- Remove some redundant joins on event_edges.room_id ([\#3613](https://github.com/matrix-org/synapse/issues/3613))
+- Stop populating events.content ([\#3614](https://github.com/matrix-org/synapse/issues/3614))
+- Update the /send_leave path registration to use event_id rather than a transaction ID. ([\#3616](https://github.com/matrix-org/synapse/issues/3616))
+- Refactor FederationHandler to move DB writes into separate functions ([\#3621](https://github.com/matrix-org/synapse/issues/3621))
+- Remove unused field "pdu_failures" from transactions. ([\#3628](https://github.com/matrix-org/synapse/issues/3628))
+- rename replication_layer to federation_client ([\#3634](https://github.com/matrix-org/synapse/issues/3634))
+- Factor out exception handling in federation_client ([\#3638](https://github.com/matrix-org/synapse/issues/3638))
+- Refactor location of docker build script. ([\#3644](https://github.com/matrix-org/synapse/issues/3644))
+- Update CONTRIBUTING to mention newsfragments. ([\#3645](https://github.com/matrix-org/synapse/issues/3645))
+
+
+Synapse 0.33.1 (2018-08-02)
+===========================
+
+SECURITY FIXES
+--------------
+
+- Fix a potential issue where servers could request events for rooms they have not joined. ([\#3641](https://github.com/matrix-org/synapse/issues/3641))
+- Fix a potential issue where users could see events in private rooms before they joined. ([\#3642](https://github.com/matrix-org/synapse/issues/3642))
+
+Synapse 0.33.0 (2018-07-19)
+===========================
+
+Bugfixes
+--------
+
+- Disable a noisy warning about logcontexts. ([\#3561](https://github.com/matrix-org/synapse/issues/3561))
+
+Synapse 0.33.0rc1 (2018-07-18)
+==============================
+
+Features
+--------
+
+- Enforce the specified API for report\_event. ([\#3316](https://github.com/matrix-org/synapse/issues/3316))
+- Include CPU time from database threads in request/block metrics. ([\#3496](https://github.com/matrix-org/synapse/issues/3496), [\#3501](https://github.com/matrix-org/synapse/issues/3501))
+- Add CPU metrics for \_fetch\_event\_list. ([\#3497](https://github.com/matrix-org/synapse/issues/3497))
+- Optimisation to make handling incoming federation requests more efficient. ([\#3541](https://github.com/matrix-org/synapse/issues/3541))
+
+Bugfixes
+--------
+
+- Fix a significant performance regression in /sync. ([\#3505](https://github.com/matrix-org/synapse/issues/3505), [\#3521](https://github.com/matrix-org/synapse/issues/3521), [\#3530](https://github.com/matrix-org/synapse/issues/3530), [\#3544](https://github.com/matrix-org/synapse/issues/3544))
+- Use more portable syntax in our use of the attrs package, widening the supported versions. ([\#3498](https://github.com/matrix-org/synapse/issues/3498))
+- Fix queued federation requests being processed in the wrong order. ([\#3533](https://github.com/matrix-org/synapse/issues/3533))
+- Ensure that erasure requests are correctly honoured for publicly accessible rooms when accessed over federation. ([\#3546](https://github.com/matrix-org/synapse/issues/3546))
+
+Misc
+----
+
+- Refactoring to improve testability. ([\#3351](https://github.com/matrix-org/synapse/issues/3351), [\#3499](https://github.com/matrix-org/synapse/issues/3499))
+- Use `isort` to sort imports. ([\#3463](https://github.com/matrix-org/synapse/issues/3463), [\#3464](https://github.com/matrix-org/synapse/issues/3464), [\#3540](https://github.com/matrix-org/synapse/issues/3540))
+- Use parse and asserts from http.servlet. ([\#3534](https://github.com/matrix-org/synapse/issues/3534), [\#3535](https://github.com/matrix-org/synapse/issues/3535)).
+
+Synapse 0.32.2 (2018-07-07)
+===========================
+
+Bugfixes
+--------
+
+- Amend the Python dependencies to depend on attrs from PyPI, not attr ([\#3492](https://github.com/matrix-org/synapse/issues/3492))
+
+Synapse 0.32.1 (2018-07-06)
+===========================
+
+Bugfixes
+--------
+
+- Add explicit dependency on netaddr ([\#3488](https://github.com/matrix-org/synapse/issues/3488))
+
+Changes in synapse v0.32.0 (2018-07-06)
+=======================================
+
+No changes since 0.32.0rc1
+
+Synapse 0.32.0rc1 (2018-07-05)
+==============================
+
+Features
+--------
+
+- Add blacklist & whitelist of servers allowed to send events to a room via `m.room.server_acl` event (see the sketch after this list).
+- Cache factor override system for specific caches ([\#3334](https://github.com/matrix-org/synapse/issues/3334))
+- Add metrics to track appservice transactions ([\#3344](https://github.com/matrix-org/synapse/issues/3344))
+- Try to log more helpful info when a sig verification fails ([\#3372](https://github.com/matrix-org/synapse/issues/3372))
+- Synapse now uses the best performing JSON encoder/decoder according to your runtime (simplejson on CPython, stdlib json on PyPy). ([\#3462](https://github.com/matrix-org/synapse/issues/3462))
+- Add optional ip\_range\_whitelist param to AS registration files to lock AS IP access ([\#3465](https://github.com/matrix-org/synapse/issues/3465))
+- Reject invalid server names in federation requests ([\#3480](https://github.com/matrix-org/synapse/issues/3480))
+- Reject invalid server names in homeserver.yaml ([\#3483](https://github.com/matrix-org/synapse/issues/3483))
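+A simplified sketch of how an `m.room.server_acl` event's content can be
+evaluated against a server name. It ignores details such as IP-literal
+handling (`allow_ip_literals`) and port stripping, so treat it as an
+illustration rather than Synapse's actual implementation.
+
+```python
+from fnmatch import fnmatch
+
+def server_matches_acl(server_name, acl_content):
+    # Deny rules take precedence over allow rules.
+    for pattern in acl_content.get("deny", []):
+        if fnmatch(server_name, pattern):
+            return False
+    for pattern in acl_content.get("allow", []):
+        if fnmatch(server_name, pattern):
+            return True
+    return False  # servers matching no allow rule are rejected
+```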
+
+Bugfixes
+--------
+
+- Strip access\_token from outgoing requests ([\#3327](https://github.com/matrix-org/synapse/issues/3327))
+- Redact AS tokens in logs ([\#3349](https://github.com/matrix-org/synapse/issues/3349))
+- Fix federation backfill from SQLite servers ([\#3355](https://github.com/matrix-org/synapse/issues/3355))
+- Fix event-purge-by-ts admin API ([\#3363](https://github.com/matrix-org/synapse/issues/3363))
+- Fix event filtering in get\_missing\_events handler ([\#3371](https://github.com/matrix-org/synapse/issues/3371))
+- Synapse is now stricter regarding accepting events which it cannot retrieve the prev\_events for. ([\#3456](https://github.com/matrix-org/synapse/issues/3456))
+- Fix bug where synapse would explode when receiving unicode in HTTP User-Agent header ([\#3470](https://github.com/matrix-org/synapse/issues/3470))
+- Invalidate cache on correct thread to avoid race ([\#3473](https://github.com/matrix-org/synapse/issues/3473))
+
+Improved Documentation
+----------------------
+
+- `doc/postgres.rst`: fix display of the last command block. Thanks to @ArchangeGabriel! ([\#3340](https://github.com/matrix-org/synapse/issues/3340))
+
+Deprecations and Removals
+-------------------------
+
+- Remove was\_forgotten\_at ([\#3324](https://github.com/matrix-org/synapse/issues/3324))
+
+Misc
+----
+
+- [\#3332](https://github.com/matrix-org/synapse/issues/3332), [\#3341](https://github.com/matrix-org/synapse/issues/3341), [\#3347](https://github.com/matrix-org/synapse/issues/3347), [\#3348](https://github.com/matrix-org/synapse/issues/3348), [\#3356](https://github.com/matrix-org/synapse/issues/3356), [\#3385](https://github.com/matrix-org/synapse/issues/3385), [\#3446](https://github.com/matrix-org/synapse/issues/3446), [\#3447](https://github.com/matrix-org/synapse/issues/3447), [\#3467](https://github.com/matrix-org/synapse/issues/3467), [\#3474](https://github.com/matrix-org/synapse/issues/3474)
+
+Changes in synapse v0.31.2 (2018-06-14)
+=======================================
+
+SECURITY UPDATE: Prevent unauthorised users from setting state events in a room when there is no `m.room.power_levels` event in force in the room. (PR #3397)
+
+Discussion around the Matrix Spec change proposal for this change can be followed at <https://github.com/matrix-org/matrix-doc/issues/1304>.
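+The corrected defaulting can be sketched as below. The precise rule is the
+one settled in the spec discussion linked above, so treat this as an
+approximation rather than Synapse's exact auth code.
+
+```python
+def required_level_for_state(power_levels_content):
+    # The level needed to send a state event defaults to 50 even when the
+    # room has no m.room.power_levels event at all; previously it fell
+    # back to 0, letting any user set state in such rooms.
+    if not power_levels_content:
+        return 50
+    return int(power_levels_content.get("state_default", 50))
+```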
+
+Changes in synapse v0.31.1 (2018-06-08)
+=======================================
+
+v0.31.1 fixes a security bug in the `get_missing_events` federation API where event visibility rules were not applied correctly.
+
+We are not aware of it being actively exploited, but please upgrade as soon as possible.
+
+Bug Fixes:
+
+- Fix event filtering in get\_missing\_events handler (PR #3371)
+
+Changes in synapse v0.31.0 (2018-06-06)
+=======================================
+
+The most notable change from v0.30.0 is the switch to the Python prometheus library to improve system stats reporting. WARNING: this changes a number of prometheus metrics in a backwards-incompatible manner. For more details, see [docs/metrics-howto.rst](docs/metrics-howto.rst#removal-of-deprecated-metrics--time-based-counters-becoming-histograms-in-0310).
+
+Bug Fixes:
+
+- Fix metric documentation tables (PR #3341)
+- Fix LaterGauge error handling (694968f)
+- Fix replication metrics (b7e7fd2)
+
+Changes in synapse v0.31.0-rc1 (2018-06-04)
+===========================================
+
+Features:
+
+- Switch to the Python Prometheus library (PR #3256, #3274)
+- Let users leave the server notice room after joining (PR #3287)
+
+Changes:
+
+- daily user type phone home stats (PR #3264)
+- Use iter\* methods for \_filter\_events\_for\_server (PR #3267)
+- Docs on consent bits (PR #3268)
+- Remove users from user directory on deactivate (PR #3277)
+- Avoid sending consent notice to guest users (PR #3288)
+- disable CPUMetrics if no /proc/self/stat (PR #3299)
+- Consistently use six\'s iteritems and wrap lazy keys/values in list() if they\'re not meant to be lazy (PR #3307)
+- Add private IPv6 addresses to example config for url preview blacklist (PR #3317) Thanks to @thegcat!
+- Reduce stuck read-receipts: ignore depth when updating (PR #3318)
+- Put python\'s logs into Trial when running unit tests (PR #3319)
+
+Changes, python 3 migration:
+
+- Replace some more comparisons with six (PR #3243) Thanks to @NotAFile!
+- replace some iteritems with six (PR #3244) Thanks to @NotAFile!
+- Add batch\_iter to utils (PR #3245) Thanks to @NotAFile!
+- use repr, not str (PR #3246) Thanks to @NotAFile!
+- Misc Python3 fixes (PR #3247) Thanks to @NotAFile!
+- Py3 storage/\_base.py (PR #3278) Thanks to @NotAFile!
+- more six iteritems (PR #3279) Thanks to @NotAFile!
+- More Misc. py3 fixes (PR #3280) Thanks to @NotAFile!
+- remaining isinstance fixes (PR #3281) Thanks to @NotAFile!
+- py3-ize state.py (PR #3283) Thanks to @NotAFile!
+- extend tox testing for py3 to avoid regressions (PR #3302) Thanks to @krombel!
+- use memoryview in py3 (PR #3303) Thanks to @NotAFile!
+
+Bugs:
+
+- Fix federation backfill bugs (PR #3261)
+- federation: fix LaterGauge usage (PR #3328) Thanks to @intelfx!
+
+Changes in synapse v0.30.0 (2018-05-24)
+=======================================
+
+\'Server Notices\' are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server.
+
+They are used as part of communicating the server policies (see `docs/consent_tracking.md`); however, the intention is that they may also find a use for features such as \"Message of the day\".
+
+This feature is specific to Synapse, but uses standard Matrix communication mechanisms, so should work with any Matrix client. For more details, see `docs/server_notices.md`.
+
+Further Server Notices/Consent Tracking Support:
+
+- Allow overriding the server\_notices user\'s avatar (PR #3273)
+- Use the localpart in the consent uri (PR #3272)
+- Support for putting %(consent\_uri)s in messages (PR #3271)
+- Block attempts to send server notices to remote users (PR #3270)
+- Docs on consent bits (PR #3268)
+
+Changes in synapse v0.30.0-rc1 (2018-05-23)
+===========================================
+
+Server Notices/Consent Tracking Support:
+
+- ConsentResource to gather policy consent from users (PR #3213)
+- Move RoomCreationHandler out of synapse.handlers.Handlers (PR #3225)
+- Infrastructure for a server notices room (PR #3232)
+- Send users a server notice about consent (PR #3236)
+- Reject attempts to send event before privacy consent is given (PR #3257)
+- Add a \'has\_consented\' template var to consent forms (PR #3262)
+- Fix dependency on jinja2 (PR #3263)
+
+Features:
+
+- Cohort analytics (PR #3163, #3241, #3251)
+- Add lxml to docker image for web previews (PR #3239) Thanks to @ptman!
+- Add in flight request metrics (PR #3252)
+
+Changes:
+
+- Remove unused update\_external\_syncs (PR #3233)
+- Use stream rather than depth ordering for push actions (PR #3212)
+- Make purge\_history operate on tokens (PR #3221)
+- Don\'t support limitless pagination (PR #3265)
+
+Bug Fixes:
+
+- Fix logcontext resource usage tracking (PR #3258)
+- Fix error in handling receipts (PR #3235)
+- Stop the transaction cache caching failures (PR #3255)
+
+Changes in synapse v0.29.1 (2018-05-17)
+=======================================
+
+Changes:
+
+- Update docker documentation (PR #3222)
+
+Changes in synapse v0.29.0 (2018-05-16)
+=======================================
+
+No changes since v0.29.0-rc1
+
+Changes in synapse v0.29.0-rc1 (2018-05-14)
+===========================================
+
+Notable changes include a Dockerfile for running Synapse (thanks to @kaiyou!) and the closing of a spec bug in the Client-Server API, along with further preparation for the Python 3 migration.
+
+Potentially breaking change:
+
+- Make Client-Server API return 401 for invalid token (PR #3161).
+
+ This changes the Client-server spec to return a 401 error code instead of 403 when the access token is unrecognised. This is the behaviour required by the specification, but some clients may be relying on the old, incorrect behaviour.
+
+ Thanks to @NotAFile for fixing this.
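+    For client authors, the practical upshot is that an unrecognised access
+    token now yields a 401. A small sketch (hypothetical homeserver URL and
+    token):
+
+    ```python
+    import requests
+
+    resp = requests.get(
+        "https://matrix.example.com/_matrix/client/r0/sync",
+        headers={"Authorization": "Bearer STALE_TOKEN"},
+    )
+    if resp.status_code in (401, 403):
+        # 401 is the spec-mandated code as of this release; older servers
+        # returned 403, so robust clients handle both.
+        print("token rejected:", resp.json().get("errcode"))
+    ```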
+
+Features:
+
+- Add a Dockerfile for synapse (PR #2846) Thanks to @kaiyou!
+
+Changes - General:
+
+- nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77!
+- Part user from rooms on account deactivate (PR #3201)
+- Make \'unexpected logging context\' into warnings (PR #3007)
+- Set Server header in SynapseRequest (PR #3208)
+- remove duplicates from groups tables (PR #3129)
+- Improve exception handling for background processes (PR #3138)
+- Add missing consumeErrors to improve exception handling (PR #3139)
+- reraise exceptions more carefully (PR #3142)
+- Remove redundant call to preserve\_fn (PR #3143)
+- Trap exceptions thrown within run\_in\_background (PR #3144)
+
+Changes - Refactors:
+
+- Refactor /context to reuse pagination storage functions (PR #3193)
+- Refactor recent events func to use pagination func (PR #3195)
+- Refactor pagination DB API to return concrete type (PR #3196)
+- Refactor get\_recent\_events\_for\_room return type (PR #3198)
+- Refactor sync APIs to reuse pagination API (PR #3199)
+- Remove unused code path from member change DB func (PR #3200)
+- Refactor request handling wrappers (PR #3203)
+- transaction\_id, destination defined twice (PR #3209) Thanks to @damir-manapov!
+- Refactor event storage to prepare for changes in state calculations (PR #3141)
+- Set Server header in SynapseRequest (PR #3208)
+- Use deferred.addTimeout instead of time\_bound\_deferred (PR #3127, #3178)
+- Use run\_in\_background in preference to preserve\_fn (PR #3140)
+
+Changes - Python 3 migration:
+
+- Construct HMAC as bytes on py3 (PR #3156) Thanks to @NotAFile!
+- run config tests on py3 (PR #3159) Thanks to @NotAFile!
+- Open certificate files as bytes (PR #3084) Thanks to @NotAFile!
+- Open config file in non-bytes mode (PR #3085) Thanks to @NotAFile!
+- Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile!
+- Use six.moves.urlparse (PR #3108) Thanks to @NotAFile!
+- Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile!
+- Don\'t yield in list comprehensions (PR #3150) Thanks to @NotAFile!
+- Move more xrange to six (PR #3151) Thanks to @NotAFile!
+- make imports local (PR #3152) Thanks to @NotAFile!
+- move httplib import to six (PR #3153) Thanks to @NotAFile!
+- Replace stringIO imports with six (PR #3154, #3168) Thanks to @NotAFile!
+- more bytes strings (PR #3155) Thanks to @NotAFile!
+
+Bug Fixes:
+
+- synapse fails to start under Twisted \>= 18.4 (PR #3157)
+- Fix a class of logcontext leaks (PR #3170)
+- Fix a couple of logcontext leaks in unit tests (PR #3172)
+- Fix logcontext leak in media repo (PR #3174)
+- Escape label values in prometheus metrics (PR #3175, #3186)
+- Fix \'Unhandled Error\' logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot!
+- Fix logcontext leaks in rate limiter (PR #3183)
+- notifications: Convert next\_token to string according to the spec (PR #3190) Thanks to @mujx!
+- nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77!
+- add guard for None on purge\_history api (PR #3160) Thanks to @krombel!
+
+Changes in synapse v0.28.1 (2018-05-01)
+=======================================
+
+SECURITY UPDATE
+
+- Clamp the allowed values of event depth received over federation to be \[0, 2\^63 - 1\]. This mitigates an attack where malicious events injected with depth = 2\^63 - 1 render rooms unusable. Depth is used to determine the cosmetic ordering of events within a room, and so the ordering of events in such a room will default to using stream\_ordering rather than depth (topological\_ordering). A minimal sketch of the clamp appears after this list.
+
+ This is a temporary solution to mitigate abuse in the wild, whilst a long term solution is being implemented to improve how the depth parameter is used.
+
+ Full details at <https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI>
+
+- Pin Twisted to \<18.4 until we stop using the private \_OpenSSLECCurve API.
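+A minimal sketch of the depth clamp described above (the bounds come from the
+canonical-JSON integer range; this is illustrative, not Synapse's exact code):
+
+```python
+MAX_DEPTH = 2**63 - 1  # upper bound of the canonical-JSON integer range
+
+def clamp_depth(depth):
+    # Clamp event depth received over federation into [0, 2**63 - 1].
+    return max(0, min(int(depth), MAX_DEPTH))
+```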
+
+Changes in synapse v0.28.0 (2018-04-26)
+=======================================
+
+Bug Fixes:
+
+- Fix quarantine media admin API and search reindex (PR #3130)
+- Fix media admin APIs (PR #3134)
+
+Changes in synapse v0.28.0-rc1 (2018-04-24)
+===========================================
+
+Minor performance improvement to federation sending and bug fixes.
+
+(Note: This release does not include the delta state resolution implementation discussed in Matrix Live)
+
+Features:
+
+- Add metrics for event processing lag (PR #3090)
+- Add metrics for ResponseCache (PR #3092)
+
+Changes:
+
+- Synapse on PyPy (PR #2760) Thanks to @Valodim!
+- move handling of auto\_join\_rooms to RegisterHandler (PR #2996) Thanks to @krombel!
+- Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh!
+- Document the behaviour of ResponseCache (PR #3059)
+- Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile!
+- update prometheus dashboard to use new metric names (PR #3069) Thanks to @krombel!
+- use python3-compatible prints (PR #3074) Thanks to @NotAFile!
+- Send federation events concurrently (PR #3078)
+- Limit concurrent event sends for a room (PR #3079)
+- Improve R30 stat definition (PR #3086)
+- Send events to ASes concurrently (PR #3088)
+- Refactor ResponseCache usage (PR #3093)
+- Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh!
+- Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile!
+- Use six.itervalues in some places (PR #3106) Thanks to @NotAFile!
+- Refactor store.have\_events (PR #3117)
+
+Bug Fixes:
+
+- Return 401 for invalid access\_token on logout (PR #2938) Thanks to @dklug!
+- Return a 404 rather than a 500 on rejoining empty rooms (PR #3080)
+- fix federation\_domain\_whitelist (PR #3099)
+- Avoid creating events with huge numbers of prev\_events (PR #3113)
+- Reject events which have lots of prev\_events (PR #3118)
+
+Changes in synapse v0.27.4 (2018-04-13)
+=======================================
+
+Changes:
+
+- Update canonicaljson dependency (\#3095)
+
+Changes in synapse v0.27.3 (2018-04-11)
+=======================================
+
+Bug fixes:
+
+- URL quote path segments over federation (\#3082)
+
+Changes in synapse v0.27.3-rc2 (2018-04-09)
+===========================================
+
+v0.27.3-rc1 used a stale version of the develop branch, so the changelog overstates the functionality. v0.27.3-rc2 is up to date; rc1 should be ignored.
+
+Changes in synapse v0.27.3-rc1 (2018-04-09)
+===========================================
+
+Notable changes include API support for joinability of groups, along with new metrics and phone-home stats. The phone-home stats give better visibility of system usage, so we can tune Synapse to work better for all users rather than relying solely on our own experience with matrix.org. We also now record the \'r30\' stat, which is the measure we use to track overall growth of the Matrix ecosystem. It counts the number of native 30-day retained users, defined as users:
+
+- who created their account more than 30 days ago;
+- who were last seen at most 30 days ago; and
+- whose account creation and last\_seen are more than 30 days apart.
+
+Features:
+
+- Add joinability for groups (PR #3045)
+- Implement group join API (PR #3046)
+- Add counter metrics for calculating state delta (PR #3033)
+- R30 stats (PR #3041)
+- Measure time it takes to calculate state group ID (PR #3043)
+- Add basic performance statistics to phone home (PR #3044)
+- Add response size metrics (PR #3071)
+- phone home cache size configurations (PR #3063)
+
+Changes:
+
+- Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live!
+- Replace old style error catching with \'as\' keyword (PR #3000) Thanks to @NotAFile!
+- Use .iter\* to avoid copies in StateHandler (PR #3006)
+- Linearize calls to \_generate\_user\_id (PR #3029)
+- Remove last usage of ujson (PR #3030)
+- Use simplejson throughout (PR #3048)
+- Use static JSONEncoders (PR #3049)
+- Remove uses of events.content (PR #3060)
+- Improve database cache performance (PR #3068)
+
+Bug fixes:
+
+- Add room\_id to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte!
+- Fix replication after switch to simplejson (PR #3015)
+- 404 correctly on missing paths via NoResource (PR #3022)
+- Fix error when claiming e2e keys from offline servers (PR #3034)
+- fix tests/storage/test\_user\_directory.py (PR #3042)
+- use PUT instead of POST for federating groups/m.join\_policy (PR #3070) Thanks to @krombel!
+- postgres port script: fix state\_groups\_pkey error (PR #3072)
+
+Changes in synapse v0.27.2 (2018-03-26)
+=======================================
+
+Bug fixes:
+
+- Fix bug which broke TCP replication between workers (PR #3015)
+
+Changes in synapse v0.27.1 (2018-03-26)
+=======================================
+
+Meta release as v0.27.0 temporarily pointed to the wrong commit
+
+Changes in synapse v0.27.0 (2018-03-26)
+=======================================
+
+No changes since v0.27.0-rc2
+
+Changes in synapse v0.27.0-rc2 (2018-03-19)
+===========================================
+
+Pulls in v0.26.1
+
+Bug fixes:
+
+- Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005)
+
+Changes in synapse v0.26.1 (2018-03-15)
+=======================================
+
+Bug fixes:
+
+- Fix bug where an invalid event caused server to stop functioning correctly, due to parsing and serializing bugs in ujson library (PR #3008)
+
+Changes in synapse v0.27.0-rc1 (2018-03-14)
+===========================================
+
+The common case for running Synapse is not to run separate workers, but for those that do, be aware that synctl no longer starts the main synapse when using the `-a` option with workers. A new worker file should be added with `worker_app: synapse.app.homeserver`.
+
+This release also begins the process of renaming a number of the metrics reported to prometheus. See [docs/metrics-howto.rst](docs/metrics-howto.rst#block-and-response-metrics-renamed-for-0-27-0). Note that the v0.28.0 release will remove the deprecated metric names.
+
+Features:
+
+- Add ability for ASes to override message send time (PR #2754)
+- Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
+- Add purge API features, see [docs/admin\_api/purge\_history\_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
+- Add support for whitelisting 3PIDs that users can register. (PR #2813)
+- Add `/room/{id}/event/{id}` API (PR #2766)
+- Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
+- Add `federation_domain_whitelist` option (PR #2820, #2821)
+
+Changes:
+
+- Continue to factor out processing from main process and into worker processes. See updated [docs/workers.rst](docs/workers.rst) (PR #2892 - \#2904, #2913, #2920 - \#2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - \#2984, #2987 - \#2989, #2991 - \#2993, #2995, #2784)
+- Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
+- Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
+- No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
+- Remove `verbosity`/`log_file` from generated config (PR #2755)
+- Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
+- When using synctl with workers, don\'t start the main synapse automatically (PR #2774)
+- Minor performance improvements (PR #2773, #2792)
+- Use a connection pool for non-federation outbound connections (PR #2817)
+- Make it possible to run unit tests against postgres (PR #2829)
+- Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp!
+- Remove ability for AS users to call /events and /sync (PR #2948)
+- Use bcrypt.checkpw (PR #2949) Thanks to @krombel!
+
+Bug fixes:
+
+- Fix broken `ldap_config` config option (PR #2683) Thanks to @seckrv!
+- Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live!
+- Fix publicised groups GET API (singular) over federation (PR #2772)
+- Fix user directory when using `user_directory_search_all_users` config option (PR #2803, #2831)
+- Fix error on `/publicRooms` when no rooms exist (PR #2827)
+- Fix bug in quarantine\_media (PR #2837)
+- Fix url\_previews when no Content-Type is returned from URL (PR #2845)
+- Fix rare race in sync API when joining room (PR #2944)
+- Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)
+
+Changes in synapse v0.26.0 (2018-01-05)
+=======================================
+
+No changes since v0.26.0-rc1
+
+Changes in synapse v0.26.0-rc1 (2017-12-13)
+===========================================
+
+Features:
+
+- Add ability for ASes to publicise groups for their users (PR #2686)
+- Add all local users to the user\_directory and optionally search them (PR #2723)
+- Add support for custom login types for validating users (PR #2729)
+
+Changes:
+
+- Update example Prometheus config to new format (PR #2648) Thanks to @krombel!
+- Rename redact\_content option to include\_content in Push API (PR #2650)
+- Declare support for r0.3.0 (PR #2677)
+- Improve upserts (PR #2684, #2688, #2689, #2713)
+- Improve documentation of workers (PR #2700)
+- Improve tracebacks on exceptions (PR #2705)
+- Allow guest access to group APIs for reading (PR #2715)
+- Support for posting content in federation\_client script (PR #2716)
+- Delete devices and pushers on logouts etc (PR #2722)
+
+Bug fixes:
+
+- Fix database port script (PR #2673)
+- Fix internal server error on login with ldap\_auth\_provider (PR #2678) Thanks to @jkolo!
+- Fix error on sqlite 3.7 (PR #2697)
+- Fix OPTIONS on preview\_url (PR #2707)
+- Fix error handling on dns lookup (PR #2711)
+- Fix wrong avatars when inviting multiple users when creating room (PR #2717)
+- Fix 500 when joining matrix-dev (PR #2719)
+
+Changes in synapse v0.25.1 (2017-11-17)
+=======================================
+
+Bug fixes:
+
+- Fix login with LDAP and other password provider modules (PR #2678). Thanks to @jkolo!
+
+Changes in synapse v0.25.0 (2017-11-15)
+=======================================
+
+Bug fixes:
+
+- Fix port script (PR #2673)
+
+Changes in synapse v0.25.0-rc1 (2017-11-14)
+===========================================
+
+Features:
+
+- Add is\_public to groups table to allow for private groups (PR #2582)
+- Add a route for determining who you are (PR #2668) Thanks to @turt2live!
+- Add more features to the password providers (see the skeleton after this list) (PR #2608, #2610, #2620, #2622, #2623, #2624, #2626, #2628, #2629)
+- Add a hook for custom rest endpoints (PR #2627)
+- Add API to update group room visibility (PR #2651)
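+For context, a password provider is a Python module that Synapse loads to
+delegate password checking. The skeleton below reflects the interface as
+documented around this era; treat the exact method names as assumptions and
+check docs/password_auth_providers.rst.
+
+```python
+from twisted.internet import defer
+
+class ExamplePasswordProvider(object):
+    def __init__(self, config, account_handler):
+        self.account_handler = account_handler
+
+    @staticmethod
+    def parse_config(config):
+        return config
+
+    def check_password(self, user_id, password):
+        # Return a Deferred firing True to accept the login.
+        return defer.succeed(password == "letmein")  # illustration only
+```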
+
+Changes:
+
+- Ignore \<noscript\> tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt!
+- Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to @krombel!
+- Support /keys/upload on /r0 as well as /unstable (PR #2585)
+- Front-end proxy: pass through auth header (PR #2586)
+- Allow ASes to deactivate their own users (PR #2589)
+- Remove refresh tokens (PR #2613)
+- Automatically set default displayname on register (PR #2617)
+- Log login requests (PR #2618)
+- Always return is\_public in the /groups/:group\_id/rooms API (PR #2630)
+- Avoid no-op media deletes (PR #2637) Thanks to @spantaleev!
+- Fix various embarrassing typos around user\_directory and add some doc. (PR #2643)
+- Return whether a user is an admin within a group (PR #2647)
+- Namespace visibility options for groups (PR #2657)
+- Downcase UserIDs on registration (PR #2662)
+- Cache failures when fetching URL previews (PR #2669)
+
+Bug fixes:
+
+- Fix port script (PR #2577)
+- Fix error when running synapse with no logfile (PR #2581)
+- Fix UI auth when deleting devices (PR #2591)
+- Fix typo when checking if user is invited to group (PR #2599)
+- Fix the port script to drop NUL values in all tables (PR #2611)
+- Fix appservices being backlogged and not receiving new events due to a bug in notify\_interested\_services (PR #2631) Thanks to @xyzz!
+- Fix updating rooms avatar/display name when modified by admin (PR #2636) Thanks to @farialima!
+- Fix bug in state group storage (PR #2649)
+- Fix 500 on invalid utf-8 in request (PR #2663)
+
+Changes in synapse v0.24.1 (2017-10-24)
+=======================================
+
+Bug fixes:
+
+- Fix updating group profiles over federation (PR #2567)
+
+Changes in synapse v0.24.0 (2017-10-23)
+=======================================
+
+No changes since v0.24.0-rc1
+
+Changes in synapse v0.24.0-rc1 (2017-10-19)
+===========================================
+
+Features:
+
+- Add Group Server (PR #2352, #2363, #2374, #2377, #2378, #2382, #2410, #2426, #2430, #2454, #2471, #2472, #2544)
+- Add support for channel notifications (PR #2501)
+- Add basic implementation of backup media store (PR #2538)
+- Add config option to auto-join new users to rooms (PR #2545)
+
+Changes:
+
+- Make the spam checker a module (PR #2474)
+- Delete expired url cache data (PR #2478)
+- Ignore incoming events for rooms that we have left (PR #2490)
+- Allow spam checker to reject invites too (PR #2492)
+- Add room creation checks to spam checker (PR #2495)
+- Spam checking: add the invitee to user\_may\_invite (PR #2502)
+- Process events from federation for different rooms in parallel (PR #2520)
+- Allow error strings from spam checker (PR #2531)
+- Improve error handling for missing files in config (PR #2551)
+
+Bug fixes:
+
+- Fix handling SERVFAILs when doing AAAA lookups for federation (PR #2477)
+- Fix incompatibility with newer versions of ujson (PR #2483) Thanks to @jeremycline!
+- Fix notification keywords that start/end with non-word chars (PR #2500)
+- Fix stack overflow and logcontexts from linearizer (PR #2532)
+- Fix 500 error when fields missing from power\_levels event (PR #2552)
+- Fix 500 error when we get an error handling a PDU (PR #2553)
+
+Changes in synapse v0.23.1 (2017-10-02)
+=======================================
+
+Changes:
+
+- Make \'affinity\' package optional, as it is not supported on some platforms
+
+Changes in synapse v0.23.0 (2017-10-02)
+=======================================
+
+No changes since v0.23.0-rc2
+
+Changes in synapse v0.23.0-rc2 (2017-09-26)
+===========================================
+
+Bug fixes:
+
+- Fix regression in performance of syncs (PR #2470)
+
+Changes in synapse v0.23.0-rc1 (2017-09-25)
+===========================================
+
+Features:
+
+- Add a frontend proxy worker (PR #2344)
+- Add support for event\_id\_only push format (PR #2450)
+- Add a PoC for filtering spammy events (PR #2456)
+- Add a config option to block all room invites (PR #2457)
+
+Changes:
+
+- Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias!
+- Improve performance of generating push notifications (PR #2343, #2357, #2365, #2366, #2371)
+- Improve DB performance for device list handling in sync (PR #2362)
+- Include a sample prometheus config (PR #2416)
+- Document known to work postgres version (PR #2433) Thanks to @ptman!
+
+Bug fixes:
+
+- Fix caching error in the push evaluator (PR #2332)
+- Fix bug where pusherpool didn\'t start and broke some rooms (PR #2342)
+- Fix port script for user directory tables (PR #2375)
+- Fix device lists notifications when user rejoins a room (PR #2443, #2449)
+- Fix sync to always send down current state events in timeline (PR #2451)
+- Fix bug where guest users were incorrectly kicked (PR #2453)
+- Fix bug talking to IPv6 only servers using SRV records (PR #2462)
+
+Changes in synapse v0.22.1 (2017-07-06)
+=======================================
+
+Bug fixes:
+
+- Fix bug where pusher pool didn\'t start and caused issues when interacting with some rooms (PR #2342)
+
+Changes in synapse v0.22.0 (2017-07-06)
+=======================================
+
+No changes since v0.22.0-rc2
+
+Changes in synapse v0.22.0-rc2 (2017-07-04)
+===========================================
+
+Changes:
+
+- Improve performance of storing user IPs (PR #2307, #2308)
+- Slightly improve performance of verifying access tokens (PR #2320)
+- Slightly improve performance of event persistence (PR #2321)
+- Increase default cache factor size from 0.1 to 0.5 (PR #2330)
+
+Bug fixes:
+
+- Fix bug with storing registration sessions that caused frequent CPU churn (PR #2319)
+
+Changes in synapse v0.22.0-rc1 (2017-06-26)
+===========================================
+
+Features:
+
+- Add a user directory API (PR #2252, and many more)
+- Add shutdown room API to remove room from local server (PR #2291)
+- Add API to quarantine media (PR #2292)
+- Add new config option to not send event contents to push servers (PR #2301) Thanks to @cjdelisle!
+
+Changes:
+
+- Various performance fixes (PR #2177, #2233, #2230, #2238, #2248, #2256, #2274)
+- Deduplicate sync filters (PR #2219) Thanks to @krombel!
+- Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist!
+- Add count of one time keys to sync stream (PR #2237)
+- Only store event\_auth for state events (PR #2247)
+- Store URL cache preview downloads separately (PR #2299)
+
+Bug fixes:
+
+- Fix users not getting notifications when AS listened to that user\_id (PR #2216) Thanks to @slipeer!
+- Fix users without push set up not getting notifications after joining rooms (PR #2236)
+- Fix preview url API to trim long descriptions (PR #2243)
+- Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state of restart (PR #2263)
+- Fix removing of pushers when using workers (PR #2267)
+- Fix CORS headers to allow Authorization header (PR #2285) Thanks to @krombel!
+
+Changes in synapse v0.21.1 (2017-06-15)
+=======================================
+
+Bug fixes:
+
+- Fix bug in anonymous usage statistic reporting (PR #2281)
+
+Changes in synapse v0.21.0 (2017-05-18)
+=======================================
+
+No changes since v0.21.0-rc3
+
+Changes in synapse v0.21.0-rc3 (2017-05-17)
+===========================================
+
+Features:
+
+- Add per user rate-limiting overrides (PR #2208)
+- Add config option to limit maximum number of events requested by `/sync` and `/messages` (PR #2221) Thanks to @psaavedra!
+
+Changes:
+
+- Various small performance fixes (PR #2201, #2202, #2224, #2226, #2227, #2228, #2229)
+- Update username availability checker API (PR #2209, #2213)
+- When purging, don\'t de-delta state groups we\'re about to delete (PR #2214)
+- Documentation to check synapse version (PR #2215) Thanks to @hamber-dick!
+- Add an index to event\_search to speed up purge history API (PR #2218)
+
+Bug fixes:
+
+- Fix API to allow clients to upload one-time-keys with new sigs (PR #2206)
+
+Changes in synapse v0.21.0-rc2 (2017-05-08)
+===========================================
+
+Changes:
+
+- Always mark remotes as up if we receive a signed request from them (PR #2190)
+
+Bug fixes:
+
+- Fix bug where users got pushed for rooms they had muted (PR #2200)
+
+Changes in synapse v0.21.0-rc1 (2017-05-08)
+===========================================
+
+Features:
+
+- Add username availability checker API (PR #2183)
+- Add read marker API (PR #2120)
+
+Changes:
+
+- Enable guest access for the 3pl/3pid APIs (PR #1986)
+- Add setting to support TURN for guests (PR #2011)
+- Various performance improvements (PR #2075, #2076, #2080, #2083, #2108, #2158, #2176, #2185)
+- Make synctl a bit more user friendly (PR #2078, #2127) Thanks @APwhitehat!
+- Replace HTTP replication with TCP replication (PR #2082, #2097, #2098, #2099, #2103, #2014, #2016, #2115, #2116, #2117)
+- Support authenticated SMTP (PR #2102) Thanks @DanielDent!
+- Add a counter metric for successfully-sent transactions (PR #2121)
+- Propagate errors sensibly from proxied IS requests (PR #2147)
+- Add more granular event send metrics (PR #2178)
+
+Bug fixes:
+
+- Fix nuke-room script to work with current schema (PR #1927) Thanks @zuckschwerdt!
+- Fix db port script to not assume postgres tables are in the public schema (PR #2024) Thanks @jerrykan!
+- Fix getting latest device IP for user with no devices (PR #2118)
+- Fix rejection of invites to unreachable servers (PR #2145)
+- Fix code for reporting old verify keys in synapse (PR #2156)
+- Fix invite state to always include all events (PR #2163)
+- Fix bug where synapse would always fetch state for any missing event (PR #2170)
+- Fix a leak with timed out HTTP connections (PR #2180)
+- Fix bug where we didn\'t time out HTTP requests to ASes (PR #2192)
+
+Docs:
+
+- Clarify doc for SQLite to PostgreSQL port (PR #1961) Thanks @benhylau!
+- Fix typo in synctl help (PR #2107) Thanks @HarHarLinks!
+- `web_client_location` documentation fix (PR #2131) Thanks @matthewjwolff!
+- Update README.rst with FreeBSD changes (PR #2132) Thanks @feld!
+- Clarify setting up metrics (PR #2149) Thanks @encks!
+
+Changes in synapse v0.20.0 (2017-04-11)
+=======================================
+
+Bug fixes:
+
+- Fix joining rooms over federation where not all servers in the room saw the new server had joined (PR #2094)
+
+Changes in synapse v0.20.0-rc1 (2017-03-30)
+===========================================
+
+Features:
+
+- Add delete\_devices API (PR #1993)
+- Add phone number registration/login support (PR #1994, #2055)
+
+Changes:
+
+- Use JSONSchema for validation of filters (see the sketch after this list). Thanks @pik! (PR #1783)
+- Reread log config on SIGHUP (PR #1982)
+- Speed up public room list (PR #1989)
+- Add helpful texts to logger config options (PR #1990)
+- Minor `/sync` performance improvements. (PR #2002, #2013, #2022)
+- Add some debug to help diagnose weird federation issue (PR #2035)
+- Correctly limit retries for all federation requests (PR #2050, #2061)
+- Don\'t lock table when persisting new one time keys (PR #2053)
+- Reduce some CPU work on DB threads (PR #2054)
+- Cache hosts in room (PR #2060)
+- Batch sending of device list pokes (PR #2063)
+- Speed up persist event path in certain edge cases (PR #2070)
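+A minimal sketch of schema-based filter validation in the spirit of that
+change (the schema here is illustrative, not the one Synapse ships):
+
+```python
+import jsonschema
+
+FILTER_SCHEMA = {
+    "type": "object",
+    "properties": {
+        "limit": {"type": "integer", "minimum": 0},
+        "types": {"type": "array", "items": {"type": "string"}},
+    },
+}
+
+# Raises jsonschema.ValidationError on bad input, e.g. a negative limit.
+jsonschema.validate({"limit": 10, "types": ["m.room.message"]}, FILTER_SCHEMA)
+```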
+
+Bug fixes:
+
+- Fix bug where current\_state\_events renamed to current\_state\_ids (PR #1849)
+- Fix routing loop when fetching remote media (PR #1992)
+- Fix current\_state\_events table to not lie (PR #1996)
+- Fix CAS login to handle PartialDownloadError (PR #1997)
+- Fix assertion to stop transaction queue getting wedged (PR #2010)
+- Fix presence to fallback to last\_active\_ts if it beats the last sync time. Thanks @Half-Shot! (PR #2014)
+- Fix bug when federation received a PDU while a room join is in progress (PR #2016)
+- Fix resetting state on rejected events (PR #2025)
+- Fix installation issues in readme. Thanks @ricco386 (PR #2037)
+- Fix caching of remote servers\' signature keys (PR #2042)
+- Fix some leaking log context (PR #2048, #2049, #2057, #2058)
+- Fix rejection of invites not reaching sync (PR #2056)
+
+Changes in synapse v0.19.3 (2017-03-20)
+=======================================
+
+No changes since v0.19.3-rc2
+
+Changes in synapse v0.19.3-rc2 (2017-03-13)
+===========================================
+
+Bug fixes:
+
+- Fix bug in handling of incoming device list updates over federation.
+
+Changes in synapse v0.19.3-rc1 (2017-03-08)
+===========================================
+
+Features:
+
+- Add some administration functionalities. Thanks to morteza-araby! (PR #1784)
+
+Changes:
+
+- Reduce database table sizes (PR #1873, #1916, #1923, #1963)
+- Update contrib/ to not use syutil. Thanks to andrewshadura! (PR #1907)
+- Don\'t fetch current state when sending an event in common case (PR #1955)
+
+Bug fixes:
+
+- Fix synapse\_port\_db failure. Thanks to Pneumaticat! (PR #1904)
+- Fix caching to not cache error responses (PR #1913)
+- Fix APIs to make kick & ban reasons work (PR #1917)
+- Fix bugs in the /keys/changes api (PR #1921)
+- Fix bug where users couldn\'t forget rooms they were banned from (PR #1922)
+- Fix issue with long language values in pushers API (PR #1925)
+- Fix a race in transaction queue (PR #1930)
+- Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! (PR #1945)
+- Fix device list update to not constantly resync (PR #1964)
+- Fix potential for huge memory usage when getting device that have changed (PR #1969)
+
+Changes in synapse v0.19.2 (2017-02-20)
+=======================================
+
+- Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for pointing it out! (PR #1929)
+
+Changes in synapse v0.19.1 (2017-02-09)
+=======================================
+
+- Fix bug where state was incorrectly reset in a room when synapse received an event over federation that did not pass auth checks (PR #1892)
+
+Changes in synapse v0.19.0 (2017-02-04)
+=======================================
+
+No changes since RC 4.
+
+Changes in synapse v0.19.0-rc4 (2017-02-02)
+===========================================
+
+- Bump cache sizes for common membership queries (PR #1879)
+
+Changes in synapse v0.19.0-rc3 (2017-02-02)
+===========================================
+
+- Fix email push in pusher worker (PR #1875)
+- Make presence.get\_new\_events a bit faster (PR #1876)
+- Make /keys/changes a bit more performant (PR #1877)
+
+Changes in synapse v0.19.0-rc2 (2017-02-02)
+===========================================
+
+- Include newly joined users in /keys/changes API (PR #1872)
+
+Changes in synapse v0.19.0-rc1 (2017-02-02)
+===========================================
+
+Features:
+
+- Add support for specifying multiple bind addresses (PR #1709, #1712, #1795, #1835). Thanks to @kyrias!
+- Add /account/3pid/delete endpoint (PR #1714)
+- Add config option to configure the Riot URL used in notification emails (PR #1811). Thanks to @aperezdc!
+- Add username and password config options for turn server (PR #1832). Thanks to @xsteadfastx!
+- Implement device lists updates over federation (PR #1857, #1861, #1864)
+- Implement /keys/changes (PR #1869, #1872)
+
+Changes:
+
+- Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph!
+- Log which files we saved attachments to in the media_repository (PR #1791)
+- Linearize updates to membership via PUT /state/ to better handle multiple joins (PR #1787)
+- Limit number of entries to prefill from cache on startup (PR #1792)
+- Remove full_twisted_stacktraces option (PR #1802)
+- Measure size of some caches by sum of the size of cached values (PR #1815)
+- Measure metrics of string_cache (PR #1821)
+- Reduce logging verbosity (PR #1822, #1823, #1824)
+- Don't clobber a displayname or avatar_url if provided by an m.room.member event (PR #1852)
+- Better handle 401/404 response for federation /send/ (PR #1866, #1871)
+
+Fixes:
+
+- Fix ability to change password to a non-ascii one (PR #1711)
+- Fix push getting stuck due to looking at the wrong view of state (PR #1820)
+- Fix email address comparison to be case insensitive (PR #1827)
+- Fix occasional inconsistencies of room membership (PR #1836, #1840)
+
+Performance:
+
+- Don't block message sending on bumping presence (PR #1789)
+- Change device_inbox stream index to include user (PR #1793)
+- Optimise state resolution (PR #1818)
+- Use DB cache of joined users for presence (PR #1862)
+- Add an index to make membership queries faster (PR #1867)
+
+Changes in synapse v0.18.7 (2017-01-09)
+=======================================
+
+No changes from v0.18.7-rc2
+
+Changes in synapse v0.18.7-rc2 (2017-01-07)
+===========================================
+
+Bug fixes:
+
+- Fix error in rc1's logic for discarding invalid inbound traffic, which was incorrectly discarding missing events
+
+Changes in synapse v0.18.7-rc1 (2017-01-06)
+===========================================
+
+Bug fixes:
+
+- Fix error in PR #1764 to actually fix the nightmare #1753 bug.
+- Improve deadlock logging further
+- Discard inbound federation traffic from invalid domains, to immunise against #1753
+
+Changes in synapse v0.18.6 (2017-01-06)
+=======================================
+
+Bug fixes:
+
+- Fix bug when checking if a guest user is allowed to join a room (PR #1772). Thanks to Patrik Oldsberg for diagnosing and fixing it!
+
+Changes in synapse v0.18.6-rc3 (2017-01-05)
+===========================================
+
+Bug fixes:
+
+- Fix bug where we failed to send ban events to the banned server (PR #1758)
+- Fix bug where we sent events that didn't originate on this server to other servers (PR #1764)
+- Fix bug where processing an event from a remote server took a long time because we were making long HTTP requests (PR #1765, PR #1744)
+
+Changes:
+
+- Improve logging for debugging deadlocks (PR #1766, PR #1767)
+
+Changes in synapse v0.18.6-rc2 (2016-12-30)
+===========================================
+
+Bug fixes:
+
+- Fix memory leak in twisted by initialising logging correctly (PR #1731)
+- Fix bug where fetching missing events took an unacceptable amount of time in large rooms (PR #1734)
+
+Changes in synapse v0.18.6-rc1 (2016-12-29)
+===========================================
+
+Bug fixes:
+
+- Make sure that outbound connections are closed (PR #1725)
+
+Changes in synapse v0.18.5 (2016-12-16)
+=======================================
+
+Bug fixes:
+
+- Fix federation /backfill returning events it shouldn't (PR #1700)
+- Fix crash in url preview (PR #1701)
+
+Changes in synapse v0.18.5-rc3 (2016-12-13)
+===========================================
+
+Features:
+
+- Add support for E2E for guests (PR #1653)
+- Add new appservice-specific public room list API (PR #1676)
+- Add new room membership APIs (PR #1680)
+
+Changes:
+
+- Enable guest access for private rooms by default (PR #653)
+- Limit the number of events that can be created on a given room concurrently (PR #1620)
+- Log the args that we have on UI auth completion (PR #1649)
+- Stop generating refresh_tokens (PR #1654)
+- Stop putting a time caveat on access tokens (PR #1656)
+- Remove unspecced GET endpoints for e2e keys (PR #1694)
+
+Bug fixes:
+
+- Fix handling of 500 and 429s over federation (PR #1650)
+- Fix Content-Type header parsing (PR #1660)
+- Fix error when previewing sites that include unicode, thanks to kyrias (PR #1664)
+- Fix some cases where we drop read receipts (PR #1678)
+- Fix bug where calls to `/sync` didn't correctly time out (PR #1683)
+- Fix bug where E2E key query would fail if a single remote host failed (PR #1686)
+
+Changes in synapse v0.18.5-rc2 (2016-11-24)
+===========================================
+
+Bug fixes:
+
+- Don't send old events over federation, fixes bug in -rc1.
+
+Changes in synapse v0.18.5-rc1 (2016-11-24)
+===========================================
+
+Features:
+
+- Implement \"event\_fields\" in filters (PR #1638)
+
+Changes:
+
+- Use external ldap auth package (PR #1628)
+- Split out federation transaction sending to a worker (PR #1635)
+- Fail with a coherent error message if /sync?filter= is invalid (PR #1636)
+- More efficient notif count queries (PR #1644)
+
+Changes in synapse v0.18.4 (2016-11-22)
+=======================================
+
+Bug fixes:
+
+- Add workaround for buggy clients that fail to register (PR #1632)
+
+Changes in synapse v0.18.4-rc1 (2016-11-14)
+===========================================
+
+Changes:
+
+- Various database efficiency improvements (PR #1188, #1192)
+- Update default config to blacklist more internal IPs, thanks to Euan Kemp (PR #1198)
+- Allow specifying duration in minutes in config, thanks to Daniel Dent (PR #1625)
+
+Bug fixes:
+
+- Fix media repo to set CORS headers on responses (PR #1190)
+- Fix registration to not error on non-ascii passwords (PR #1191)
+- Fix create event code to limit the number of prev_events (PR #1615)
+- Fix bug in transaction ID deduplication (PR #1624)
+
+Changes in synapse v0.18.3 (2016-11-08)
+=======================================
+
+SECURITY UPDATE
+
+Explicitly require authentication when using LDAP3. This is the default on versions of `ldap3` above 1.0, but some distributions will package an older version.
+
+If you are using LDAP3 login and have a version of `ldap3` older than 1.0 it is **CRITICAL to upgrade**.
+
+Changes in synapse v0.18.2 (2016-11-01)
+=======================================
+
+No changes since v0.18.2-rc5
+
+Changes in synapse v0.18.2-rc5 (2016-10-28)
+===========================================
+
+Bug fixes:
+
+- Fix prometheus process metrics in worker processes (PR #1184)
+
+Changes in synapse v0.18.2-rc4 (2016-10-27)
+===========================================
+
+Bug fixes:
+
+- Fix `user_threepids` schema delta, which in some instances prevented startup after upgrade (PR #1183)
+
+Changes in synapse v0.18.2-rc3 (2016-10-27)
+===========================================
+
+Changes:
+
+- Allow clients to supply access tokens as headers (PR #1098)
+- Clarify error codes for GET /filter/, thanks to Alexander Maznev (PR #1164)
+- Make password reset email field case insensitive (PR #1170)
+- Reduce redundant database work in email pusher (PR #1174)
+- Allow configurable rate limiting per AS (PR #1175)
+- Check whether to ratelimit sooner to avoid work (PR #1176)
+- Standardise prometheus metrics (PR #1177)
+
+Bug fixes:
+
+- Fix incredibly slow back pagination query (PR #1178)
+- Fix infinite typing bug (PR #1179)
+
+Changes in synapse v0.18.2-rc2 (2016-10-25)
+===========================================
+
+(This release did not include the changes advertised and was identical to RC1)
+
+Changes in synapse v0.18.2-rc1 (2016-10-17)
+===========================================
+
+Changes:
+
+- Remove redundant event_auth index (PR #1113)
+- Reduce DB hits for replication (PR #1141)
+- Implement pluggable password auth (PR #1155)
+- Remove rate limiting from app service senders and fix get_or_create_user requester, thanks to Patrik Oldsberg (PR #1157)
+- window.postmessage for Interactive Auth fallback (PR #1159)
+- Use sys.executable instead of hardcoded python, thanks to Pedro Larroy (PR #1162)
+- Add config option for adding additional TLS fingerprints (PR #1167)
+- User-interactive auth on delete device (PR #1168)
+
+Bug fixes:
+
+- Fix not being allowed to set your own state_key, thanks to Patrik Oldsberg (PR #1150)
+- Fix interactive auth to return 401 for an incorrect password (PR #1160, #1166)
+- Fix email push notifs being dropped (PR #1169)
+
+Changes in synapse v0.18.1 (2016-10-05)
+=======================================
+
+No changes since v0.18.1-rc1
+
+Changes in synapse v0.18.1-rc1 (2016-09-30)
+===========================================
+
+Features:
+
+- Add total_room_count_estimate to `/publicRooms` (PR #1133)
+
+Changes:
+
+- Time out typing over federation (PR #1140)
+- Restructure LDAP authentication (PR #1153)
+
+Bug fixes:
+
+- Fix 3pid invites when server is already in the room (PR #1136)
+- Fix upgrading with SQLite taking lots of CPU for a few days after upgrade (PR #1144)
+- Fix upgrading from very old database versions (PR #1145)
+- Fix port script to work with recently added tables (PR #1146)
+
+Changes in synapse v0.18.0 (2016-09-19)
+=======================================
+
+The release includes major changes to the state storage database schemas, which significantly reduce database size. Synapse will attempt to upgrade the current data in the background. Servers with a large SQLite database may experience degraded performance while this upgrade is in progress, so you may want to consider migrating to Postgres before upgrading very large SQLite databases.
+
+Changes:
+
+- Make public room search case insensitive (PR #1127)
+
+Bug fixes:
+
+- Fix and clean up publicRooms pagination (PR #1129)
+
+Changes in synapse v0.18.0-rc1 (2016-09-16)
+===========================================
+
+Features:
+
+- Add `only=highlight` on `/notifications` (PR #1081)
+- Add server param to /publicRooms (PR #1082)
+- Allow clients to ask for the whole of a single state event (PR #1094)
+- Add is_direct param to /createRoom (PR #1108)
+- Add pagination support to publicRooms (PR #1121)
+- Add very basic filter API to /publicRooms (PR #1126)
+- Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104, #1111)
+
+Changes:
+
+- Move to storing state_groups_state as deltas, greatly reducing DB size (PR #1065)
+- Reduce amount of state pulled out of the DB during common requests (PR #1069)
+- Allow PDF to be rendered from media repo (PR #1071)
+- Reindex state\_groups\_state after pruning (PR #1085)
+- Clobber EDUs in send queue (PR #1095)
+- Conform better to the CAS protocol specification (PR #1100)
+- Limit how often we ask for keys from dead servers (PR #1114)
+
+Bug fixes:
+
+- Fix /notifications API when used with `from` param (PR #1080)
+- Fix backfill when we cannot find an event. (PR #1107)
+
+Changes in synapse v0.17.3 (2016-09-09)
+=======================================
+
+This release fixes a major bug that stopped servers from handling rooms with over 1000 members.
+
+Changes in synapse v0.17.2 (2016-09-08)
+=======================================
+
+This release contains security bug fixes. Please upgrade.
+
+No changes since v0.17.2-rc1
+
+Changes in synapse v0.17.2-rc1 (2016-09-05)
+===========================================
+
+Features:
+
+- Start adding store-and-forward direct-to-device messaging (PR #1046, #1050, #1062, #1066)
+
+Changes:
+
+- Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063, #1068)
+- Don't notify for online to online presence transitions. (PR #1054)
+- Occasionally persist unpersisted presence updates (PR #1055)
+- Allow application services to have an optional 'url' (PR #1056)
+- Clean up old sent transactions from DB (PR #1059)
+
+Bug fixes:
+
+- Fix None check in backfill (PR #1043)
+- Fix membership changes to be idempotent (PR #1067)
+- Fix bug in get_pdu where it would sometimes return events with an incorrect signature
+
+Changes in synapse v0.17.1 (2016-08-24)
+=======================================
+
+Changes:
+
+- Delete old received_transactions rows (PR #1038)
+- Pass through user-supplied content in /join/$room_id (PR #1039)
+
+Bug fixes:
+
+- Fix bug with backfill (PR #1040)
+
+Changes in synapse v0.17.1-rc1 (2016-08-22)
+===========================================
+
+Features:
+
+- Add notification API (PR #1028)
+
+Changes:
+
+- Don't print stack traces when failing to get remote keys (PR #996)
+- Various federation /event/ perf improvements (PR #998)
+- Only process one local membership event per room at a time (PR #1005)
+- Move default display name push rule (PR #1011, #1023)
+- Fix up preview URL API. Add tests. (PR #1015)
+- Set `Content-Security-Policy` on media repo (PR #1021)
+- Make notify_interested_services faster (PR #1022)
+- Add usage stats to prometheus monitoring (PR #1037)
+
+Bug fixes:
+
+- Fix token login (PR #993)
+- Fix CAS login (PR #994, #995)
+- Fix /sync to not clobber status_msg (PR #997)
+- Fix redacted state events to include prev_content (PR #1003)
+- Fix some bugs in the auth/ldap handler (PR #1007)
+- Fix backfill request to limit URI length, so that remotes don't reject the requests due to path length limits (PR #1012)
+- Fix AS push code to not send duplicate events (PR #1025)
+
+Changes in synapse v0.17.0 (2016-08-08)
+=======================================
+
+This release contains significant security bug fixes regarding authenticating events received over federation. PLEASE UPGRADE.
+
+This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details.
+
+Changes:
+
+- Add federation /version API (PR #990)
+- Make psutil dependency optional (PR #992)
+
+Bug fixes:
+
+- Fix URL preview API to exclude HTML comments in description (PR #988)
+- Fix error handling of remote joins (PR #991)
+
+Changes in synapse v0.17.0-rc4 (2016-08-05)
+===========================================
+
+Changes:
+
+- Change the way we summarize URLs when previewing (PR #973)
+- Add new `/state_ids/` federation API (PR #979)
+- Speed up processing of `/state/` response (PR #986)
+
+Bug fixes:
+
+- Fix event persistence when event has already been partially persisted (PR #975, #983, #985)
+- Fix port script to also copy across backfilled events (PR #982)
+
+Changes in synapse v0.17.0-rc3 (2016-08-02)
+===========================================
+
+Changes:
+
+- Forbid non-ASes from registering users whose names begin with '_' (PR #958)
+- Add some basic admin API docs (PR #963)
+
+Bug fixes:
+
+- Send the correct host header when fetching keys (PR #941)
+- Fix joining a room that has missing auth events (PR #964)
+- Fix various push bugs (PR #966, #970)
+- Fix adding emails on registration (PR #968)
+
+Changes in synapse v0.17.0-rc2 (2016-08-02)
+===========================================
+
+(This release did not include the changes advertised and was identical to RC1)
+
+Changes in synapse v0.17.0-rc1 (2016-07-28)
+===========================================
+
+This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details.
+
+Features:
+
+- Add purge_media_cache admin API (PR #902)
+- Add deactivate account admin API (PR #903)
+- Add optional pepper to password hashing (PR #907, #910 by KentShikama)
+- Add an admin option to shared secret registration (breaks backwards compat) (PR #909)
+- Add purge local room history API (PR #911, #923, #924)
+- Add requestToken endpoints (PR #915)
+- Add an /account/deactivate endpoint (PR #921)
+- Add filter param to /messages. Add 'contains_url' to filter. (PR #922)
+- Add device_id support to /login (PR #929)
+- Add device_id support to /v2/register flow. (PR #937, #942)
+- Add GET /devices endpoint (PR #939, #944)
+- Add GET /device/{deviceId} (PR #943)
+- Add update and delete APIs for devices (PR #949)
+
+Changes:
+
+- Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
+- Linearize some federation endpoints based on (origin, room_id) (PR #879)
+- Remove the legacy v0 content upload API. (PR #888)
+- Use similar naming we use in email notifs for push (PR #894)
+- Optionally include password hash in createUser endpoint (PR #905 by KentShikama)
+- Use a query that postgresql optimises better for get\_events\_around (PR #906)
+- Fall back to 'username' if 'user' is not given for appservice registration. (PR #927 by Half-Shot)
+- Add metrics for psutil derived memory usage (PR #936)
+- Record device_id in client_ips (PR #938)
+- Send the correct host header when fetching keys (PR #941)
+- Log the hostname the reCAPTCHA was completed on (PR #946)
+- Make the device id on e2e key upload optional (PR #956)
+- Add r0.2.0 to the "supported versions" list (PR #960)
+- Don't include name of room for invites in push (PR #961)
+
+Bug fixes:
+
+- Fix substitution failure in mail template (PR #887)
+- Put most recent 20 messages in email notif (PR #892)
+- Ensure that the guest user is in the database when upgrading accounts (PR #914)
+- Fix various edge cases in auth handling (PR #919)
+- Fix 500 ISE when sending an alias event without a state_key (PR #925)
+- Fix bug where we stored rejections in the state_group; persist all rejections (PR #948)
+- Fix missing check of whether the user is banned when handling 3pid invites (PR #952)
+- Fix a couple of bugs in the transaction and keyring code (PR #954, #955)
+
+Changes in synapse v0.16.1-r1 (2016-07-08)
+==========================================
+
+THIS IS A CRITICAL SECURITY UPDATE.
+
+This fixes a bug which allowed users' accounts to be accessed by unauthorised users.
+
+Changes in synapse v0.16.1 (2016-06-20)
+=======================================
+
+Bug fixes:
+
+- Fix assorted bugs in `/preview_url` (PR #872)
+- Fix TypeError when setting unicode passwords (PR #873)
+
+Performance improvements:
+
+- Turn `use_frozen_events` off by default (PR #877)
+- Disable responding with canonical json for federation (PR #878)
+
+Changes in synapse v0.16.1-rc1 (2016-06-15)
+===========================================
+
+Features: None
+
+Changes:
+
+- Log requester for `/publicRoom` endpoints when possible (PR #856)
+- 502 on `/thumbnail` when we can't connect to the remote server (PR #862)
+- Linearize fetching of gaps on incoming events (PR #871)
+
+Bug fixes:
+
+- Fix bug where rooms were marked as published by default (PR #857)
+- Fix bug when joining a room via an event with an invalid sender (PR #868)
+- Fix bug where backfilled events were sent down sync streams (PR #869)
+- Fix bug where outgoing connections could wedge indefinitely, causing push notifications to be unreliable (PR #870)
+
+Performance improvements:
+
+- Improve `/publicRooms` performance (PR #859)
+
+Changes in synapse v0.16.0 (2016-06-09)
+=======================================
+
+NB: As of v0.14 all AS config files must have an ID field.
+
+Bug fixes:
+
+- Don't make rooms published by default (PR #857)
+
+Changes in synapse v0.16.0-rc2 (2016-06-08)
+===========================================
+
+Features:
+
+- Add configuration option for tuning GC via `gc.set_threshold` (PR #849)
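+
+A minimal sketch of how such tuning might look in homeserver.yaml; the option name `gc_thresholds` and the values shown are illustrative assumptions rather than details taken from PR #849:
+
+    # assumed option name; the three values would be passed to gc.set_threshold()
+    gc_thresholds: [700, 10, 10]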
+
+Changes:
+
+- Record metrics about GC (PR #771, #847, #852)
+- Add metric counter for number of persisted events (PR #841)
+
+Bug fixes:
+
+- Fix 'From' header in email notifications (PR #843)
+- Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842)
+- Fix bug where synapse sent malformed transactions to ASes when retrying transactions (Commits 310197b, 8437906)
+
+Performance improvements:
+
+- Remove event fetching from DB threads (PR #835)
+- Change the way we cache events (PR #836)
+- Add events to cache when we persist them (PR #840)
+
+Changes in synapse v0.16.0-rc1 (2016-06-03)
+===========================================
+
+Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.
+
+Features:
+
+- Add email notifications for missed messages (PR #759, #786, #799, #810, #815, #821)
+- Add a `url_preview_ip_range_whitelist` config param (PR #760); a sketch follows this list
+- Add /report endpoint (PR #762)
+- Add basic ignore user API (PR #763)
+- Add an openidish mechanism for proving that you own a given user_id (PR #765)
+- Allow clients to specify a server_name to avoid 'No known servers' (PR #794)
+- Add secondary_directory_servers option to fetch room list from other servers (PR #808, #813)
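+
+A hedged sketch of the whitelist param mentioned above; the CIDR range is an example only, assuming the semantics are "ranges that may be previewed even if otherwise blacklisted":
+
+    # example value; lets the URL previewer fetch from this range
+    url_preview_ip_range_whitelist:
+        - '192.168.1.0/24'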
+
+Changes:
+
+- Report per request metrics for all of the things using request_handler (PR #756)
+- Correctly handle `NULL` password hashes from the database (PR #775)
+- Allow receipts for events we haven't seen in the db (PR #784)
+- Make synctl read a cache factor from config file (PR #785)
+- Increment badge count per missed convo, not per msg (PR #793)
+- Special case m.room.third_party_invite event auth to match invites (PR #814)
+
+Bug fixes:
+
+- Fix typo in event\_auth servlet path (PR #757)
+- Fix password reset (PR #758)
+
+Performance improvements:
+
+- Reduce database inserts when sending transactions (PR #767)
+- Queue events by room for persistence (PR #768)
+- Add cache to `get_user_by_id` (PR #772)
+- Add and use `get_domain_from_id` (PR #773)
+- Use tree cache for `get_linearized_receipts_for_room` (PR #779)
+- Remove unused indices (PR #782)
+- Add caches to `bulk_get_push_rules*` (PR #804)
+- Cache `get_event_reference_hashes` (PR #806)
+- Add `get_users_with_read_receipts_in_room` cache (PR #809)
+- Use state to calculate `get_users_in_room` (PR #811)
+- Load push rules in storage layer so that they get cached (PR #825)
+- Make `get_joined_hosts_for_room` use `get_users_in_room` (PR #828)
+- Poke notifier on next reactor tick (PR #829)
+- Change CacheMetrics to be quicker (PR #830)
+
+Changes in synapse v0.15.0-rc1 (2016-04-26)
+===========================================
+
+Features:
+
+- Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck (PR #671, #687)
+- Add URL previewing support (PR #688)
+- Add login support for LDAP, thanks to Christoph Witzany (PR #701)
+- Add GET endpoint for pushers (PR #716)
+
+Changes:
+
+- Never notify for member events (PR #667)
+- Deduplicate identical `/sync` requests (PR #668)
+- Require user to have left room to forget room (PR #673)
+- Use DNS cache if within TTL (PR #677)
+- Let users see their own leave events (PR #699)
+- Deduplicate membership changes (PR #700)
+- Increase performance of pusher code (PR #705)
+- Respond with error status 504 if failed to talk to remote server (PR #731)
+- Increase search performance on postgres (PR #745)
+
+Bug fixes:
+
+- Fix bug where disabling all notifications still resulted in push (PR #678)
+- Fix bug where users couldn't reject remote invites if remote refused (PR #691)
+- Fix bug where synapse attempted to backfill from itself (PR #693)
+- Fix bug where profile information was not correctly added when joining remote rooms (PR #703)
+- Fix bug where register API required incorrect key name for AS registration (PR #727)
+
+Changes in synapse v0.14.0 (2016-03-30)
+=======================================
+
+No changes from v0.14.0-rc2
+
+Changes in synapse v0.14.0-rc2 (2016-03-23)
+===========================================
+
+Features:
+
+- Add published room list API (PR #657)
+
+Changes:
+
+- Change various caches to consume less memory (PR #656, #658, #660, #662, #663, #665)
+- Allow rooms to be published without requiring an alias (PR #664)
+- Intern common strings in caches to reduce memory footprint (#666)
+
+Bug fixes:
+
+- Fix reject invites over federation (PR #646)
+- Fix bug where registration was not idempotent (PR #649)
+- Update aliases event after deleting aliases (PR #652)
+- Fix unread notification count, which was sometimes wrong (PR #661)
+
+Changes in synapse v0.14.0-rc1 (2016-03-14)
+===========================================
+
+Features:
+
+- Add event_id to response to state event PUT (PR #581)
+- Allow guest users access to messages in rooms they have joined (PR #587)
+- Add config for what state is included in a room invite (PR #598)
+- Send the inviter's member event in room invite state (PR #607)
+- Add error codes for malformed/bad JSON in /login (PR #608)
+- Add support for changing the actions for default rules (PR #609)
+- Add environment variable SYNAPSE_CACHE_FACTOR, default it to 0.1 (PR #612)
+- Add ability for alias creators to delete aliases (PR #614)
+- Add profile information to invites (PR #624)
+
+Changes:
+
+- Enforce user_id exclusivity for AS registrations (PR #572)
+- Make adding push rules idempotent (PR #587)
+- Improve presence performance (PR #582, #586)
+- Change presence semantics for `last_active_ago` (PR #582, #586)
+- Don't allow `m.room.create` to be changed (PR #596)
+- Add 800x600 to default list of valid thumbnail sizes (PR #616)
+- Always include kicks and bans in full /sync (PR #625)
+- Send history visibility on boundary changes (PR #626)
+- Register endpoint now returns a refresh_token (PR #637)
+
+Bug fixes:
+
+- Fix bug where we returned incorrect state in /sync (PR #573)
+- Always return a JSON object from push rule API (PR #606)
+- Fix bug where registering without a user id sometimes failed (PR #610)
+- Report size of ExpiringCache in cache size metrics (PR #611)
+- Fix rejection of invites to empty rooms (PR #615)
+- Fix usage of `bcrypt` to not use `checkpw` (PR #619)
+- Pin `pysaml2` dependency (PR #634)
+- Fix bug in `/sync` where timeline order was incorrect for backfilled events (PR #635)
+
+Changes in synapse v0.13.3 (2016-02-11)
+=======================================
+
+- Fix bug where `/sync` would occasionally return events in the wrong room.
+
+Changes in synapse v0.13.2 (2016-02-11)
+=======================================
+
+- Fix bug where `/events` would fail to skip some events if there had been more events than the limit specified since the last request (PR #570)
+
+Changes in synapse v0.13.1 (2016-02-10)
+=======================================
+
+- Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to pull in the fix for SYWEB-361 so that the default client can display HTML messages again(!)
+
+Changes in synapse v0.13.0 (2016-02-10)
+=======================================
+
+This version includes an upgrade of the schema, specifically adding an index to the `events` table. This may cause synapse to pause for several minutes the first time it is started after the upgrade.
+
+Changes:
+
+- Improve general performance (PR #540, #543, #544, #54, #549, #567)
+- Change guest user ids to be incrementing integers (PR #550)
+- Improve performance of public room list API (PR #552)
+- Change profile API to omit keys rather than return null (PR #557)
+- Add `/media/r0` endpoint prefix, which is equivalent to `/media/v1/` (PR #595)
+
+Bug fixes:
+
+- Fix bug with upgrading guest accounts where it would fail if you opened the registration email on a different device (PR #547)
+- Fix bug where unread count could be wrong (PR #568)
+
+Changes in synapse v0.12.1-rc1 (2016-01-29)
+===========================================
+
+Features:
+
+- Add unread notification counts in `/sync` (PR #456)
+- Add support for inviting 3pids in `/createRoom` (PR #460)
+- Add ability for guest accounts to upgrade (PR #462)
+- Add `/versions` API (PR #468)
+- Add `event` to `/context` API (PR #492)
+- Add specific error code for invalid user names in `/register` (PR #499)
+- Add support for push badge counts (PR #507)
+- Add support for non-guest users to peek in rooms using `/events` (PR #510)
+
+Changes:
+
+- Change `/sync` so that guest users only get rooms they've joined (PR #469)
+- Change to require unbanning before other membership changes (PR #501)
+- Change default push rules to notify for all messages (PR #486)
+- Change default push rules to not notify on membership changes (PR #514)
+- Change default push rules in one to one rooms to only notify for events that are messages (PR #529)
+- Change `/sync` to reject requests with a `from` query param (PR #512)
+- Change server manhole to use SSH rather than telnet (PR #473)
+- Change server to require AS users to be registered before use (PR #487)
+- Change server not to start when ASes are invalidly configured (PR #494)
+- Change server to require ID and `as_token` to be unique for ASes (PR #496)
+- Change maximum pagination limit to 1000 (PR #497)
+
+Bug fixes:
+
+- Fix bug where `/sync` didn't return when something under the leave key changed (PR #461)
+- Fix bug where we returned smaller rather than larger than requested thumbnails when `method=crop` (PR #464)
+- Fix thumbnails API to only return cropped thumbnails when asking for a cropped thumbnail (PR #475)
+- Fix bug where we occasionally still logged access tokens (PR #477)
+- Fix bug where `/events` would always return immediately for guest users (PR #480)
+- Fix bug where `/sync` unexpectedly returned old left rooms (PR #481)
+- Fix enabling and disabling push rules (PR #498)
+- Fix bug where `/register` returned 500 when given unicode username (PR #513)
+
+Changes in synapse v0.12.0 (2016-01-04)
+=======================================
+
+- Expose `/login` under `r0` (PR #459)
+
+Changes in synapse v0.12.0-rc3 (2015-12-23)
+===========================================
+
+- Allow guest accounts access to `/sync` (PR #455)
+- Allow filters to include/exclude rooms at the room level rather than just from the components of the sync for each room. (PR #454)
+- Include urls for room avatars in the response to `/publicRooms` (PR #453)
+- Don't set an identicon as the avatar for a user when they register (PR #450)
+- Add a `display_name` to third-party invites (PR #449)
+- Send more information to the identity server for third-party invites so that it can send richer messages to the invitee (PR #446)
+- Cache the responses to `/initialSync` for 5 minutes. If a client retries a request to `/initialSync` before a response to the first request has been computed, the same response is used for both requests (PR #457)
+- Fix a bug where synapse would always request the signing keys of remote servers even when the key was cached locally (PR #452)
+- Fix 500 when paginating search results (PR #447)
+- Fix a bug where synapse was leaking raw email addresses in third-party invites (PR #448)
+
+Changes in synapse v0.12.0-rc2 (2015-12-14)
+===========================================
+
+- Add caches for whether rooms have been forgotten by a user (PR #434)
+- Remove instructions to use `--process-dependency-link` since all of the dependencies of synapse are on PyPI (PR #436)
+- Parallelise the processing of `/sync` requests (PR #437)
+- Fix race updating presence in `/events` (PR #444)
+- Fix bug back-populating search results (PR #441)
+- Fix bug calculating state in `/sync` requests (PR #442)
+
+Changes in synapse v0.12.0-rc1 (2015-12-10)
+===========================================
+
+- Host the client APIs released as r0 by <https://matrix.org/docs/spec/r0.0.0/client_server.html> on paths prefixed by `/_matrix/client/r0`. (PR #430, PR #415, PR #400)
+- Updates the client APIs to match r0 of the matrix specification.
+ - All APIs return events in the new event format, old APIs also include the fields needed to parse the event using the old format for compatibility. (PR #402)
+ - Search results are now given as a JSON array rather than a JSON object (PR #405)
+ - Miscellaneous changes to search (PR #403, PR #406, PR #412)
+ - Filter JSON objects may now be passed as query parameters to `/sync` (PR #431)
+ - Fix implementation of `/admin/whois` (PR #418)
+ - Only include the rooms that user has left in `/sync` if the client requests them in the filter (PR #423)
+  - Don't push for `m.room.message` by default (PR #411)
+ - Add API for setting per account user data (PR #392)
+ - Allow users to forget rooms (PR #385)
+- Performance improvements and monitoring:
+ - Add per-request counters for CPU time spent on the main python thread. (PR #421, PR #420)
+ - Add per-request counters for time spent in the database (PR #429)
+ - Make state updates in the C+S API idempotent (PR #416)
+ - Only fire `user_joined_room` if the user has actually joined. (PR #410)
+ - Reuse a single http client, rather than creating new ones (PR #413)
+- Fixed a bug upgrading from older versions of synapse on postgresql (PR #417)
+
+Changes in synapse v0.11.1 (2015-11-20)
+=======================================
+
+- Add extra options to search API (PR #394)
+- Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to resurrected servers, even when they were receiving traffic from them (PR #393)
+- Don't advertise login token flow unless CAS is enabled. This caused issues where some clients would always use the fallback API if they did not recognize all login flows (PR #391)
+- Change /v2 sync API to rename `private_user_data` to `account_data` (PR #386)
+- Change /v2 sync API to remove the `event_map` and rename keys in `rooms` object (PR #389)
+
+Changes in synapse v0.11.0-r2 (2015-11-19)
+==========================================
+
+- Fix bug in database port script (PR #387)
+
+Changes in synapse v0.11.0-r1 (2015-11-18)
+==========================================
+
+- Retry and fail federation requests more aggressively for requests that block client side requests (PR #384)
+
+Changes in synapse v0.11.0 (2015-11-17)
+=======================================
+
+- Change CAS login API (PR #349)
+
+Changes in synapse v0.11.0-rc2 (2015-11-13)
+===========================================
+
+- Various changes to /sync API response format (PR #373)
+- Fix regression when setting display name in newly joined room over federation (PR #368)
+- Fix problem where /search was slow when using SQLite (PR #366)
+
+Changes in synapse v0.11.0-rc1 (2015-11-11)
+===========================================
+
+- Add Search API (PR #307, #324, #327, #336, #350, #359)
+- Add 'archived' state to v2 /sync API (PR #316)
+- Add ability to reject invites (PR #317)
+- Add config option to disable password login (PR #322)
+- Add the login fallback API (PR #330)
+- Add room context API (PR #334)
+- Add room tagging support (PR #335)
+- Update v2 /sync API to match spec (PR #305, #316, #321, #332, #337, #341)
+- Change retry schedule for application services (PR #320)
+- Change retry schedule for remote servers (PR #340)
+- Fix bug where we hosted static content in the incorrect place (PR #329)
+- Fix bug where we didn't increment retry interval for remote servers (PR #343)
+
+Changes in synapse v0.10.1-rc1 (2015-10-15)
+===========================================
+
+- Add support for CAS, thanks to Steven Hammerton (PR #295, #296)
+- Add support for using macaroons for `access_token` (PR #256, #229)
+- Add support for `m.room.canonical_alias` (PR #287)
+- Add support for users to view the history of rooms that they have left. (PR #276, #294)
+- Add support for refresh tokens (PR #240)
+- Add flag on creation which disables federation of the room (PR #279)
+- Add some room state to invites. (PR #275)
+- Atomically persist events when joining a room over federation (PR #283)
+- Change default history visibility for private rooms (PR #271)
+- Allow users to redact their own sent events (PR #262)
+- Use tox for tests (PR #247)
+- Split up syutil into separate libraries (PR #243)
+
+Changes in synapse v0.10.0-r2 (2015-09-16)
+==========================================
+
+- Fix bug where we always fetched remote server signing keys instead of using ones in our cache.
+- Fix adding threepids to an existing account.
+- Fix bug with inviting over federation where the remote server was already in the room. (PR #281, SYN-392)
+
+Changes in synapse v0.10.0-r1 (2015-09-08)
+==========================================
+
+- Fix bug with python packaging
+
+Changes in synapse v0.10.0 (2015-09-03)
+=======================================
+
+No change from release candidate.
+
+Changes in synapse v0.10.0-rc6 (2015-09-02)
+===========================================
+
+- Remove some of the old database upgrade scripts.
+- Fix database port script to work with newly created sqlite databases.
+
+Changes in synapse v0.10.0-rc5 (2015-08-27)
+===========================================
+
+- Fix bug that broke downloading files with ascii filenames across federation.
+
+Changes in synapse v0.10.0-rc4 (2015-08-27)
+===========================================
+
+- Allow UTF-8 filenames for upload. (PR #259)
+
+Changes in synapse v0.10.0-rc3 (2015-08-25)
+===========================================
+
+- Add `--keys-directory` config option to specify where files such as certs and signing keys should be stored in, when using `--generate-config` or `--generate-keys`. (PR #250)
+- Allow `--config-path` to specify a directory, causing synapse to use all *.yaml files in the directory as config files. (PR #249)
+- Add `web_client_location` config option to specify static files to be hosted by synapse under `/_matrix/client`. (PR #245)
+- Add helper utility to synapse to read and parse the config files and extract the value of a given key. For example:
+
+ $ python -m synapse.config read server_name -c homeserver.yaml
+ localhost
+
+ (PR #246)
+
+Changes in synapse v0.10.0-rc2 (2015-08-24)
+===========================================
+
+- Fix bug where we incorrectly populated the `event_forward_extremities` table, resulting in problems joining large remote rooms (e.g. `#matrix:matrix.org`)
+- Reduce the number of times we wake up pushers by not listening for presence or typing events, reducing the CPU cost of each pusher.
+
+Changes in synapse v0.10.0-rc1 (2015-08-21)
+===========================================
+
+Also see v0.9.4-rc1 changelog, which has been amalgamated into this release.
+
+General:
+
+- Upgrade to Twisted 15 (PR #173)
+- Add support for serving and fetching encryption keys over federation. (PR #208)
+- Add support for logging in with email address (PR #234)
+- Add support for new `m.room.canonical_alias` event. (PR #233)
+- Change synapse to treat user IDs case insensitively during registration and login. (If two users already exist with case insensitive matching user ids, synapse will continue to require them to specify their user ids exactly.)
+- Error if a user tries to register with an email already in use. (PR #211)
+- Add extra and improve existing caches (PR #212, #219, #226, #228)
+- Batch various storage request (PR #226, #228)
+- Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service (PR #230)
+- Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. (PR #232)
+- Add support for ASes to use the v2_alpha registration API (PR #210)
+
+Configuration:
+
+- Add `--generate-keys` that will generate any missing cert and key files in the configuration files. This is equivalent to running `--generate-config` on an existing configuration file. (PR #220)
+- `--generate-config` now no longer requires a `--server-name` parameter when used on existing configuration files. (PR #220)
+- Add `--print-pidfile` flag that controls the printing of the pid to stdout of the daemonised process. (PR #213)
+
+Media Repository:
+
+- Fix bug where we picked a lower resolution image than requested. (PR #205)
+- Add support for specifying whether the media repository should dynamically thumbnail images or not. (PR #206)
+
+Metrics:
+
+- Add statistics from the reactor to the metrics API. (PR #224, #225)
+
+Demo Homeservers:
+
+- Fix starting the demo homeservers without rate-limiting enabled. (PR #182)
+- Fix enabling registration on demo homeservers (PR #223)
+
+Changes in synapse v0.9.4-rc1 (2015-07-21)
+==========================================
+
+General:
+
+- Add basic implementation of receipts. (SPEC-99)
+- Add support for configuration presets in room creation API. (PR #203)
+- Add auth event that limits the visibility of history for new users. (SPEC-134)
+- Add SAML2 login/registration support. (PR #201. Thanks Muthu Subramanian!)
+- Add client side key management APIs for end to end encryption. (PR #198)
+- Change power level semantics so that you cannot kick, ban or change power levels of users that have equal or greater power level than you. (SYN-192)
+- Improve performance by bulk inserting events where possible. (PR #193)
+- Improve performance by bulk verifying signatures where possible. (PR #194)
+
+Configuration:
+
+- Add support for including TLS certificate chains.
+
+Media Repository:
+
+- Add Content-Disposition headers to content repository responses. (SYN-150)
+
+Changes in synapse v0.9.3 (2015-07-01)
+======================================
+
+No changes from v0.9.3 Release Candidate 1.
+
+Changes in synapse v0.9.3-rc1 (2015-06-23)
+==========================================
+
+General:
+
+- Fix a memory leak in the notifier. (SYN-412)
+- Improve performance of room initial sync. (SYN-418)
+- General improvements to logging.
+- Remove `access_token` query params from `INFO` level logging.
+
+Configuration:
+
+- Add support for specifying and configuring multiple listeners. (SYN-389)
+
+Application services:
+
+- Fix bug where synapse failed to send user queries to application services.
+
+Changes in synapse v0.9.2-r2 (2015-06-15)
+=========================================
+
+Fix packaging so that schema delta python files get included in the package.
+
+Changes in synapse v0.9.2 (2015-06-12)
+======================================
+
+General:
+
+- Use ultrajson for json (de)serialisation when a canonical encoding is not required. Ultrajson is significantly faster than simplejson in certain circumstances.
+- Use connection pools for outgoing HTTP connections.
+- Process thumbnails on separate threads.
+
+Configuration:
+
+- Add option, `gzip_responses`, to disable HTTP response compression.
+
+Federation:
+
+- Improve resilience of backfill by ensuring we fetch any missing auth events.
+- Improve performance of backfill and joining remote rooms by removing unnecessary computations. This included handling events we'd previously handled as well as attempting to compute the current state for outliers.
+
+Changes in synapse v0.9.1 (2015-05-26)
+======================================
+
+General:
+
+- Add support for backfilling when a client paginates. This allows servers to request history for a room from remote servers when a client tries to paginate history the server does not have - SYN-36
+- Fix bug where you couldn't disable non-default pushrules - SYN-378
+- Fix `register_new_user` script - SYN-359
+- Improve performance of fetching events from the database, this improves both initialSync and sending of events.
+- Improve performance of event streams, allowing synapse to handle more simultaneous connected clients.
+
+Federation:
+
+- Fix bug with existing backfill implementation where it returned the wrong selection of events in some circumstances.
+- Improve performance of joining remote rooms.
+
+Configuration:
+
+- Add support for changing the bind host of the metrics listener via the `metrics_bind_host` option.
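+
+A one-line sketch of the option named above (the address is an example):
+
+    # bind the metrics listener to localhost only
+    metrics_bind_host: 127.0.0.1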
+
+Changes in synapse v0.9.0-r5 (2015-05-21)
+=========================================
+
+- Add more database caches to reduce amount of work done for each pusher. This radically reduces CPU usage when multiple pushers are set up in the same room.
+
+Changes in synapse v0.9.0 (2015-05-07)
+======================================
+
+General:
+
+- Add support for using a PostgreSQL database instead of SQLite. See [docs/postgres.rst](docs/postgres.rst) for details.
+- Add password change and reset APIs. See [Registration](https://github.com/matrix-org/matrix-doc/blob/master/specification/10_client_server_api.rst#registration) in the spec.
+- Fix memory leak due to not releasing stale notifiers - SYN-339.
+- Fix race in caches that occasionally caused some presence updates to be dropped - SYN-369.
+- Check server name has not changed on restart.
+- Add a sample systemd unit file and a logger configuration in contrib/systemd. Contributed by Ivan Shapovalov.
+
+Federation:
+
+- Add key distribution mechanisms for fetching public keys of unavailable remote home servers. See [Retrieving Server Keys](https://github.com/matrix-org/matrix-doc/blob/6f2698/specification/30_server_server_api.rst#retrieving-server-keys) in the spec.
+
+Configuration:
+
+- Add support for multiple config files.
+- Add support for dictionaries in config files.
+- Remove support for specifying config options on the command line, except for:
+ - `--daemonize` - Daemonize the home server.
+ - `--manhole` - Turn on the twisted telnet manhole service on the given port.
+ - `--database-path` - The path to a sqlite database to use.
+ - `--verbose` - The verbosity level.
+ - `--log-file` - File to log to.
+ - `--log-config` - Python logging config file.
+ - `--enable-registration` - Enable registration for new users.
+
+Application services:
+
+- Reliably retry sending of events from Synapse to application services, as per [Application Services](https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api) spec.
+- Application services can no longer register via the `/register` API; instead their configuration should be saved to a file and listed in the synapse `app_service_config_files` config option. The AS configuration file has the same format as the old `/register` request. See [docs/application_services.rst](docs/application_services.rst) for more information.
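+
+As a hedged illustration of this mechanism (the path is made up; the fields inside each listed file follow the old `/register` format described in docs/application_services.rst):
+
+    # homeserver config: list each application service registration file
+    app_service_config_files:
+        - /etc/synapse/appservices/my_bridge.yaml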
+
+Changes in synapse v0.8.1 (2015-03-18)
+======================================
+
+- Disable registration by default. New users can be added using the command `register_new_matrix_user` or by enabling registration in the config.
+- Add metrics to synapse. To enable metrics use config options `enable_metrics` and `metrics_port`; see the sketch after this list.
+- Fix bug where banning only kicked the user.
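+
+A minimal sketch of the metrics options mentioned above, in the usual homeserver config format (the port number is an example):
+
+    enable_metrics: true
+    metrics_port: 9092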
+
+Changes in synapse v0.8.0 (2015-03-06)
+======================================
+
+General:
+
+- Add support for registration fallback. This is a page hosted on the server which allows a user to register for an account, regardless of what client they are using (e.g. mobile devices).
+- Added new default push rules and made them configurable by clients:
+ - Suppress all notice messages.
+ - Notify when invited to a new room.
+  - Notify for messages that don't match any rule.
+ - Notify on incoming call.
+
+Federation:
+
+- Added per host server side rate-limiting of incoming federation requests.
+- Added a `/get_missing_events/` API to federation to reduce number of `/events/` requests.
+
+Configuration:
+
+- Added configuration option to disable registration: `disable_registration`.
+- Added configuration option to change soft limit of number of open file descriptors: `soft_file_limit`.
+- Make `tls_private_key_path` optional when running with `no_tls`.
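+
+Taken together, a hedged sketch of the options above (values are examples only):
+
+    # disable open registration
+    disable_registration: true
+    # raise the soft limit on open file descriptors
+    soft_file_limit: 4096
+    # with no_tls set, tls_private_key_path may be omitted
+    no_tls: true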
+
+Application services:
+
+- Application services can now poll on the CS API `/events` for their events, by providing their application service `access_token`.
+- Added exclusive namespace support to application services API.
+
+Changes in synapse v0.7.1 (2015-02-19)
+======================================
+
+- Initial alpha implementation of parts of the Application Services API. Including:
+ - AS Registration / Unregistration
+ - User Query API
+ - Room Alias Query API
+ - Push transport for receiving events.
+ - User/Alias namespace admin control
+- Add cache when fetching events from remote servers to stop repeatedly fetching events with bad signatures.
+- Respect the per remote server retry scheme when fetching both events and server keys to reduce the number of times we send requests to dead servers.
+- Inform remote servers when the local server fails to handle a received event.
+- Turn off python bytecode generation due to problems experienced when upgrading from previous versions.
+
+Changes in synapse v0.7.0 (2015-02-12)
+======================================
+
+- Add initial implementation of the query auth federation API, allowing servers to agree on whether an event should be allowed or rejected.
+- Persist events we have rejected from federation, fixing the bug where servers would keep requesting the same events.
+- Various federation performance improvements, including:
+ - Add in memory caches on queries such as:
+
+    - Computing the state of a room at a point in time, used for authorization on federation requests.
+    - Fetching events from the database.
+    - User's room membership, used for authorizing presence updates.
+
+ - Upgraded JSON library to improve parsing and serialisation speeds.
+
+- Add default avatars to new user accounts using pydenticon library.
+- Correctly time out federation requests.
+- Retry federation requests against different servers.
+- Add support for push and push rules.
+- Add alpha versions of proposed new CSv2 APIs, including `/sync` API.
+
+Changes in synapse 0.6.1 (2015-01-07)
+=====================================
+
+- Major optimizations to improve performance of initial sync and event sending in large rooms (by up to 10x)
+- Media repository now includes a Content-Length header on media downloads.
+- Improve quality of thumbnails by changing resizing algorithm.
+
+Changes in synapse 0.6.0 (2014-12-16)
+=====================================
+
+- Add new API for media upload and download that supports thumbnailing.
+- Replicate media uploads over multiple homeservers so media is always served to clients from their local homeserver. This obsoletes the --content-addr parameter and confusion over accessing content directly from remote homeservers.
+- Implement exponential backoff when retrying federation requests when sending to remote homeservers which are offline.
+- Implement typing notifications.
+- Fix bugs where we sent events with invalid signatures due to bugs where we incorrectly persisted events.
+- Improve performance of database queries involving retrieving events.
+
+Changes in synapse 0.5.4a (2014-12-13)
+======================================
+
+- Fix bug while generating the error message when a file path specified in the config doesn't exist.
+
+Changes in synapse 0.5.4 (2014-12-03)
+=====================================
+
+- Fix presence bug where some rooms did not display presence updates for remote users.
+- Do not log SQL timing log lines when started with "-v"
+- Fix potential memory leak.
+
+Changes in synapse 0.5.3c (2014-12-02)
+======================================
+
+- Change the default value for the content_addr option to use the HTTP listener, as by default the HTTPS listener will be using a self-signed certificate.
+
+Changes in synapse 0.5.3 (2014-11-27)
+=====================================
+
+- Fix bug that caused joining a remote room to fail if a single event was not signed correctly.
+- Fix bug which caused servers to continuously try and fetch events from other servers.
+
+Changes in synapse 0.5.2 (2014-11-26)
+=====================================
+
+Fix major bug that caused rooms to disappear from people's initial sync.
+
+Changes in synapse 0.5.1 (2014-11-26)
+=====================================
+
+See UPGRADES.rst for specific instructions on how to upgrade.
+
+- Fix bug where we served up an Event that did not match its signatures.
+- Fix regression where we no longer correctly handled the case where a homeserver receives an event for a room it doesn't recognise (but is in.)
+
+Changes in synapse 0.5.0 (2014-11-19)
+=====================================
+
+This release includes changes to the federation protocol and client-server API that are not backwards compatible.
+
+This release also changes the internal database schemas and so requires servers to drop their current history. See UPGRADES.rst for details.
+
+Homeserver:
+
+: - Add authentication and authorization to the federation protocol. Events are now signed by their originating homeservers.
+ - Implement the new authorization model for rooms.
+  - Split out web client into a separate repository: matrix-angular-sdk.
+ - Change the structure of PDUs.
+ - Fix bug where user could not join rooms via an alias containing 4-byte UTF-8 characters.
+ - Merge concept of PDUs and Events internally.
+ - Improve logging by adding request ids to log lines.
+ - Implement a very basic room initial sync API.
+ - Implement the new invite/join federation APIs.
+
+Webclient:
+
+: - The webclient has been moved to a separate repository.
+
+Changes in synapse 0.4.2 (2014-10-31)
+=====================================
+
+Homeserver:
+
+: - Fix bugs where we did not notify users of correct presence updates.
+ - Fix bug where we did not handle sub second event stream timeouts.
+
+Webclient:
+
+: - Add ability to click on messages to see JSON.
+ - Add ability to redact messages.
+ - Add ability to view and edit all room state JSON.
+ - Handle incoming redactions.
+ - Improve feedback on errors.
+ - Fix bugs in mobile CSS.
+ - Fix bugs with desktop notifications.
+
+Changes in synapse 0.4.1 (2014-10-17)
+=====================================
+
+Webclient:
+
+: - Fix bug with display of timestamps.
+
+Changes in synapse 0.4.0 (2014-10-17)
+=====================================
+
+This release includes changes to the federation protocol and client-server API that are not backwards compatible.
+
+The Matrix specification has been moved to a separate git repository: <http://github.com/matrix-org/matrix-doc>
+
+You will also need an updated syutil and config. See UPGRADES.rst.
+
+Homeserver:
+
+: - Sign federation transactions to assert strong identity over federation.
+  - Rename timestamp keys in PDUs and events from 'ts' and 'hsob_ts' to 'origin_server_ts'.
+
+Changes in synapse 0.3.4 (2014-09-25)
+=====================================
+
+This version adds support for using a TURN server. See docs/turn-howto.rst on how to set one up.
+
+Homeserver:
+
+: - Add support for redaction of messages.
+ - Fix bug where inviting a user on a remote home server could take up to 20-30s.
+ - Implement a get current room state API.
+  - Add support for specifying and retrieving TURN server configuration.
+
+Webclient:
+
+: - Add button to send messages to users from the home page.
+ - Add support for using TURN for VoIP calls.
+ - Show display name change messages.
+  - Fix bug where the client didn't get the state of a newly joined room until after it has been refreshed.
+  - Fix bugs with tab complete.
+  - Fix bug where holding down the down arrow caused chrome to chew 100% CPU.
+  - Fix bug where desktop notifications occasionally used "Undefined" as the display name.
+ - Fix more places where we sometimes saw room IDs incorrectly.
+ - Fix bug which caused lag when entering text in the text box.
+
+Changes in synapse 0.3.3 (2014-09-22)
+=====================================
+
+Homeserver:
+
+: - Fix bug where you continued to get events for rooms you had left.
+
+Webclient:
+
+: - Add support for video calls with basic UI.
+  - Fix bug where one to one chats were named after your display name rather than the other person's.
+  - Fix bug which caused lag when typing in the textarea.
+  - Refuse to run on browsers we know won't work.
+  - Trigger pagination when joining new rooms.
+  - Fix bug where we sometimes didn't display invitations in recents.
+  - Automatically join room when accepting a VoIP call.
+  - Disable outgoing and reject incoming calls on browsers we don't support VoIP in.
+  - Don't display desktop notifications for messages in the room you are non-idle and speaking in.
+
+Changes in synapse 0.3.2 (2014-09-18)
+=====================================
+
+Webclient:
+
+: - Fix bug where an empty "bing words" list in old accounts didn't send notifications when it should have done.
+
+Changes in synapse 0.3.1 (2014-09-18)
+=====================================
+
+This is a release to hotfix v0.3.0 to fix two regressions.
+
+Webclient:
+
+: - Fix a regression where we sometimes displayed duplicate events.
+  - Fix a regression where we didn't immediately remove rooms you were banned in from the recents list.
+
+Changes in synapse 0.3.0 (2014-09-18)
+=====================================
+
+See UPGRADE for information about changes to the client server API, including breaking backwards compatibility with VoIP calls and registration API.
+
+Homeserver:
+
+: - When a user changes their displayname or avatar the server will now update all their join states to reflect this.
+  - The server now adds an "age" key to events to indicate how old they are. This is clock independent, so at no point does any server or webclient have to assume their clock is in sync with everyone else.
+  - Fix bug where we didn't correctly pull in missing PDUs.
+  - Fix bug where prev_content key wasn't always returned.
+ - Add support for password resets.
+
+Webclient:
+
+: - Improve page content loading.
+ - Join/parts now trigger desktop notifications.
+ - Always show room aliases in the UI if one is present.
+ - No longer show user-count in the recents side panel.
+ - Add up & down arrow support to the text box for message sending to step through your sent history.
+ - Don't display notifications for our own messages.
+ - Emotes are now formatted correctly in desktop notifications.
+ - The recents list now differentiates between public & private rooms.
+ - Fix bug where when switching between rooms the pagination flickered before the view jumped to the bottom of the screen.
+ - Add bing word support.
+
+Registration API:
+
+: - The registration API has been overhauled to function like the login API. In practice, this means registration requests must now include the following: 'type':'m.login.password'. See UPGRADE for more information on this. (A sketch of such a request follows this list.)
+ - The 'user_id' key has been renamed to 'user' to better match the login API.
+ - There is an additional login type: 'm.login.email.identity'.
+ - The command client and web client have been updated to reflect these changes.
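+
+For illustration, a hedged sketch of a registration request body under the new scheme (the values are examples; see UPGRADE for the authoritative description):
+
+```python
+register_request = {
+    "type": "m.login.password",  # now required, mirroring the login API
+    "user": "alice",             # renamed from 'user_id'
+    "password": "correct-horse-battery-staple",
+}
+```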
+
+Changes in synapse 0.2.3 (2014-09-12)
+=====================================
+
+Homeserver:
+
+: - Fix bug where we stopped sending events to remote home servers if a user from that home server left, even if there were some still in the room.
+ - Fix bugs in the state conflict resolution where it was incorrectly rejecting events.
+
+Webclient:
+
+: - Display room names and topics.
+ - Allow setting/editing of room names and topics.
+ - Display information about rooms on the main page.
+ - Handle ban and kick events in real time.
+ - VoIP UI and reliability improvements.
+ - Add glare support for VoIP.
+ - Improvements to initial startup speed.
+ - Don\'t display duplicate join events.
+ - Local echo of messages.
+ - Differentiate sending and sent of local echo.
+ - Various minor bug fixes.
+
+Changes in synapse 0.2.2 (2014-09-06)
+=====================================
+
+Homeserver:
+
+: - When the server returns state events it now also includes the previous content.
+ - Add support for inviting people when creating a new room.
+ - Make the homeserver inform the room via m.room.aliases when a new alias is added for a room.
+ - Validate m.room.power_level events.
+
+Webclient:
+
+: - Add support for captchas on registration.
+ - Handle m.room.aliases events.
+ - Asynchronously send messages and show a local echo.
+ - Inform the UI when a message failed to send.
+ - Only autoscroll on receiving a new message if the user was already at the bottom of the screen.
+ - Add support for ban/kick reasons.
+
+Changes in synapse 0.2.1 (2014-09-03)
+=====================================
+
+Homeserver:
+
+: - Added support for signing up with a third party id.
+ - Add synctl scripts.
+ - Added rate limiting.
+ - Add option to change the external address the content repo uses.
+ - Presence bug fixes.
+
+Webclient:
+
+: - Added support for signing up with a third party id.
+ - Added support for banning and kicking users.
+ - Added support for displaying and setting ops.
+ - Added support for room names.
+ - Fix bugs with room membership event display.
+
+Changes in synapse 0.2.0 (2014-09-02)
+=====================================
+
+This update changes many configuration options, updates the database schema and mandates SSL for server-server connections.
+
+Homeserver:
+
+: - Require SSL for server-server connections.
+ - Add SSL listener for client-server connections.
+ - Add ability to use config files.
+ - Add support for kicking/banning and power levels.
+ - Allow setting of room names and topics on creation.
+ - Change presence to include last seen time of the user.
+ - Change url path prefix to /_matrix/...
+ - Bug fixes to presence.
+
+Webclient:
+
+: - Reskin the CSS for registration and login.
+ - Various improvements to rooms CSS.
+ - Support changes in client-server API.
+ - Bug fixes to VOIP UI.
+ - Various bug fixes to handling of changes to room member list.
+
+Changes in synapse 0.1.2 (2014-08-29)
+=====================================
+
+Webclient:
+
+: - Add basic call state UI for VoIP calls.
+
+Changes in synapse 0.1.1 (2014-08-29)
+=====================================
+
+Homeserver:
+
+: - Fix bug that caused the event stream to not notify some clients about changes.
+
+Changes in synapse 0.1.0 (2014-08-29)
+=====================================
+
+Presence has been reenabled in this release.
+
+Homeserver:
+
+: - Update client to server API, including:
+     - Use a more consistent url scheme.
+     - Provide more useful information in the initial sync api.
+ - Change the presence handling to be much more efficient.
+ - Change the presence server to server API to not require explicit polling of all users who share a room with a user.
+ - Fix races in the event streaming logic.
+
+Webclient:
+
+: - Update to use new client to server API.
+ - Add basic VOIP support.
+ - Add idle timers that change your status to away.
+ - Add recent rooms column when viewing a room.
+ - Various network efficiency improvements.
+ - Add basic mobile browser support.
+ - Add a settings page.
+
+Changes in synapse 0.0.1 (2014-08-22)
+=====================================
+
+Presence has been disabled in this release due to a bug that caused the homeserver to spam other remote homeservers.
+
+Homeserver:
+
+: - Completely change the database schema to support generic event types.
+ - Improve presence reliability.
+ - Improve reliability of joining remote rooms.
+ - Fix bug where room join events were duplicated.
+ - Improve initial sync API to return more information to the client.
+ - Stop generating fake messages for room membership events.
+
+Webclient:
+
+: - Add tab completion of names.
+ - Add ability to upload and send images.
+ - Add profile pages.
+ - Improve CSS layout of room.
+ - Disambiguate identical display names.
+ - Don't fetch remote users' display names and avatars individually.
+ - Use the new initial sync API to reduce number of round trips to the homeserver.
+ - Change url scheme to use room aliases instead of room ids where known.
+ - Increase longpoll timeout.
+
+Changes in synapse 0.0.0 (2014-08-13)
+=====================================
+
+: - Initial alpha release
diff --git a/CHANGES.rst b/CHANGES.rst
deleted file mode 100644
index afc4fee8..00000000
--- a/CHANGES.rst
+++ /dev/null
@@ -1,2867 +0,0 @@
-Synapse 0.33.0 (2018-07-19)
-===========================
-
-Features
---------
-
-- Enforce the specified API for report_event (`#3316 <https://github.com/matrix-org/synapse/issues/3316>`_)
-- Include CPU time from database threads in request/block metrics. (`#3496 <https://github.com/matrix-org/synapse/issues/3496>`_, `#3501 <https://github.com/matrix-org/synapse/issues/3501>`_)
-- Add CPU metrics for _fetch_event_list (`#3497 <https://github.com/matrix-org/synapse/issues/3497>`_)
-- Optimisation for /sync (`#3505 <https://github.com/matrix-org/synapse/issues/3505>`_, `#3521 <https://github.com/matrix-org/synapse/issues/3521>`_)
-- Optimisation to make handling incoming federation requests more efficient. (`#3541 <https://github.com/matrix-org/synapse/issues/3541>`_)
-
-
-Bugfixes
---------
-
-- Use more portable syntax in our use of the attrs package, widening the supported versions (`#3498 <https://github.com/matrix-org/synapse/issues/3498>`_)
-- Fix queued federation requests being processed in the wrong order (`#3533 <https://github.com/matrix-org/synapse/issues/3533>`_)
-- Ensure that erasure requests are correctly honoured for publicly accessible rooms when accessed over federation. (`#3546 <https://github.com/matrix-org/synapse/issues/3546>`_)
-- Disable a noisy warning about logcontexts (`#3561 <https://github.com/matrix-org/synapse/issues/3561>`_)
-
-
-Misc
-----
-
-- `#3351 <https://github.com/matrix-org/synapse/issues/3351>`_, `#3463 <https://github.com/matrix-org/synapse/issues/3463>`_, `#3464 <https://github.com/matrix-org/synapse/issues/3464>`_, `#3499 <https://github.com/matrix-org/synapse/issues/3499>`_, `#3530 <https://github.com/matrix-org/synapse/issues/3530>`_, `#3534 <https://github.com/matrix-org/synapse/issues/3534>`_, `#3535 <https://github.com/matrix-org/synapse/issues/3535>`_, `#3540 <https://github.com/matrix-org/synapse/issues/3540>`_, `#3544 <https://github.com/matrix-org/synapse/issues/3544>`_
-
-
-Synapse 0.32.2 (2018-07-07)
-===========================
-
-Bugfixes
---------
-
-- Amend the Python dependencies to depend on attrs from PyPI, not attr (`#3492 <https://github.com/matrix-org/synapse/issues/3492>`_)
-
-
-Synapse 0.32.1 (2018-07-06)
-===========================
-
-Bugfixes
---------
-
-- Add explicit dependency on netaddr (`#3488 <https://github.com/matrix-org/synapse/issues/3488>`_)
-
-
-Changes in synapse v0.32.0 (2018-07-06)
-===========================================
-No changes since 0.32.0rc1
-
-Synapse 0.32.0rc1 (2018-07-05)
-==============================
-
-Features
---------
-
-- Add blacklist & whitelist of servers allowed to send events to a room via ``m.room.server_acl`` event.
-- Cache factor override system for specific caches (`#3334 <https://github.com/matrix-org/synapse/issues/3334>`_)
-- Add metrics to track appservice transactions (`#3344 <https://github.com/matrix-org/synapse/issues/3344>`_)
-- Try to log more helpful info when a sig verification fails (`#3372 <https://github.com/matrix-org/synapse/issues/3372>`_)
-- Synapse now uses the best performing JSON encoder/decoder according to your runtime (simplejson on CPython, stdlib json on PyPy). (`#3462 <https://github.com/matrix-org/synapse/issues/3462>`_)
-- Add optional ip_range_whitelist param to AS registration files to lock AS IP access (`#3465 <https://github.com/matrix-org/synapse/issues/3465>`_)
-- Reject invalid server names in federation requests (`#3480 <https://github.com/matrix-org/synapse/issues/3480>`_)
-- Reject invalid server names in homeserver.yaml (`#3483 <https://github.com/matrix-org/synapse/issues/3483>`_)
-
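-For illustration, a hedged sketch of the content of an ``m.room.server_acl``
-state event (sent with an empty state_key); the field names follow the event's
-format, while the values are invented examples, shown here as a Python dict::
-
-    server_acl_content = {
-        "allow": ["*"],                  # server-name globs allowed to send
-        "deny": ["*.evil.example.com"],  # globs to reject (deny wins over allow)
-        "allow_ip_literals": False,      # refuse servers named by raw IP literals
-    }
-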
-
-Bugfixes
---------
-
-- Strip access_token from outgoing requests (`#3327 <https://github.com/matrix-org/synapse/issues/3327>`_)
-- Redact AS tokens in logs (`#3349 <https://github.com/matrix-org/synapse/issues/3349>`_)
-- Fix federation backfill from SQLite servers (`#3355 <https://github.com/matrix-org/synapse/issues/3355>`_)
-- Fix event-purge-by-ts admin API (`#3363 <https://github.com/matrix-org/synapse/issues/3363>`_)
-- Fix event filtering in get_missing_events handler (`#3371 <https://github.com/matrix-org/synapse/issues/3371>`_)
-- Synapse is now stricter regarding accepting events which it cannot retrieve the prev_events for. (`#3456 <https://github.com/matrix-org/synapse/issues/3456>`_)
-- Fix bug where synapse would explode when receiving unicode in HTTP User-Agent header (`#3470 <https://github.com/matrix-org/synapse/issues/3470>`_)
-- Invalidate cache on correct thread to avoid race (`#3473 <https://github.com/matrix-org/synapse/issues/3473>`_)
-
-
-Improved Documentation
-----------------------
-
-- ``doc/postgres.rst``: fix display of the last command block. Thanks to @ArchangeGabriel! (`#3340 <https://github.com/matrix-org/synapse/issues/3340>`_)
-
-
-Deprecations and Removals
--------------------------
-
-- Remove was_forgotten_at (`#3324 <https://github.com/matrix-org/synapse/issues/3324>`_)
-
-
-Misc
-----
-
-- `#3332 <https://github.com/matrix-org/synapse/issues/3332>`_, `#3341 <https://github.com/matrix-org/synapse/issues/3341>`_, `#3347 <https://github.com/matrix-org/synapse/issues/3347>`_, `#3348 <https://github.com/matrix-org/synapse/issues/3348>`_, `#3356 <https://github.com/matrix-org/synapse/issues/3356>`_, `#3385 <https://github.com/matrix-org/synapse/issues/3385>`_, `#3446 <https://github.com/matrix-org/synapse/issues/3446>`_, `#3447 <https://github.com/matrix-org/synapse/issues/3447>`_, `#3467 <https://github.com/matrix-org/synapse/issues/3467>`_, `#3474 <https://github.com/matrix-org/synapse/issues/3474>`_
-
-
-Changes in synapse v0.31.2 (2018-06-14)
-=======================================
-
-SECURITY UPDATE: Prevent unauthorised users from setting state events in a room
-when there is no ``m.room.power_levels`` event in force in the room. (PR #3397)
-
-Discussion around the Matrix Spec change proposal for this change can be
-followed at https://github.com/matrix-org/matrix-doc/issues/1304.
-
-Changes in synapse v0.31.1 (2018-06-08)
-=======================================
-
-v0.31.1 fixes a security bug in the ``get_missing_events`` federation API
-where event visibility rules were not applied correctly.
-
-We are not aware of it being actively exploited but please upgrade asap.
-
-Bug Fixes:
-
-* Fix event filtering in get_missing_events handler (PR #3371)
-
-Changes in synapse v0.31.0 (2018-06-06)
-=======================================
-
-The most notable change since v0.30.0 is the switch to the Python Prometheus
-library to improve system stats reporting. WARNING: this changes a number of
-prometheus metrics in a backwards-incompatible manner. For more details, see
-`docs/metrics-howto.rst <docs/metrics-howto.rst#removal-of-deprecated-metrics--time-based-counters-becoming-histograms-in-0310>`_.
-
-Bug Fixes:
-
-* Fix metric documentation tables (PR #3341)
-* Fix LaterGauge error handling (694968f)
-* Fix replication metrics (b7e7fd2)
-
-Changes in synapse v0.31.0-rc1 (2018-06-04)
-===========================================
-
-Features:
-
-* Switch to the Python Prometheus library (PR #3256, #3274)
-* Let users leave the server notice room after joining (PR #3287)
-
-
-Changes:
-
-* daily user type phone home stats (PR #3264)
-* Use iter* methods for _filter_events_for_server (PR #3267)
-* Docs on consent bits (PR #3268)
-* Remove users from user directory on deactivate (PR #3277)
-* Avoid sending consent notice to guest users (PR #3288)
-* disable CPUMetrics if no /proc/self/stat (PR #3299)
-* Consistently use six's iteritems and wrap lazy keys/values in list() if they're not meant to be lazy (PR #3307)
-* Add private IPv6 addresses to example config for url preview blacklist (PR #3317) Thanks to @thegcat!
-* Reduce stuck read-receipts: ignore depth when updating (PR #3318)
-* Put python's logs into Trial when running unit tests (PR #3319)
-
-Changes, python 3 migration:
-
-* Replace some more comparisons with six (PR #3243) Thanks to @NotAFile!
-* replace some iteritems with six (PR #3244) Thanks to @NotAFile!
-* Add batch_iter to utils (PR #3245) Thanks to @NotAFile!
-* use repr, not str (PR #3246) Thanks to @NotAFile!
-* Misc Python3 fixes (PR #3247) Thanks to @NotAFile!
-* Py3 storage/_base.py (PR #3278) Thanks to @NotAFile!
-* more six iteritems (PR #3279) Thanks to @NotAFile!
-* More Misc. py3 fixes (PR #3280) Thanks to @NotAFile!
-* remaining isintance fixes (PR #3281) Thanks to @NotAFile!
-* py3-ize state.py (PR #3283) Thanks to @NotAFile!
-* extend tox testing for py3 to avoid regressions (PR #3302) Thanks to @krombel!
-* use memoryview in py3 (PR #3303) Thanks to @NotAFile!
-
-Bugs:
-
-* Fix federation backfill bugs (PR #3261)
-* federation: fix LaterGauge usage (PR #3328) Thanks to @intelfx!
-
-
-Changes in synapse v0.30.0 (2018-05-24)
-==========================================
-
-'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
-channel whereby server administrators can send messages to users on the server.
-
-They are used as part of communicating the server's policies (see
-``docs/consent_tracking.md``); however, the intention is that they may also
-find use for features such as "Message of the day".
-
-This feature is specific to Synapse, but uses standard Matrix communication mechanisms,
-so should work with any Matrix client. For more details see ``docs/server_notices.md``
-
-Further Server Notices/Consent Tracking Support:
-
-* Allow overriding the server_notices user's avatar (PR #3273)
-* Use the localpart in the consent uri (PR #3272)
-* Support for putting %(consent_uri)s in messages (PR #3271)
-* Block attempts to send server notices to remote users (PR #3270)
-* Docs on consent bits (PR #3268)
-
-
-
-Changes in synapse v0.30.0-rc1 (2018-05-23)
-===========================================
-
-Server Notices/Consent Tracking Support:
-
-* ConsentResource to gather policy consent from users (PR #3213)
-* Move RoomCreationHandler out of synapse.handlers.Handlers (PR #3225)
-* Infrastructure for a server notices room (PR #3232)
-* Send users a server notice about consent (PR #3236)
-* Reject attempts to send event before privacy consent is given (PR #3257)
-* Add a 'has_consented' template var to consent forms (PR #3262)
-* Fix dependency on jinja2 (PR #3263)
-
-Features:
-
-* Cohort analytics (PR #3163, #3241, #3251)
-* Add lxml to docker image for web previews (PR #3239) Thanks to @ptman!
-* Add in flight request metrics (PR #3252)
-
-Changes:
-
-* Remove unused `update_external_syncs` (PR #3233)
-* Use stream rather depth ordering for push actions (PR #3212)
-* Make purge_history operate on tokens (PR #3221)
-* Don't support limitless pagination (PR #3265)
-
-Bug Fixes:
-
-* Fix logcontext resource usage tracking (PR #3258)
-* Fix error in handling receipts (PR #3235)
-* Stop the transaction cache caching failures (PR #3255)
-
-
-Changes in synapse v0.29.1 (2018-05-17)
-==========================================
-Changes:
-
-* Update docker documentation (PR #3222)
-
-Changes in synapse v0.29.0 (2018-05-16)
-===========================================
-No changes since v0.29.0-rc1
-
-Changes in synapse v0.29.0-rc1 (2018-05-14)
-===========================================
-
-Notable changes include a Dockerfile for running Synapse (thanks to @kaiyou!)
-and the closing of a spec bug in the Client-Server API, along with further
-preparation for the Python 3 migration.
-
-Potentially breaking change:
-
-* Make Client-Server API return 401 for invalid token (PR #3161).
-
- This changes the Client-Server spec to return a 401 error code instead of 403
- when the access token is unrecognised. This is the behaviour required by the
- specification, but some clients may be relying on the old, incorrect
- behaviour. (The corrected response is illustrated below.)
-
- Thanks to @NotAFile for fixing this.
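-
- A hedged illustration of the corrected behaviour (the error string is an
- example; the errcode is the standard one for an unrecognised token)::
-
-     expected_status = 401  # was 403 before this change
-     expected_body = {
-         "errcode": "M_UNKNOWN_TOKEN",
-         "error": "Unrecognised access token.",
-     }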
-
-Features:
-
-* Add a Dockerfile for synapse (PR #2846) Thanks to @kaiyou!
-
-Changes - General:
-
-* nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77!
-* Part user from rooms on account deactivate (PR #3201)
-* Make 'unexpected logging context' into warnings (PR #3007)
-* Set Server header in SynapseRequest (PR #3208)
-* remove duplicates from groups tables (PR #3129)
-* Improve exception handling for background processes (PR #3138)
-* Add missing consumeErrors to improve exception handling (PR #3139)
-* reraise exceptions more carefully (PR #3142)
-* Remove redundant call to preserve_fn (PR #3143)
-* Trap exceptions thrown within run_in_background (PR #3144)
-
-Changes - Refactors:
-
-* Refactor /context to reuse pagination storage functions (PR #3193)
-* Refactor recent events func to use pagination func (PR #3195)
-* Refactor pagination DB API to return concrete type (PR #3196)
-* Refactor get_recent_events_for_room return type (PR #3198)
-* Refactor sync APIs to reuse pagination API (PR #3199)
-* Remove unused code path from member change DB func (PR #3200)
-* Refactor request handling wrappers (PR #3203)
-* transaction_id, destination defined twice (PR #3209) Thanks to @damir-manapov!
-* Refactor event storage to prepare for changes in state calculations (PR #3141)
-* Set Server header in SynapseRequest (PR #3208)
-* Use deferred.addTimeout instead of time_bound_deferred (PR #3127, #3178)
-* Use run_in_background in preference to preserve_fn (PR #3140)
-
-Changes - Python 3 migration:
-
-* Construct HMAC as bytes on py3 (PR #3156) Thanks to @NotAFile!
-* run config tests on py3 (PR #3159) Thanks to @NotAFile!
-* Open certificate files as bytes (PR #3084) Thanks to @NotAFile!
-* Open config file in non-bytes mode (PR #3085) Thanks to @NotAFile!
-* Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile!
-* Use six.moves.urlparse (PR #3108) Thanks to @NotAFile!
-* Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile!
-* Don't yield in list comprehensions (PR #3150) Thanks to @NotAFile!
-* Move more xrange to six (PR #3151) Thanks to @NotAFile!
-* make imports local (PR #3152) Thanks to @NotAFile!
-* move httplib import to six (PR #3153) Thanks to @NotAFile!
-* Replace stringIO imports with six (PR #3154, #3168) Thanks to @NotAFile!
-* more bytes strings (PR #3155) Thanks to @NotAFile!
-
-Bug Fixes:
-
-* Fix synapse failing to start under Twisted >= 18.4 (PR #3157)
-* Fix a class of logcontext leaks (PR #3170)
-* Fix a couple of logcontext leaks in unit tests (PR #3172)
-* Fix logcontext leak in media repo (PR #3174)
-* Escape label values in prometheus metrics (PR #3175, #3186)
-* Fix 'Unhandled Error' logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot!
-* Fix logcontext leaks in rate limiter (PR #3183)
-* notifications: Convert next_token to string according to the spec (PR #3190) Thanks to @mujx!
-* nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77!
-* add guard for None on purge_history api (PR #3160) Thanks to @krombel!
-
-Changes in synapse v0.28.1 (2018-05-01)
-=======================================
-
-SECURITY UPDATE
-
-* Clamp the allowed values of event depth received over federation to be
- [0, 2^63 - 1] (see the sketch after this list). This mitigates an attack
- where malicious events injected with depth = 2^63 - 1 render rooms
- unusable. Depth is used to determine the cosmetic ordering of events within
- a room, and so the ordering of events in such a room will default to using
- stream_ordering rather than depth (topological_ordering).
-
- This is a temporary solution to mitigate abuse in the wild, whilst a long term solution
- is being implemented to improve how the depth parameter is used.
-
- Full details at
- https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI
-
-* Pin Twisted to <18.4 until we stop using the private _OpenSSLECCurve API.
-
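-A minimal sketch of the clamp described above (illustrative only, not
-synapse's actual code)::
-
-    MAX_DEPTH = 2**63 - 1
-
-    def clamp_depth(depth):
-        """Clamp a federation-supplied depth into [0, 2**63 - 1]."""
-        return max(0, min(int(depth), MAX_DEPTH))
-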
-
-Changes in synapse v0.28.0 (2018-04-26)
-=======================================
-
-Bug Fixes:
-
-* Fix quarantine media admin API and search reindex (PR #3130)
-* Fix media admin APIs (PR #3134)
-
-
-Changes in synapse v0.28.0-rc1 (2018-04-24)
-===========================================
-
-Minor performance improvement to federation sending and bug fixes.
-
-(Note: This release does not include the delta state resolution implementation discussed in Matrix Live.)
-
-
-Features:
-
-* Add metrics for event processing lag (PR #3090)
-* Add metrics for ResponseCache (PR #3092)
-
-Changes:
-
-* Synapse on PyPy (PR #2760) Thanks to @Valodim!
-* move handling of auto_join_rooms to RegisterHandler (PR #2996) Thanks to @krombel!
-* Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh!
-* Document the behaviour of ResponseCache (PR #3059)
-* Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile!
-* update prometheus dashboard to use new metric names (PR #3069) Thanks to @krombel!
-* use python3-compatible prints (PR #3074) Thanks to @NotAFile!
-* Send federation events concurrently (PR #3078)
-* Limit concurrent event sends for a room (PR #3079)
-* Improve R30 stat definition (PR #3086)
-* Send events to ASes concurrently (PR #3088)
-* Refactor ResponseCache usage (PR #3093)
-* Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh!
-* Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile!
-* Use six.itervalues in some places (PR #3106) Thanks to @NotAFile!
-* Refactor store.have_events (PR #3117)
-
-Bug Fixes:
-
-* Return 401 for invalid access_token on logout (PR #2938) Thanks to @dklug!
-* Return a 404 rather than a 500 on rejoining empty rooms (PR #3080)
-* fix federation_domain_whitelist (PR #3099)
-* Avoid creating events with huge numbers of prev_events (PR #3113)
-* Reject events which have lots of prev_events (PR #3118)
-
-
-Changes in synapse v0.27.4 (2018-04-13)
-=======================================
-
-Changes:
-
-* Update canonicaljson dependency (#3095)
-
-
-Changes in synapse v0.27.3 (2018-04-11)
-=======================================
-
-Bug fixes:
-
-* URL quote path segments over federation (#3082)
-
-Changes in synapse v0.27.3-rc2 (2018-04-09)
-===========================================
-
-v0.27.3-rc1 used a stale version of the develop branch so the changelog overstates
-the functionality. v0.27.3-rc2 is up to date, rc1 should be ignored.
-
-Changes in synapse v0.27.3-rc1 (2018-04-09)
-===========================================
-
-Notable changes include API support for joinability of groups, along with new
-metrics and phone-home stats. The phone-home stats give better visibility of
-system usage, so we can tweak synapse to work better for all users rather
-than relying only on our own experience with matrix.org. We also now record
-the 'r30' stat, which is the measure we use to track overall growth of the
-Matrix ecosystem. It is defined as:
-
-Counts the number of native 30 day retained users, defined as:
- * Users who created their account more than 30 days ago
- * Who were last seen at most 30 days ago
- * Where account creation and last_seen are more than 30 days apart
-
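-A hedged restatement of that predicate as code (the names and millisecond
-units are illustrative, not synapse's actual implementation)::
-
-    DAY_MS = 24 * 60 * 60 * 1000
-
-    def is_r30(created_ts, last_seen_ts, now_ms):
-        return (
-            now_ms - created_ts > 30 * DAY_MS            # account >30 days old
-            and now_ms - last_seen_ts <= 30 * DAY_MS     # seen in last 30 days
-            and last_seen_ts - created_ts > 30 * DAY_MS  # retained >30 days
-        )
-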
-
-Features:
-
-* Add joinability for groups (PR #3045)
-* Implement group join API (PR #3046)
-* Add counter metrics for calculating state delta (PR #3033)
-* R30 stats (PR #3041)
-* Measure time it takes to calculate state group ID (PR #3043)
-* Add basic performance statistics to phone home (PR #3044)
-* Add response size metrics (PR #3071)
-* phone home cache size configurations (PR #3063)
-
-Changes:
-
-* Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live!
-* Replace old style error catching with 'as' keyword (PR #3000) Thanks to @NotAFile!
-* Use .iter* to avoid copies in StateHandler (PR #3006)
-* Linearize calls to _generate_user_id (PR #3029)
-* Remove last usage of ujson (PR #3030)
-* Use simplejson throughout (PR #3048)
-* Use static JSONEncoders (PR #3049)
-* Remove uses of events.content (PR #3060)
-* Improve database cache performance (PR #3068)
-
-Bug fixes:
-
-* Add room_id to the response of `rooms/{roomId}/join` (PR #2986) Thanks to @jplatte!
-* Fix replication after switch to simplejson (PR #3015)
-* 404 correctly on missing paths via NoResource (PR #3022)
-* Fix error when claiming e2e keys from offline servers (PR #3034)
-* fix tests/storage/test_user_directory.py (PR #3042)
-* use PUT instead of POST for federating groups/m.join_policy (PR #3070) Thanks to @krombel!
-* postgres port script: fix state_groups_pkey error (PR #3072)
-
-
-Changes in synapse v0.27.2 (2018-03-26)
-=======================================
-
-Bug fixes:
-
-* Fix bug which broke TCP replication between workers (PR #3015)
-
-
-Changes in synapse v0.27.1 (2018-03-26)
-=======================================
-
-Meta release as v0.27.0 temporarily pointed to the wrong commit
-
-
-Changes in synapse v0.27.0 (2018-03-26)
-=======================================
-
-No changes since v0.27.0-rc2
-
-
-Changes in synapse v0.27.0-rc2 (2018-03-19)
-===========================================
-
-Pulls in v0.26.1
-
-Bug fixes:
-
-* Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005)
-
-
-Changes in synapse v0.26.1 (2018-03-15)
-=======================================
-
-Bug fixes:
-
-* Fix bug where an invalid event caused server to stop functioning correctly,
- due to parsing and serializing bugs in ujson library (PR #3008)
-
-
-Changes in synapse v0.27.0-rc1 (2018-03-14)
-===========================================
-
-The common case for running Synapse is not to run separate workers, but for those that do, be aware that synctl no longer starts the main synapse when using the ``-a`` option with workers. A new worker file should be added with ``worker_app: synapse.app.homeserver``; a sketch follows.
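-
-A minimal sketch of such a worker file (the filename is hypothetical; all
-other settings are inherited from the usual homeserver configuration)::
-
-    # e.g. workers/main.yaml
-    worker_app: synapse.app.homeserver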
-
-This release also begins the process of renaming a number of the metrics
-reported to prometheus. See `docs/metrics-howto.rst <docs/metrics-howto.rst#block-and-response-metrics-renamed-for-0-27-0>`_.
-Note that the v0.28.0 release will remove the deprecated metric names.
-
-Features:
-
-* Add ability for ASes to override message send time (PR #2754)
-* Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
-* Add purge API features, see `docs/admin_api/purge_history_api.rst <docs/admin_api/purge_history_api.rst>`_ for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
-* Add support for whitelisting 3PIDs that users can register. (PR #2813)
-* Add ``/room/{id}/event/{id}`` API (PR #2766)
-* Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
-* Add ``federation_domain_whitelist`` option (PR #2820, #2821)
-
-
-Changes:
-
-* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/workers.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
-* Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
-* Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
-* No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
-* Remove ``verbosity``/``log_file`` from generated config (PR #2755)
-* Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
-* When using synctl with workers, don't start the main synapse automatically (PR #2774)
-* Minor performance improvements (PR #2773, #2792)
-* Use a connection pool for non-federation outbound connections (PR #2817)
-* Make it possible to run unit tests against postgres (PR #2829)
-* Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp!
-* Remove ability for AS users to call /events and /sync (PR #2948)
-* Use bcrypt.checkpw (PR #2949) Thanks to @krombel!
-
-Bug fixes:
-
-* Fix broken ``ldap_config`` config option (PR #2683) Thanks to @seckrv!
-* Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live!
-* Fix publicised groups GET API (singular) over federation (PR #2772)
-* Fix user directory when using ``user_directory_search_all_users`` config option (PR #2803, #2831)
-* Fix error on ``/publicRooms`` when no rooms exist (PR #2827)
-* Fix bug in quarantine_media (PR #2837)
-* Fix url_previews when no Content-Type is returned from URL (PR #2845)
-* Fix rare race in sync API when joining room (PR #2944)
-* Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)
-
-
-Changes in synapse v0.26.0 (2018-01-05)
-=======================================
-
-No changes since v0.26.0-rc1
-
-
-Changes in synapse v0.26.0-rc1 (2017-12-13)
-===========================================
-
-Features:
-
-* Add ability for ASes to publicise groups for their users (PR #2686)
-* Add all local users to the user_directory and optionally search them (PR
- #2723)
-* Add support for custom login types for validating users (PR #2729)
-
-
-Changes:
-
-* Update example Prometheus config to new format (PR #2648) Thanks to
- @krombel!
-* Rename redact_content option to include_content in Push API (PR #2650)
-* Declare support for r0.3.0 (PR #2677)
-* Improve upserts (PR #2684, #2688, #2689, #2713)
-* Improve documentation of workers (PR #2700)
-* Improve tracebacks on exceptions (PR #2705)
-* Allow guest access to group APIs for reading (PR #2715)
-* Support for posting content in federation_client script (PR #2716)
-* Delete devices and pushers on logouts etc (PR #2722)
-
-
-Bug fixes:
-
-* Fix database port script (PR #2673)
-* Fix internal server error on login with ldap_auth_provider (PR #2678) Thanks
- to @jkolo!
-* Fix error on sqlite 3.7 (PR #2697)
-* Fix OPTIONS on preview_url (PR #2707)
-* Fix error handling on dns lookup (PR #2711)
-* Fix wrong avatars when inviting multiple users when creating room (PR #2717)
-* Fix 500 when joining matrix-dev (PR #2719)
-
-
-Changes in synapse v0.25.1 (2017-11-17)
-=======================================
-
-Bug fixes:
-
-* Fix login with LDAP and other password provider modules (PR #2678). Thanks to
- @jkolo!
-
-Changes in synapse v0.25.0 (2017-11-15)
-=======================================
-
-Bug fixes:
-
-* Fix port script (PR #2673)
-
-
-Changes in synapse v0.25.0-rc1 (2017-11-14)
-===========================================
-
-Features:
-
-* Add is_public to groups table to allow for private groups (PR #2582)
-* Add a route for determining who you are (PR #2668) Thanks to @turt2live!
-* Add more features to the password providers (PR #2608, #2610, #2620, #2622,
- #2623, #2624, #2626, #2628, #2629)
-* Add a hook for custom rest endpoints (PR #2627)
-* Add API to update group room visibility (PR #2651)
-
-
-Changes:
-
-* Ignore <noscript> tags when generating URL preview descriptions (PR #2576)
- Thanks to @maximevaillancourt!
-* Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to
- @krombel!
-* Support /keys/upload on /r0 as well as /unstable (PR #2585)
-* Front-end proxy: pass through auth header (PR #2586)
-* Allow ASes to deactivate their own users (PR #2589)
-* Remove refresh tokens (PR #2613)
-* Automatically set default displayname on register (PR #2617)
-* Log login requests (PR #2618)
-* Always return `is_public` in the `/groups/:group_id/rooms` API (PR #2630)
-* Avoid no-op media deletes (PR #2637) Thanks to @spantaleev!
-* Fix various embarrassing typos around user_directory and add some doc. (PR
- #2643)
-* Return whether a user is an admin within a group (PR #2647)
-* Namespace visibility options for groups (PR #2657)
-* Downcase UserIDs on registration (PR #2662)
-* Cache failures when fetching URL previews (PR #2669)
-
-
-Bug fixes:
-
-* Fix port script (PR #2577)
-* Fix error when running synapse with no logfile (PR #2581)
-* Fix UI auth when deleting devices (PR #2591)
-* Fix typo when checking if user is invited to group (PR #2599)
-* Fix the port script to drop NUL values in all tables (PR #2611)
-* Fix appservices being backlogged and not receiving new events due to a bug in
- notify_interested_services (PR #2631) Thanks to @xyzz!
-* Fix updating rooms avatar/display name when modified by admin (PR #2636)
- Thanks to @farialima!
-* Fix bug in state group storage (PR #2649)
-* Fix 500 on invalid utf-8 in request (PR #2663)
-
-
-Changes in synapse v0.24.1 (2017-10-24)
-=======================================
-
-Bug fixes:
-
-* Fix updating group profiles over federation (PR #2567)
-
-
-Changes in synapse v0.24.0 (2017-10-23)
-=======================================
-
-No changes since v0.24.0-rc1
-
-
-Changes in synapse v0.24.0-rc1 (2017-10-19)
-===========================================
-
-Features:
-
-* Add Group Server (PR #2352, #2363, #2374, #2377, #2378, #2382, #2410, #2426,
- #2430, #2454, #2471, #2472, #2544)
-* Add support for channel notifications (PR #2501)
-* Add basic implementation of backup media store (PR #2538)
-* Add config option to auto-join new users to rooms (PR #2545)
-
-
-Changes:
-
-* Make the spam checker a module (PR #2474)
-* Delete expired url cache data (PR #2478)
-* Ignore incoming events for rooms that we have left (PR #2490)
-* Allow spam checker to reject invites too (PR #2492)
-* Add room creation checks to spam checker (PR #2495)
-* Spam checking: add the invitee to user_may_invite (PR #2502)
-* Process events from federation for different rooms in parallel (PR #2520)
-* Allow error strings from spam checker (PR #2531)
-* Improve error handling for missing files in config (PR #2551)
-
-
-Bug fixes:
-
-* Fix handling SERVFAILs when doing AAAA lookups for federation (PR #2477)
-* Fix incompatibility with newer versions of ujson (PR #2483) Thanks to
- @jeremycline!
-* Fix notification keywords that start/end with non-word chars (PR #2500)
-* Fix stack overflow and logcontexts from linearizer (PR #2532)
-* Fix 500 error when fields missing from power_levels event (PR #2552)
-* Fix 500 error when we get an error handling a PDU (PR #2553)
-
-
-Changes in synapse v0.23.1 (2017-10-02)
-=======================================
-
-Changes:
-
-* Make 'affinity' package optional, as it is not supported on some platforms
-
-
-Changes in synapse v0.23.0 (2017-10-02)
-=======================================
-
-No changes since v0.23.0-rc2
-
-
-Changes in synapse v0.23.0-rc2 (2017-09-26)
-===========================================
-
-Bug fixes:
-
-* Fix regression in performance of syncs (PR #2470)
-
-
-Changes in synapse v0.23.0-rc1 (2017-09-25)
-===========================================
-
-Features:
-
-* Add a frontend proxy worker (PR #2344)
-* Add support for event_id_only push format (PR #2450)
-* Add a PoC for filtering spammy events (PR #2456)
-* Add a config option to block all room invites (PR #2457)
-
-
-Changes:
-
-* Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias!
-* Improve performance of generating push notifications (PR #2343, #2357, #2365,
- #2366, #2371)
-* Improve DB performance for device list handling in sync (PR #2362)
-* Include a sample prometheus config (PR #2416)
-* Document known to work postgres version (PR #2433) Thanks to @ptman!
-
-
-Bug fixes:
-
-* Fix caching error in the push evaluator (PR #2332)
-* Fix bug where pusherpool didn't start and broke some rooms (PR #2342)
-* Fix port script for user directory tables (PR #2375)
-* Fix device lists notifications when user rejoins a room (PR #2443, #2449)
-* Fix sync to always send down current state events in timeline (PR #2451)
-* Fix bug where guest users were incorrectly kicked (PR #2453)
-* Fix bug talking to IPv6 only servers using SRV records (PR #2462)
-
-
-Changes in synapse v0.22.1 (2017-07-06)
-=======================================
-
-Bug fixes:
-
-* Fix bug where pusher pool didn't start and caused issues when
- interacting with some rooms (PR #2342)
-
-
-Changes in synapse v0.22.0 (2017-07-06)
-=======================================
-
-No changes since v0.22.0-rc2
-
-
-Changes in synapse v0.22.0-rc2 (2017-07-04)
-===========================================
-
-Changes:
-
-* Improve performance of storing user IPs (PR #2307, #2308)
-* Slightly improve performance of verifying access tokens (PR #2320)
-* Slightly improve performance of event persistence (PR #2321)
-* Increase default cache factor size from 0.1 to 0.5 (PR #2330)
-
-Bug fixes:
-
-* Fix bug with storing registration sessions that caused frequent CPU churn
- (PR #2319)
-
-
-Changes in synapse v0.22.0-rc1 (2017-06-26)
-===========================================
-
-Features:
-
-* Add a user directory API (PR #2252, and many more)
-* Add shutdown room API to remove room from local server (PR #2291)
-* Add API to quarantine media (PR #2292)
-* Add new config option to not send event contents to push servers (PR #2301)
- Thanks to @cjdelisle!
-
-Changes:
-
-* Various performance fixes (PR #2177, #2233, #2230, #2238, #2248, #2256,
- #2274)
-* Deduplicate sync filters (PR #2219) Thanks to @krombel!
-* Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist!
-* Add count of one time keys to sync stream (PR #2237)
-* Only store event_auth for state events (PR #2247)
-* Store URL cache preview downloads separately (PR #2299)
-
-Bug fixes:
-
-* Fix users not getting notifications when AS listened to that user_id (PR
- #2216) Thanks to @slipeer!
-* Fix users without push set up not getting notifications after joining rooms
- (PR #2236)
-* Fix preview url API to trim long descriptions (PR #2243)
-* Fix bug where we used cached but unpersisted state group as prev group,
-  resulting in broken state on restart (PR #2263)
-* Fix removing of pushers when using workers (PR #2267)
-* Fix CORS headers to allow Authorization header (PR #2285) Thanks to @krombel!
-
-
-Changes in synapse v0.21.1 (2017-06-15)
-=======================================
-
-Bug fixes:
-
-* Fix bug in anonymous usage statistic reporting (PR #2281)
-
-
-Changes in synapse v0.21.0 (2017-05-18)
-=======================================
-
-No changes since v0.21.0-rc3
-
-
-Changes in synapse v0.21.0-rc3 (2017-05-17)
-===========================================
-
-Features:
-
-* Add per user rate-limiting overrides (PR #2208)
-* Add config option to limit maximum number of events requested by ``/sync``
- and ``/messages`` (PR #2221) Thanks to @psaavedra!
-
-
-Changes:
-
-* Various small performance fixes (PR #2201, #2202, #2224, #2226, #2227, #2228,
- #2229)
-* Update username availability checker API (PR #2209, #2213)
-* When purging, don't de-delta state groups we're about to delete (PR #2214)
-* Documentation to check synapse version (PR #2215) Thanks to @hamber-dick!
-* Add an index to event_search to speed up purge history API (PR #2218)
-
-
-Bug fixes:
-
-* Fix API to allow clients to upload one-time-keys with new sigs (PR #2206)
-
-
-Changes in synapse v0.21.0-rc2 (2017-05-08)
-===========================================
-
-Changes:
-
-* Always mark remotes as up if we receive a signed request from them (PR #2190)
-
-
-Bug fixes:
-
-* Fix bug where users got pushed for rooms they had muted (PR #2200)
-
-
-Changes in synapse v0.21.0-rc1 (2017-05-08)
-===========================================
-
-Features:
-
-* Add username availability checker API (PR #2183)
-* Add read marker API (PR #2120)
-
-
-Changes:
-
-* Enable guest access for the 3pl/3pid APIs (PR #1986)
-* Add setting to support TURN for guests (PR #2011)
-* Various performance improvements (PR #2075, #2076, #2080, #2083, #2108,
- #2158, #2176, #2185)
-* Make synctl a bit more user friendly (PR #2078, #2127) Thanks @APwhitehat!
-* Replace HTTP replication with TCP replication (PR #2082, #2097, #2098,
- #2099, #2103, #2014, #2016, #2115, #2116, #2117)
-* Support authenticated SMTP (PR #2102) Thanks @DanielDent!
-* Add a counter metric for successfully-sent transactions (PR #2121)
-* Propagate errors sensibly from proxied IS requests (PR #2147)
-* Add more granular event send metrics (PR #2178)
-
-
-
-Bug fixes:
-
-* Fix nuke-room script to work with current schema (PR #1927) Thanks
- @zuckschwerdt!
-* Fix db port script to not assume postgres tables are in the public schema
- (PR #2024) Thanks @jerrykan!
-* Fix getting latest device IP for user with no devices (PR #2118)
-* Fix rejection of invites to unreachable servers (PR #2145)
-* Fix code for reporting old verify keys in synapse (PR #2156)
-* Fix invite state to always include all events (PR #2163)
-* Fix bug where synapse would always fetch state for any missing event (PR #2170)
-* Fix a leak with timed out HTTP connections (PR #2180)
-* Fix bug where we didn't time out HTTP requests to ASes (PR #2192)
-
-
-Docs:
-
-* Clarify doc for SQLite to PostgreSQL port (PR #1961) Thanks @benhylau!
-* Fix typo in synctl help (PR #2107) Thanks @HarHarLinks!
-* ``web_client_location`` documentation fix (PR #2131) Thanks @matthewjwolff!
-* Update README.rst with FreeBSD changes (PR #2132) Thanks @feld!
-* Clarify setting up metrics (PR #2149) Thanks @encks!
-
-
-Changes in synapse v0.20.0 (2017-04-11)
-=======================================
-
-Bug fixes:
-
-* Fix joining rooms over federation where not all servers in the room saw the
- new server had joined (PR #2094)
-
-
-Changes in synapse v0.20.0-rc1 (2017-03-30)
-===========================================
-
-Features:
-
-* Add delete_devices API (PR #1993)
-* Add phone number registration/login support (PR #1994, #2055)
-
-
-Changes:
-
-* Use JSONSchema for validation of filters. Thanks @pik! (PR #1783)
-* Reread log config on SIGHUP (PR #1982)
-* Speed up public room list (PR #1989)
-* Add helpful texts to logger config options (PR #1990)
-* Minor ``/sync`` performance improvements. (PR #2002, #2013, #2022)
-* Add some debug to help diagnose weird federation issue (PR #2035)
-* Correctly limit retries for all federation requests (PR #2050, #2061)
-* Don't lock table when persisting new one time keys (PR #2053)
-* Reduce some CPU work on DB threads (PR #2054)
-* Cache hosts in room (PR #2060)
-* Batch sending of device list pokes (PR #2063)
-* Speed up persist event path in certain edge cases (PR #2070)
-
-
-Bug fixes:
-
-* Fix bug where current_state_events renamed to current_state_ids (PR #1849)
-* Fix routing loop when fetching remote media (PR #1992)
-* Fix current_state_events table to not lie (PR #1996)
-* Fix CAS login to handle PartialDownloadError (PR #1997)
-* Fix assertion to stop transaction queue getting wedged (PR #2010)
-* Fix presence to fallback to last_active_ts if it beats the last sync time.
- Thanks @Half-Shot! (PR #2014)
-* Fix bug when federation received a PDU while a room join is in progress (PR
- #2016)
-* Fix resetting state on rejected events (PR #2025)
-* Fix installation issues in readme. Thanks @ricco386 (PR #2037)
-* Fix caching of remote servers' signature keys (PR #2042)
-* Fix some leaking log context (PR #2048, #2049, #2057, #2058)
-* Fix rejection of invites not reaching sync (PR #2056)
-
-
-
-Changes in synapse v0.19.3 (2017-03-20)
-=======================================
-
-No changes since v0.19.3-rc2
-
-
-Changes in synapse v0.19.3-rc2 (2017-03-13)
-===========================================
-
-Bug fixes:
-
-* Fix bug in handling of incoming device list updates over federation.
-
-
-
-Changes in synapse v0.19.3-rc1 (2017-03-08)
-===========================================
-
-Features:
-
-* Add some administration functionalities. Thanks to morteza-araby! (PR #1784)
-
-
-Changes:
-
-* Reduce database table sizes (PR #1873, #1916, #1923, #1963)
-* Update contrib/ to not use syutil. Thanks to andrewshadura! (PR #1907)
-* Don't fetch current state when sending an event in common case (PR #1955)
-
-
-Bug fixes:
-
-* Fix synapse_port_db failure. Thanks to Pneumaticat! (PR #1904)
-* Fix caching to not cache error responses (PR #1913)
-* Fix APIs to make kick & ban reasons work (PR #1917)
-* Fix bugs in the /keys/changes api (PR #1921)
-* Fix bug where users couldn't forget rooms they were banned from (PR #1922)
-* Fix issue with long language values in pushers API (PR #1925)
-* Fix a race in transaction queue (PR #1930)
-* Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! (PR
- #1945)
-* Fix device list update to not constantly resync (PR #1964)
-* Fix potential for huge memory usage when getting devices that have
-  changed (PR #1969)
-
-
-
-Changes in synapse v0.19.2 (2017-02-20)
-=======================================
-
-* Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for
- pointing it out! (PR #1929)
-
-
-Changes in synapse v0.19.1 (2017-02-09)
-=======================================
-
-* Fix bug where state was incorrectly reset in a room when synapse received an
- event over federation that did not pass auth checks (PR #1892)
-
-
-Changes in synapse v0.19.0 (2017-02-04)
-=======================================
-
-No changes since RC 4.
-
-
-Changes in synapse v0.19.0-rc4 (2017-02-02)
-===========================================
-
-* Bump cache sizes for common membership queries (PR #1879)
-
-
-Changes in synapse v0.19.0-rc3 (2017-02-02)
-===========================================
-
-* Fix email push in pusher worker (PR #1875)
-* Make presence.get_new_events a bit faster (PR #1876)
-* Make /keys/changes a bit more performant (PR #1877)
-
-
-Changes in synapse v0.19.0-rc2 (2017-02-02)
-===========================================
-
-* Include newly joined users in /keys/changes API (PR #1872)
-
-
-Changes in synapse v0.19.0-rc1 (2017-02-02)
-===========================================
-
-Features:
-
-* Add support for specifying multiple bind addresses (PR #1709, #1712, #1795,
- #1835). Thanks to @kyrias!
-* Add /account/3pid/delete endpoint (PR #1714)
-* Add config option to configure the Riot URL used in notification emails (PR
- #1811). Thanks to @aperezdc!
-* Add username and password config options for turn server (PR #1832). Thanks
- to @xsteadfastx!
-* Implement device lists updates over federation (PR #1857, #1861, #1864)
-* Implement /keys/changes (PR #1869, #1872)
-
-
-Changes:
-
-* Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph!
-* Log which files we saved attachments to in the media_repository (PR #1791)
-* Linearize updates to membership via PUT /state/ to better handle multiple
- joins (PR #1787)
-* Limit number of entries to prefill from cache on startup (PR #1792)
-* Remove full_twisted_stacktraces option (PR #1802)
-* Measure size of some caches by sum of the size of cached values (PR #1815)
-* Measure metrics of string_cache (PR #1821)
-* Reduce logging verbosity (PR #1822, #1823, #1824)
-* Don't clobber a displayname or avatar_url if provided by an m.room.member
- event (PR #1852)
-* Better handle 401/404 response for federation /send/ (PR #1866, #1871)
-
-
-Fixes:
-
-* Fix ability to change password to a non-ascii one (PR #1711)
-* Fix push getting stuck due to looking at the wrong view of state (PR #1820)
-* Fix email address comparison to be case insensitive (PR #1827)
-* Fix occasional inconsistencies of room membership (PR #1836, #1840)
-
-
-Performance:
-
-* Don't block messages sending on bumping presence (PR #1789)
-* Change device_inbox stream index to include user (PR #1793)
-* Optimise state resolution (PR #1818)
-* Use DB cache of joined users for presence (PR #1862)
-* Add an index to make membership queries faster (PR #1867)
-
-
-Changes in synapse v0.18.7 (2017-01-09)
-=======================================
-
-No changes from v0.18.7-rc2
-
-
-Changes in synapse v0.18.7-rc2 (2017-01-07)
-===========================================
-
-Bug fixes:
-
-* Fix error in rc1's discarding invalid inbound traffic logic that was
- incorrectly discarding missing events
-
-
-Changes in synapse v0.18.7-rc1 (2017-01-06)
-===========================================
-
-Bug fixes:
-
-* Fix error in PR #1764 to actually fix the nightmare bug #1753.
-* Improve deadlock logging further
-* Discard inbound federation traffic from invalid domains, to immunise
- against #1753
-
-
-Changes in synapse v0.18.6 (2017-01-06)
-=======================================
-
-Bug fixes:
-
-* Fix bug when checking if a guest user is allowed to join a room (PR #1772)
- Thanks to Patrik Oldsberg for diagnosing and the fix!
-
-
-Changes in synapse v0.18.6-rc3 (2017-01-05)
-===========================================
-
-Bug fixes:
-
-* Fix bug where we failed to send ban events to the banned server (PR #1758)
-* Fix bug where we sent event that didn't originate on this server to
- other servers (PR #1764)
-* Fix bug where processing an event from a remote server took a long time
- because we were making long HTTP requests (PR #1765, PR #1744)
-
-Changes:
-
-* Improve logging for debugging deadlocks (PR #1766, PR #1767)
-
-
-Changes in synapse v0.18.6-rc2 (2016-12-30)
-===========================================
-
-Bug fixes:
-
-* Fix memory leak in twisted by initialising logging correctly (PR #1731)
-* Fix bug where fetching missing events took an unacceptable amount of time in
- large rooms (PR #1734)
-
-
-Changes in synapse v0.18.6-rc1 (2016-12-29)
-===========================================
-
-Bug fixes:
-
-* Make sure that outbound connections are closed (PR #1725)
-
-
-Changes in synapse v0.18.5 (2016-12-16)
-=======================================
-
-Bug fixes:
-
-* Fix federation /backfill returning events it shouldn't (PR #1700)
-* Fix crash in url preview (PR #1701)
-
-
-Changes in synapse v0.18.5-rc3 (2016-12-13)
-===========================================
-
-Features:
-
-* Add support for E2E for guests (PR #1653)
-* Add new API appservice specific public room list (PR #1676)
-* Add new room membership APIs (PR #1680)
-
-
-Changes:
-
-* Enable guest access for private rooms by default (PR #653)
-* Limit the number of events that can be created on a given room concurrently
- (PR #1620)
-* Log the args that we have on UI auth completion (PR #1649)
-* Stop generating refresh_tokens (PR #1654)
-* Stop putting a time caveat on access tokens (PR #1656)
-* Remove unspecced GET endpoints for e2e keys (PR #1694)
-
-
-Bug fixes:
-
-* Fix handling of 500 and 429's over federation (PR #1650)
-* Fix Content-Type header parsing (PR #1660)
-* Fix error when previewing sites that include unicode, thanks to kyrias (PR
- #1664)
-* Fix some cases where we drop read receipts (PR #1678)
-* Fix bug where calls to ``/sync`` didn't correctly timeout (PR #1683)
-* Fix bug where E2E key query would fail if a single remote host failed (PR
- #1686)
-
-
-
-Changes in synapse v0.18.5-rc2 (2016-11-24)
-===========================================
-
-Bug fixes:
-
-* Don't send old events over federation, fixes bug in -rc1.
-
-Changes in synapse v0.18.5-rc1 (2016-11-24)
-===========================================
-
-Features:
-
-* Implement "event_fields" in filters (PR #1638)
-
-Changes:
-
-* Use external ldap auth package (PR #1628)
-* Split out federation transaction sending to a worker (PR #1635)
-* Fail with a coherent error message if `/sync?filter=` is invalid (PR #1636)
-* More efficient notif count queries (PR #1644)
-
-
-Changes in synapse v0.18.4 (2016-11-22)
-=======================================
-
-Bug fixes:
-
-* Add workaround for buggy clients that fail to register (PR #1632)
-
-
-Changes in synapse v0.18.4-rc1 (2016-11-14)
-===========================================
-
-Changes:
-
-* Various database efficiency improvements (PR #1188, #1192)
-* Update default config to blacklist more internal IPs, thanks to Euan Kemp (PR
- #1198)
-* Allow specifying duration in minutes in config, thanks to Daniel Dent (PR
- #1625)
-
-
-Bug fixes:
-
-* Fix media repo to set CORS headers on responses (PR #1190)
-* Fix registration to not error on non-ascii passwords (PR #1191)
-* Fix create event code to limit the number of prev_events (PR #1615)
-* Fix bug in transaction ID deduplication (PR #1624)
-
-
-Changes in synapse v0.18.3 (2016-11-08)
-=======================================
-
-SECURITY UPDATE
-
-Explicitly require authentication when using LDAP3. This is the default on
-versions of ``ldap3`` above 1.0, but some distributions will package an older
-version.
-
-If you are using LDAP3 login and have a version of ``ldap3`` older than 1.0 it
-is **CRITICAL to upgrade**.
-
-
-Changes in synapse v0.18.2 (2016-11-01)
-=======================================
-
-No changes since v0.18.2-rc5
-
-
-Changes in synapse v0.18.2-rc5 (2016-10-28)
-===========================================
-
-Bug fixes:
-
-* Fix prometheus process metrics in worker processes (PR #1184)
-
-
-Changes in synapse v0.18.2-rc4 (2016-10-27)
-===========================================
-
-Bug fixes:
-
-* Fix ``user_threepids`` schema delta, which in some instances prevented
- startup after upgrade (PR #1183)
-
-
-Changes in synapse v0.18.2-rc3 (2016-10-27)
-===========================================
-
-Changes:
-
-* Allow clients to supply access tokens as headers (PR #1098); a short illustration follows this list
-* Clarify error codes for GET /filter/, thanks to Alexander Maznev (PR #1164)
-* Make password reset email field case insensitive (PR #1170)
-* Reduce redundant database work in email pusher (PR #1174)
-* Allow configurable rate limiting per AS (PR #1175)
-* Check whether to ratelimit sooner to avoid work (PR #1176)
-* Standardise prometheus metrics (PR #1177)
-
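-A hedged illustration of the two token-passing styles now accepted (the token
-value is fake)::
-
-    headers = {"Authorization": "Bearer MDAxFakeToken"}  # new: header
-    params = {"access_token": "MDAxFakeToken"}           # old: query string
-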
-
-Bug fixes:
-
-* Fix incredibly slow back pagination query (PR #1178)
-* Fix infinite typing bug (PR #1179)
-
-
-Changes in synapse v0.18.2-rc2 (2016-10-25)
-===========================================
-
-(This release did not include the changes advertised and was identical to RC1)
-
-
-Changes in synapse v0.18.2-rc1 (2016-10-17)
-===========================================
-
-Changes:
-
-* Remove redundant event_auth index (PR #1113)
-* Reduce DB hits for replication (PR #1141)
-* Implement pluggable password auth (PR #1155)
-* Remove rate limiting from app service senders and fix get_or_create_user
- requester, thanks to Patrik Oldsberg (PR #1157)
-* window.postmessage for Interactive Auth fallback (PR #1159)
-* Use sys.executable instead of hardcoded python, thanks to Pedro Larroy
- (PR #1162)
-* Add config option for adding additional TLS fingerprints (PR #1167)
-* User-interactive auth on delete device (PR #1168)
-
-
-Bug fixes:
-
-* Fix not being allowed to set your own state_key, thanks to Patrik Oldsberg
- (PR #1150)
-* Fix interactive auth to return 401 from for incorrect password (PR #1160,
- #1166)
-* Fix email push notifs being dropped (PR #1169)
-
-
-
-Changes in synapse v0.18.1 (2016-10-05)
-=======================================
-
-No changes since v0.18.1-rc1
-
-
-Changes in synapse v0.18.1-rc1 (2016-09-30)
-===========================================
-
-Features:
-
-* Add total_room_count_estimate to ``/publicRooms`` (PR #1133)
-
-
-Changes:
-
-* Time out typing over federation (PR #1140)
-* Restructure LDAP authentication (PR #1153)
-
-
-Bug fixes:
-
-* Fix 3pid invites when server is already in the room (PR #1136)
-* Fix upgrading with SQLite taking lots of CPU for a few days
- after upgrade (PR #1144)
-* Fix upgrading from very old database versions (PR #1145)
-* Fix port script to work with recently added tables (PR #1146)
-
-
-Changes in synapse v0.18.0 (2016-09-19)
-=======================================
-
-The release includes major changes to the state storage database schemas,
-which significantly reduce database size. Synapse will attempt to upgrade the
-current data in the background. Servers with large SQLite databases may
-experience degraded performance while this upgrade is in progress; you may
-therefore want to consider migrating to Postgres before upgrading very large
-SQLite databases.
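-
-As a sketch of migrating beforehand with the bundled port script (illustrative
-file names; see docs/postgres.rst for the full instructions)::
-
-    $ synctl stop
-    $ synapse_port_db --sqlite-database homeserver.db \
-        --postgres-config homeserver-postgres.yaml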
-
-
-Changes:
-
-* Make public room search case insensitive (PR #1127)
-
-
-Bug fixes:
-
-* Fix and clean up publicRooms pagination (PR #1129)
-
-
-Changes in synapse v0.18.0-rc1 (2016-09-16)
-===========================================
-
-Features:
-
-* Add ``only=highlight`` on ``/notifications`` (PR #1081; see the example
-  after this list)
-* Add server param to /publicRooms (PR #1082)
-* Allow clients to ask for the whole of a single state event (PR #1094)
-* Add is_direct param to /createRoom (PR #1108)
-* Add pagination support to publicRooms (PR #1121)
-* Add very basic filter API to /publicRooms (PR #1126)
-* Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104,
- #1111)
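-
-As a sketch of the new ``only=highlight`` filter (illustrative homeserver and
-token; the path is shown as in the r0 spec)::
-
-    $ curl -G "https://localhost:8448/_matrix/client/r0/notifications" \
-        --data-urlencode "only=highlight" \
-        --data-urlencode "access_token=<access_token>"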
-
-
-Changes:
-
-* Move to storing state_groups_state as deltas, greatly reducing DB size (PR
- #1065)
-* Reduce amount of state pulled out of the DB during common requests (PR #1069)
-* Allow PDF to be rendered from media repo (PR #1071)
-* Reindex state_groups_state after pruning (PR #1085)
-* Clobber EDUs in send queue (PR #1095)
-* Conform better to the CAS protocol specification (PR #1100)
-* Limit how often we ask for keys from dead servers (PR #1114)
-
-
-Bug fixes:
-
-* Fix /notifications API when used with ``from`` param (PR #1080)
-* Fix backfill when cannot find an event. (PR #1107)
-
-
-Changes in synapse v0.17.3 (2016-09-09)
-=======================================
-
-This release fixes a major bug that stopped servers from handling rooms with
-over 1000 members.
-
-
-Changes in synapse v0.17.2 (2016-09-08)
-=======================================
-
-This release contains security bug fixes. Please upgrade.
-
-
-No changes since v0.17.2-rc1
-
-
-Changes in synapse v0.17.2-rc1 (2016-09-05)
-===========================================
-
-Features:
-
-* Start adding store-and-forward direct-to-device messaging (PR #1046, #1050,
- #1062, #1066)
-
-
-Changes:
-
-* Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063,
- #1068)
-* Don't notify for online to online presence transitions. (PR #1054)
-* Occasionally persist unpersisted presence updates (PR #1055)
-* Allow application services to have an optional 'url' (PR #1056)
-* Clean up old sent transactions from DB (PR #1059)
-
-
-Bug fixes:
-
-* Fix None check in backfill (PR #1043)
-* Fix membership changes to be idempotent (PR #1067)
-* Fix bug in get_pdu where it would sometimes return events with incorrect
- signature
-
-
-
-Changes in synapse v0.17.1 (2016-08-24)
-=======================================
-
-Changes:
-
-* Delete old received_transactions rows (PR #1038)
-* Pass through user-supplied content in /join/$room_id (PR #1039)
-
-
-Bug fixes:
-
-* Fix bug with backfill (PR #1040)
-
-
-Changes in synapse v0.17.1-rc1 (2016-08-22)
-===========================================
-
-Features:
-
-* Add notification API (PR #1028)
-
-
-Changes:
-
-* Don't print stack traces when failing to get remote keys (PR #996)
-* Various federation /event/ perf improvements (PR #998)
-* Only process one local membership event per room at a time (PR #1005)
-* Move default display name push rule (PR #1011, #1023)
-* Fix up preview URL API. Add tests. (PR #1015)
-* Set ``Content-Security-Policy`` on media repo (PR #1021)
-* Make notify_interested_services faster (PR #1022)
-* Add usage stats to prometheus monitoring (PR #1037)
-
-
-Bug fixes:
-
-* Fix token login (PR #993)
-* Fix CAS login (PR #994, #995)
-* Fix /sync to not clobber status_msg (PR #997)
-* Fix redacted state events to include prev_content (PR #1003)
-* Fix some bugs in the auth/ldap handler (PR #1007)
-* Fix backfill request to limit URI length, so that remotes don't reject the
- requests due to path length limits (PR #1012)
-* Fix AS push code to not send duplicate events (PR #1025)
-
-
-
-Changes in synapse v0.17.0 (2016-08-08)
-=======================================
-
-This release contains significant security bug fixes regarding authenticating
-events received over federation. PLEASE UPGRADE.
-
-This release changes the LDAP configuration format in a backwards incompatible
-way, see PR #843 for details.
-
-
-Changes:
-
-* Add federation /version API (PR #990)
-* Make psutil dependency optional (PR #992)
-
-
-Bug fixes:
-
-* Fix URL preview API to exclude HTML comments in description (PR #988)
-* Fix error handling of remote joins (PR #991)
-
-
-Changes in synapse v0.17.0-rc4 (2016-08-05)
-===========================================
-
-Changes:
-
-* Change the way we summarize URLs when previewing (PR #973)
-* Add new ``/state_ids/`` federation API (PR #979)
-* Speed up processing of ``/state/`` response (PR #986)
-
-Bug fixes:
-
-* Fix event persistence when event has already been partially persisted
- (PR #975, #983, #985)
-* Fix port script to also copy across backfilled events (PR #982)
-
-
-Changes in synapse v0.17.0-rc3 (2016-08-02)
-===========================================
-
-Changes:
-
-* Forbid non-ASes from registering users whose names begin with '_' (PR #958)
-* Add some basic admin API docs (PR #963)
-
-
-Bug fixes:
-
-* Send the correct host header when fetching keys (PR #941)
-* Fix joining a room that has missing auth events (PR #964)
-* Fix various push bugs (PR #966, #970)
-* Fix adding emails on registration (PR #968)
-
-
-Changes in synapse v0.17.0-rc2 (2016-08-02)
-===========================================
-
-(This release did not include the changes advertised and was identical to RC1)
-
-
-Changes in synapse v0.17.0-rc1 (2016-07-28)
-===========================================
-
-This release changes the LDAP configuration format in a backwards incompatible
-way, see PR #843 for details.
-
-
-Features:
-
-* Add purge_media_cache admin API (PR #902)
-* Add deactivate account admin API (PR #903)
-* Add optional pepper to password hashing (PR #907, #910 by KentShikama)
-* Add an admin option to shared secret registration (breaks backwards compat)
- (PR #909)
-* Add purge local room history API (PR #911, #923, #924)
-* Add requestToken endpoints (PR #915)
-* Add an /account/deactivate endpoint (PR #921)
-* Add filter param to /messages. Add 'contains_url' to filter. (PR #922)
-* Add device_id support to /login (PR #929)
-* Add device_id support to /v2/register flow. (PR #937, #942)
-* Add GET /devices endpoint (PR #939, #944; see the sketch after this list)
-* Add GET /device/{deviceId} (PR #943)
-* Add update and delete APIs for devices (PR #949)
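-
-As a sketch of the new device APIs (illustrative homeserver and token; in
-this release the endpoints may have been exposed under the ``unstable``
-prefix rather than ``r0``)::
-
-    $ curl -H "Authorization: Bearer <access_token>" \
-        "https://localhost:8448/_matrix/client/r0/devices"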
-
-
-Changes:
-
-* Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
-* Linearize some federation endpoints based on (origin, room_id) (PR #879)
-* Remove the legacy v0 content upload API. (PR #888)
-* Use similar naming we use in email notifs for push (PR #894)
-* Optionally include password hash in createUser endpoint (PR #905 by
- KentShikama)
-* Use a query that postgresql optimises better for get_events_around (PR #906)
-* Fall back to 'username' if 'user' is not given for appservice registration.
- (PR #927 by Half-Shot)
-* Add metrics for psutil derived memory usage (PR #936)
-* Record device_id in client_ips (PR #938)
-* Send the correct host header when fetching keys (PR #941)
-* Log the hostname the reCAPTCHA was completed on (PR #946)
-* Make the device id on e2e key upload optional (PR #956)
-* Add r0.2.0 to the "supported versions" list (PR #960)
-* Don't include name of room for invites in push (PR #961)
-
-
-Bug fixes:
-
-* Fix substitution failure in mail template (PR #887)
-* Put most recent 20 messages in email notif (PR #892)
-* Ensure that the guest user is in the database when upgrading accounts
- (PR #914)
-* Fix various edge cases in auth handling (PR #919)
-* Fix 500 ISE when sending alias event without a state_key (PR #925)
-* Fix bug where we stored rejections in the state_group, persist all
- rejections (PR #948)
-* Fix lack of check of if the user is banned when handling 3pid invites
- (PR #952)
-* Fix a couple of bugs in the transaction and keyring code (PR #954, #955)
-
-
-
-Changes in synapse v0.16.1-r1 (2016-07-08)
-==========================================
-
-THIS IS A CRITICAL SECURITY UPDATE.
-
-This fixes a bug which allowed users' accounts to be accessed by unauthorised
-users.
-
-Changes in synapse v0.16.1 (2016-06-20)
-=======================================
-
-Bug fixes:
-
-* Fix assorted bugs in ``/preview_url`` (PR #872)
-* Fix TypeError when setting unicode passwords (PR #873)
-
-
-Performance improvements:
-
-* Turn ``use_frozen_events`` off by default (PR #877)
-* Disable responding with canonical json for federation (PR #878)
-
-
-Changes in synapse v0.16.1-rc1 (2016-06-15)
-===========================================
-
-Features: None
-
-Changes:
-
-* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
-* 502 on ``/thumbnail`` when can't connect to remote server (PR #862)
-* Linearize fetching of gaps on incoming events (PR #871)
-
-
-Bug fixes:
-
-* Fix bug where rooms where marked as published by default (PR #857)
-* Fix bug where joining room with an event with invalid sender (PR #868)
-* Fix bug where backfilled events were sent down sync streams (PR #869)
-* Fix bug where outgoing connections could wedge indefinitely, causing push
- notifications to be unreliable (PR #870)
-
-
-Performance improvements:
-
-* Improve ``/publicRooms`` performance (PR #859)
-
-
-Changes in synapse v0.16.0 (2016-06-09)
-=======================================
-
-NB: As of v0.14 all AS config files must have an ID field.
-
-
-Bug fixes:
-
-* Don't make rooms published by default (PR #857)
-
-Changes in synapse v0.16.0-rc2 (2016-06-08)
-===========================================
-
-Features:
-
-* Add configuration option for tuning GC via ``gc.set_threshold`` (PR #849)
-
-Changes:
-
-* Record metrics about GC (PR #771, #847, #852)
-* Add metric counter for number of persisted events (PR #841)
-
-Bug fixes:
-
-* Fix 'From' header in email notifications (PR #843)
-* Fix presence bug where timeouts were not being fired for the first 8h after
-  restarts (PR #842)
-* Fix bug where synapse sent malformed transactions to AS's when retrying
- transactions (Commits 310197b, 8437906)
-
-Performance improvements:
-
-* Remove event fetching from DB threads (PR #835)
-* Change the way we cache events (PR #836)
-* Add events to cache when we persist them (PR #840)
-
-
-Changes in synapse v0.16.0-rc1 (2016-06-03)
-===========================================
-
-Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.
-
-Features:
-
-* Add email notifications for missed messages (PR #759, #786, #799, #810, #815,
- #821)
-* Add a ``url_preview_ip_range_whitelist`` config param (PR #760)
-* Add /report endpoint (PR #762)
-* Add basic ignore user API (PR #763)
-* Add an openidish mechanism for proving that you own a given user_id (PR #765)
-* Allow clients to specify a server_name to avoid 'No known servers' (PR #794)
-* Add secondary_directory_servers option to fetch room list from other servers
- (PR #808, #813)
-
-Changes:
-
-* Report per request metrics for all of the things using request_handler (PR
- #756)
-* Correctly handle ``NULL`` password hashes from the database (PR #775)
-* Allow receipts for events we haven't seen in the db (PR #784)
-* Make synctl read a cache factor from config file (PR #785)
-* Increment badge count per missed convo, not per msg (PR #793)
-* Special case m.room.third_party_invite event auth to match invites (PR #814)
-
-
-Bug fixes:
-
-* Fix typo in event_auth servlet path (PR #757)
-* Fix password reset (PR #758)
-
-
-Performance improvements:
-
-* Reduce database inserts when sending transactions (PR #767)
-* Queue events by room for persistence (PR #768)
-* Add cache to ``get_user_by_id`` (PR #772)
-* Add and use ``get_domain_from_id`` (PR #773)
-* Use tree cache for ``get_linearized_receipts_for_room`` (PR #779)
-* Remove unused indices (PR #782)
-* Add caches to ``bulk_get_push_rules*`` (PR #804)
-* Cache ``get_event_reference_hashes`` (PR #806)
-* Add ``get_users_with_read_receipts_in_room`` cache (PR #809)
-* Use state to calculate ``get_users_in_room`` (PR #811)
-* Load push rules in storage layer so that they get cached (PR #825)
-* Make ``get_joined_hosts_for_room`` use get_users_in_room (PR #828)
-* Poke notifier on next reactor tick (PR #829)
-* Change CacheMetrics to be quicker (PR #830)
-
-
-Changes in synapse v0.15.0-rc1 (2016-04-26)
-===========================================
-
-Features:
-
-* Add login support for JSON Web Tokens, thanks to Niklas Riekenbrauck
-  (PR #671, #687)
-* Add URL previewing support (PR #688)
-* Add login support for LDAP, thanks to Christoph Witzany (PR #701)
-* Add GET endpoint for pushers (PR #716)
-
-Changes:
-
-* Never notify for member events (PR #667)
-* Deduplicate identical ``/sync`` requests (PR #668)
-* Require user to have left room to forget room (PR #673)
-* Use DNS cache if within TTL (PR #677)
-* Let users see their own leave events (PR #699)
-* Deduplicate membership changes (PR #700)
-* Increase performance of pusher code (PR #705)
-* Respond with error status 504 if we fail to talk to a remote server
-  (PR #731)
-* Increase search performance on postgres (PR #745)
-
-Bug fixes:
-
-* Fix bug where disabling all notifications still resulted in push (PR #678)
-* Fix bug where users couldn't reject remote invites if remote refused (PR #691)
-* Fix bug where synapse attempted to backfill from itself (PR #693)
-* Fix bug where profile information was not correctly added when joining remote
- rooms (PR #703)
-* Fix bug where register API required incorrect key name for AS registration
- (PR #727)
-
-
-Changes in synapse v0.14.0 (2016-03-30)
-=======================================
-
-No changes from v0.14.0-rc2
-
-Changes in synapse v0.14.0-rc2 (2016-03-23)
-===========================================
-
-Features:
-
-* Add published room list API (PR #657)
-
-Changes:
-
-* Change various caches to consume less memory (PR #656, #658, #660, #662,
- #663, #665)
-* Allow rooms to be published without requiring an alias (PR #664)
-* Intern common strings in caches to reduce memory footprint (#666)
-
-Bug fixes:
-
-* Fix reject invites over federation (PR #646)
-* Fix bug where registration was not idempotent (PR #649)
-* Update aliases event after deleting aliases (PR #652)
-* Fix unread notification count, which was sometimes wrong (PR #661)
-
-Changes in synapse v0.14.0-rc1 (2016-03-14)
-===========================================
-
-Features:
-
-* Add event_id to response to state event PUT (PR #581)
-* Allow guest users access to messages in rooms they have joined (PR #587)
-* Add config for what state is included in a room invite (PR #598)
-* Send the inviter's member event in room invite state (PR #607)
-* Add error codes for malformed/bad JSON in /login (PR #608)
-* Add support for changing the actions for default rules (PR #609)
-* Add environment variable SYNAPSE_CACHE_FACTOR, defaulting to 0.1 (PR #612;
-  see the example after this list)
-* Add ability for alias creators to delete aliases (PR #614)
-* Add profile information to invites (PR #624)
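-
-For example, to trade memory for cache hit rate on a well-resourced host
-(illustrative value; the variable is read by the homeserver process, so it
-can be set when starting via synctl)::
-
-    $ SYNAPSE_CACHE_FACTOR=1.0 synctl start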
-
-Changes:
-
-* Enforce user_id exclusivity for AS registrations (PR #572)
-* Make adding push rules idempotent (PR #587)
-* Improve presence performance (PR #582, #586)
-* Change presence semantics for ``last_active_ago`` (PR #582, #586)
-* Don't allow ``m.room.create`` to be changed (PR #596)
-* Add 800x600 to default list of valid thumbnail sizes (PR #616)
-* Always include kicks and bans in full /sync (PR #625)
-* Send history visibility on boundary changes (PR #626)
-* Register endpoint now returns a refresh_token (PR #637)
-
-Bug fixes:
-
-* Fix bug where we returned incorrect state in /sync (PR #573)
-* Always return a JSON object from push rule API (PR #606)
-* Fix bug where registering without a user id sometimes failed (PR #610)
-* Report size of ExpiringCache in cache size metrics (PR #611)
-* Fix rejection of invites to empty rooms (PR #615)
-* Fix usage of ``bcrypt`` to not use ``checkpw`` (PR #619)
-* Pin ``pysaml2`` dependency (PR #634)
-* Fix bug in ``/sync`` where timeline order was incorrect for backfilled events
- (PR #635)
-
-Changes in synapse v0.13.3 (2016-02-11)
-=======================================
-
-* Fix bug where ``/sync`` would occasionally return events in the wrong room.
-
-Changes in synapse v0.13.2 (2016-02-11)
-=======================================
-
-* Fix bug where ``/events`` would fail to skip some events if there had been
- more events than the limit specified since the last request (PR #570)
-
-Changes in synapse v0.13.1 (2016-02-10)
-=======================================
-
-* Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to
- pull in the fix for SYWEB-361 so that the default client can display
- HTML messages again(!)
-
-Changes in synapse v0.13.0 (2016-02-10)
-=======================================
-
-This version includes an upgrade of the schema, specifically adding an index to
-the ``events`` table. This may cause synapse to pause for several minutes the
-first time it is started after the upgrade.
-
-Changes:
-
-* Improve general performance (PR #540, #543, #544, #54, #549, #567)
-* Change guest user ids to be incrementing integers (PR #550)
-* Improve performance of public room list API (PR #552)
-* Change profile API to omit keys rather than return null (PR #557)
-* Add ``/media/r0`` endpoint prefix, which is equivalent to ``/media/v1/``
- (PR #595)
-
-Bug fixes:
-
-* Fix bug with upgrading guest accounts where it would fail if you opened the
- registration email on a different device (PR #547)
-* Fix bug where unread count could be wrong (PR #568)
-
-
-
-Changes in synapse v0.12.1-rc1 (2016-01-29)
-===========================================
-
-Features:
-
-* Add unread notification counts in ``/sync`` (PR #456)
-* Add support for inviting 3pids in ``/createRoom`` (PR #460)
-* Add ability for guest accounts to upgrade (PR #462)
-* Add ``/versions`` API (PR #468)
-* Add ``event`` to ``/context`` API (PR #492)
-* Add specific error code for invalid user names in ``/register`` (PR #499)
-* Add support for push badge counts (PR #507)
-* Add support for non-guest users to peek in rooms using ``/events`` (PR #510)
-
-Changes:
-
-* Change ``/sync`` so that guest users only get rooms they've joined (PR #469)
-* Change to require unbanning before other membership changes (PR #501)
-* Change default push rules to notify for all messages (PR #486)
-* Change default push rules to not notify on membership changes (PR #514)
-* Change default push rules in one to one rooms to only notify for events that
- are messages (PR #529)
-* Change ``/sync`` to reject requests with a ``from`` query param (PR #512)
-* Change server manhole to use SSH rather than telnet (PR #473)
-* Change server to require AS users to be registered before use (PR #487)
-* Change server not to start when ASes are invalidly configured (PR #494)
-* Change server to require ID and ``as_token`` to be unique for AS's (PR #496)
-* Change maximum pagination limit to 1000 (PR #497)
-
-Bug fixes:
-
-* Fix bug where ``/sync`` didn't return when something under the leave key
- changed (PR #461)
-* Fix bug where we returned smaller rather than larger than requested
- thumbnails when ``method=crop`` (PR #464)
-* Fix thumbnails API to only return cropped thumbnails when asking for a
- cropped thumbnail (PR #475)
-* Fix bug where we occasionally still logged access tokens (PR #477)
-* Fix bug where ``/events`` would always return immediately for guest users
- (PR #480)
-* Fix bug where ``/sync`` unexpectedly returned old left rooms (PR #481)
-* Fix enabling and disabling push rules (PR #498)
-* Fix bug where ``/register`` returned 500 when given unicode username
- (PR #513)
-
-Changes in synapse v0.12.0 (2016-01-04)
-=======================================
-
-* Expose ``/login`` under ``r0`` (PR #459)
-
-Changes in synapse v0.12.0-rc3 (2015-12-23)
-===========================================
-
-* Allow guest accounts access to ``/sync`` (PR #455)
-* Allow filters to include/exclude rooms at the room level
- rather than just from the components of the sync for each
- room. (PR #454)
-* Include urls for room avatars in the response to ``/publicRooms`` (PR #453)
-* Don't set an identicon as the avatar for a user when they register (PR #450)
-* Add a ``display_name`` to third-party invites (PR #449)
-* Send more information to the identity server for third-party invites so that
- it can send richer messages to the invitee (PR #446)
-* Cache the responses to ``/initialSync`` for 5 minutes. If a client
-  retries a request to ``/initialSync`` before a response to the first
-  request has been computed, the same response is used for both requests
-  (PR #457)
-* Fix a bug where synapse would always request the signing keys of
- remote servers even when the key was cached locally (PR #452)
-* Fix 500 error when paginating search results (PR #447)
-* Fix a bug where synapse was leaking raw email addresses in third-party
-  invites (PR #448)
-
-Changes in synapse v0.12.0-rc2 (2015-12-14)
-===========================================
-
-* Add caches for whether rooms have been forgotten by a user (PR #434)
-* Remove instructions to use ``--process-dependency-link`` since all of the
- dependencies of synapse are on PyPI (PR #436)
-* Parallelise the processing of ``/sync`` requests (PR #437)
-* Fix race updating presence in ``/events`` (PR #444)
-* Fix bug back-populating search results (PR #441)
-* Fix bug calculating state in ``/sync`` requests (PR #442)
-
-Changes in synapse v0.12.0-rc1 (2015-12-10)
-===========================================
-
-* Host the client APIs released as r0 (see
-  https://matrix.org/docs/spec/r0.0.0/client_server.html)
-  on paths prefixed by ``/_matrix/client/r0``. (PR #430, PR #415, PR #400)
-* Updates the client APIs to match r0 of the matrix specification.
-
- * All APIs return events in the new event format, old APIs also include
- the fields needed to parse the event using the old format for
- compatibility. (PR #402)
- * Search results are now given as a JSON array rather than
- a JSON object (PR #405)
- * Miscellaneous changes to search (PR #403, PR #406, PR #412)
-  * Filter JSON objects may now be passed as query parameters to ``/sync``
-    (PR #431; see the sketch below)
- * Fix implementation of ``/admin/whois`` (PR #418)
- * Only include the rooms that user has left in ``/sync`` if the client
- requests them in the filter (PR #423)
- * Don't push for ``m.room.message`` by default (PR #411)
- * Add API for setting per account user data (PR #392)
- * Allow users to forget rooms (PR #385)
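-
-  As a sketch of passing a filter inline (illustrative homeserver and
-  token)::
-
-      $ curl -G "https://localhost:8448/_matrix/client/r0/sync" \
-          --data-urlencode 'filter={"room":{"timeline":{"limit":10}}}' \
-          --data-urlencode 'access_token=<access_token>'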
-
-* Performance improvements and monitoring:
-
- * Add per-request counters for CPU time spent on the main python thread.
- (PR #421, PR #420)
- * Add per-request counters for time spent in the database (PR #429)
- * Make state updates in the C+S API idempotent (PR #416)
- * Only fire ``user_joined_room`` if the user has actually joined. (PR #410)
- * Reuse a single http client, rather than creating new ones (PR #413)
-
-* Fixed a bug upgrading from older versions of synapse on postgresql (PR #417)
-
-Changes in synapse v0.11.1 (2015-11-20)
-=======================================
-
-* Add extra options to search API (PR #394)
-* Fix bug where we did not correctly cap federation retry timers. This meant it
-  could take several hours for servers to start talking to resurrected servers,
-  even when they were receiving traffic from them (PR #393)
-* Don't advertise login token flow unless CAS is enabled. This caused issues
- where some clients would always use the fallback API if they did not
- recognize all login flows (PR #391)
-* Change /v2 sync API to rename ``private_user_data`` to ``account_data``
- (PR #386)
-* Change /v2 sync API to remove the ``event_map`` and rename keys in ``rooms``
- object (PR #389)
-
-Changes in synapse v0.11.0-r2 (2015-11-19)
-==========================================
-
-* Fix bug in database port script (PR #387)
-
-Changes in synapse v0.11.0-r1 (2015-11-18)
-==========================================
-
-* Retry and fail federation requests more aggressively for requests that block
- client side requests (PR #384)
-
-Changes in synapse v0.11.0 (2015-11-17)
-=======================================
-
-* Change CAS login API (PR #349)
-
-Changes in synapse v0.11.0-rc2 (2015-11-13)
-===========================================
-
-* Various changes to /sync API response format (PR #373)
-* Fix regression when setting display name in newly joined room over
- federation (PR #368)
-* Fix problem where /search was slow when using SQLite (PR #366)
-
-Changes in synapse v0.11.0-rc1 (2015-11-11)
-===========================================
-
-* Add Search API (PR #307, #324, #327, #336, #350, #359)
-* Add 'archived' state to v2 /sync API (PR #316)
-* Add ability to reject invites (PR #317)
-* Add config option to disable password login (PR #322)
-* Add the login fallback API (PR #330)
-* Add room context API (PR #334)
-* Add room tagging support (PR #335)
-* Update v2 /sync API to match spec (PR #305, #316, #321, #332, #337, #341)
-* Change retry schedule for application services (PR #320)
-* Change retry schedule for remote servers (PR #340)
-* Fix bug where we hosted static content in the incorrect place (PR #329)
-* Fix bug where we didn't increment retry interval for remote servers (PR #343)
-
-Changes in synapse v0.10.1-rc1 (2015-10-15)
-===========================================
-
-* Add support for CAS, thanks to Steven Hammerton (PR #295, #296)
-* Add support for using macaroons for ``access_token`` (PR #256, #229)
-* Add support for ``m.room.canonical_alias`` (PR #287)
-* Add support for users to view the history of rooms that they have left.
-  (PR #276, #294)
-* Add support for refresh tokens (PR #240)
-* Add flag on creation which disables federation of the room (PR #279)
-* Add some room state to invites. (PR #275)
-* Atomically persist events when joining a room over federation (PR #283)
-* Change default history visibility for private rooms (PR #271)
-* Allow users to redact their own sent events (PR #262)
-* Use tox for tests (PR #247)
-* Split up syutil into separate libraries (PR #243)
-
-Changes in synapse v0.10.0-r2 (2015-09-16)
-==========================================
-
-* Fix bug where we always fetched remote server signing keys instead of using
- ones in our cache.
-* Fix adding threepids to an existing account.
-* Fix bug with inviting over federation where the remote server was already in
-  the room. (PR #281, SYN-392)
-
-Changes in synapse v0.10.0-r1 (2015-09-08)
-==========================================
-
-* Fix bug with python packaging
-
-Changes in synapse v0.10.0 (2015-09-03)
-=======================================
-
-No change from release candidate.
-
-Changes in synapse v0.10.0-rc6 (2015-09-02)
-===========================================
-
-* Remove some of the old database upgrade scripts.
-* Fix database port script to work with newly created sqlite databases.
-
-Changes in synapse v0.10.0-rc5 (2015-08-27)
-===========================================
-
-* Fix bug that broke downloading files with ascii filenames across federation.
-
-Changes in synapse v0.10.0-rc4 (2015-08-27)
-===========================================
-
-* Allow UTF-8 filenames for upload. (PR #259)
-
-Changes in synapse v0.10.0-rc3 (2015-08-25)
-===========================================
-
-* Add ``--keys-directory`` config option to specify where files such as
-  certs and signing keys should be stored, when using ``--generate-config``
-  or ``--generate-keys``. (PR #250)
-* Allow ``--config-path`` to specify a directory, causing synapse to use all
-  \*.yaml files in the directory as config files. (PR #249; see the second
-  example below)
-* Add ``web_client_location`` config option to specify static files to be
- hosted by synapse under ``/_matrix/client``. (PR #245)
-* Add helper utility to synapse to read and parse the config files and extract
- the value of a given key. For example::
-
- $ python -m synapse.config read server_name -c homeserver.yaml
- localhost
-
- (PR #246)
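-
-Similarly, a sketch of pointing synapse at a config directory (illustrative
-path; every ``*.yaml`` file under it is read as config)::
-
-    $ python -m synapse.app.homeserver --config-path /etc/synapse/conf.d/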
-
-
-Changes in synapse v0.10.0-rc2 (2015-08-24)
-===========================================
-
-* Fix bug where we incorrectly populated the ``event_forward_extremities``
- table, resulting in problems joining large remote rooms (e.g.
- ``#matrix:matrix.org``)
-* Reduce the number of times we wake up pushers by not listening for presence
- or typing events, reducing the CPU cost of each pusher.
-
-
-Changes in synapse v0.10.0-rc1 (2015-08-21)
-===========================================
-
-Also see v0.9.4-rc1 changelog, which has been amalgamated into this release.
-
-General:
-
-* Upgrade to Twisted 15 (PR #173)
-* Add support for serving and fetching encryption keys over federation.
- (PR #208)
-* Add support for logging in with email address (PR #234)
-* Add support for new ``m.room.canonical_alias`` event. (PR #233)
-* Change synapse to treat user IDs case insensitively during registration and
- login. (If two users already exist with case insensitive matching user ids,
- synapse will continue to require them to specify their user ids exactly.)
-* Error if a user tries to register with an email already in use. (PR #211)
-* Add extra and improve existing caches (PR #212, #219, #226, #228)
-* Batch various storage requests (PR #226, #228)
-* Fix bug where we didn't correctly log the entity that triggered the request
- if the request came in via an application service (PR #230)
-* Fix bug where we needlessly regenerated the full list of rooms an AS is
- interested in. (PR #232)
-* Add support for AS's to use v2_alpha registration API (PR #210)
-
-
-Configuration:
-
-* Add ``--generate-keys`` that will generate any missing cert and key files
-  referenced by the configuration. This is equivalent to running
-  ``--generate-config`` on an existing configuration file. (PR #220; see the
-  example after this list)
-* ``--generate-config`` now no longer requires a ``--server-name`` parameter
- when used on existing configuration files. (PR #220)
-* Add ``--print-pidfile`` flag that controls the printing of the pid to stdout
-  of the daemonised process. (PR #213)
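-
-For example, to regenerate missing keys against an existing config
-(illustrative file name)::
-
-    $ python -m synapse.app.homeserver --config-path homeserver.yaml \
-        --generate-keys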
-
-Media Repository:
-
-* Fix bug where we picked a lower resolution image than requested. (PR #205)
-* Add support for specifying whether the media repository should dynamically
-  thumbnail images or not. (PR #206)
-
-Metrics:
-
-* Add statistics from the reactor to the metrics API. (PR #224, #225)
-
-Demo Homeservers:
-
-* Fix starting the demo homeservers without rate-limiting enabled. (PR #182)
-* Fix enabling registration on demo homeservers (PR #223)
-
-
-Changes in synapse v0.9.4-rc1 (2015-07-21)
-==========================================
-
-General:
-
-* Add basic implementation of receipts. (SPEC-99)
-* Add support for configuration presets in room creation API. (PR #203)
-* Add auth event that limits the visibility of history for new users.
- (SPEC-134)
-* Add SAML2 login/registration support. (PR #201. Thanks Muthu Subramanian!)
-* Add client side key management APIs for end to end encryption. (PR #198)
-* Change power level semantics so that you cannot kick, ban or change power
- levels of users that have equal or greater power level than you. (SYN-192)
-* Improve performance by bulk inserting events where possible. (PR #193)
-* Improve performance by bulk verifying signatures where possible. (PR #194)
-
-
-Configuration:
-
-* Add support for including TLS certificate chains.
-
-Media Repository:
-
-* Add Content-Disposition headers to content repository responses. (SYN-150)
-
-
-Changes in synapse v0.9.3 (2015-07-01)
-======================================
-
-No changes from v0.9.3 Release Candidate 1.
-
-Changes in synapse v0.9.3-rc1 (2015-06-23)
-==========================================
-
-General:
-
-* Fix a memory leak in the notifier. (SYN-412)
-* Improve performance of room initial sync. (SYN-418)
-* General improvements to logging.
-* Remove ``access_token`` query params from ``INFO`` level logging.
-
-Configuration:
-
-* Add support for specifying and configuring multiple listeners. (SYN-389)
-
-Application services:
-
-* Fix bug where synapse failed to send user queries to application services.
-
-Changes in synapse v0.9.2-r2 (2015-06-15)
-=========================================
-
-Fix packaging so that schema delta python files get included in the package.
-
-Changes in synapse v0.9.2 (2015-06-12)
-======================================
-
-General:
-
-* Use ultrajson for json (de)serialisation when a canonical encoding is not
- required. Ultrajson is significantly faster than simplejson in certain
- circumstances.
-* Use connection pools for outgoing HTTP connections.
-* Process thumbnails on separate threads.
-
-Configuration:
-
-* Add option, ``gzip_responses``, to disable HTTP response compression.
-
-Federation:
-
-* Improve resilience of backfill by ensuring we fetch any missing auth events.
-* Improve performance of backfill and joining remote rooms by removing
- unnecessary computations. This included handling events we'd previously
- handled as well as attempting to compute the current state for outliers.
-
-
-Changes in synapse v0.9.1 (2015-05-26)
-======================================
-
-General:
-
-* Add support for backfilling when a client paginates. This allows servers to
- request history for a room from remote servers when a client tries to
- paginate history the server does not have - SYN-36
-* Fix bug where you couldn't disable non-default pushrules - SYN-378
-* Fix ``register_new_user`` script - SYN-359
-* Improve performance of fetching events from the database, this improves both
- initialSync and sending of events.
-* Improve performance of event streams, allowing synapse to handle more
- simultaneous connected clients.
-
-Federation:
-
-* Fix bug with existing backfill implementation where it returned the wrong
- selection of events in some circumstances.
-* Improve performance of joining remote rooms.
-
-Configuration:
-
-* Add support for changing the bind host of the metrics listener via the
- ``metrics_bind_host`` option.
-
-
-Changes in synapse v0.9.0-r5 (2015-05-21)
-=========================================
-
-* Add more database caches to reduce amount of work done for each pusher. This
- radically reduces CPU usage when multiple pushers are set up in the same room.
-
-Changes in synapse v0.9.0 (2015-05-07)
-======================================
-
-General:
-
-* Add support for using a PostgreSQL database instead of SQLite. See
-  `docs/postgres.rst`_ for details and the sketch after this list.
-* Add password change and reset APIs. See `Registration`_ in the spec.
-* Fix memory leak due to not releasing stale notifiers - SYN-339.
-* Fix race in caches that occasionally caused some presence updates to be
- dropped - SYN-369.
-* Check server name has not changed on restart.
-* Add a sample systemd unit file and a logger configuration in
-  contrib/systemd. Contributed by Ivan Shapovalov.
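-
-As a sketch of preparing a PostgreSQL database (illustrative user and
-database names; see `docs/postgres.rst`_ for the authoritative steps)::
-
-    $ sudo -u postgres createuser --pwprompt synapse_user
-    $ sudo -u postgres createdb --encoding=UTF8 --locale=C \
-        --template=template0 --owner=synapse_user synapse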
-
-Federation:
-
-* Add key distribution mechanisms for fetching public keys of unavailable
- remote home servers. See `Retrieving Server Keys`_ in the spec.
-
-Configuration:
-
-* Add support for multiple config files.
-* Add support for dictionaries in config files.
-* Remove support for specifying config options on the command line, except
- for:
-
- * ``--daemonize`` - Daemonize the home server.
- * ``--manhole`` - Turn on the twisted telnet manhole service on the given
- port.
- * ``--database-path`` - The path to a sqlite database to use.
- * ``--verbose`` - The verbosity level.
- * ``--log-file`` - File to log to.
- * ``--log-config`` - Python logging config file.
- * ``--enable-registration`` - Enable registration for new users.
-
-Application services:
-
-* Reliably retry sending of events from Synapse to application services, as per
- `Application Services`_ spec.
-* Application services can no longer register via the ``/register`` API,
- instead their configuration should be saved to a file and listed in the
- synapse ``app_service_config_files`` config option. The AS configuration file
- has the same format as the old ``/register`` request.
- See `docs/application_services.rst`_ for more information.
-
-.. _`docs/postgres.rst`: docs/postgres.rst
-.. _`docs/application_services.rst`: docs/application_services.rst
-.. _`Registration`: https://github.com/matrix-org/matrix-doc/blob/master/specification/10_client_server_api.rst#registration
-.. _`Retrieving Server Keys`: https://github.com/matrix-org/matrix-doc/blob/6f2698/specification/30_server_server_api.rst#retrieving-server-keys
-.. _`Application Services`: https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api
-
-Changes in synapse v0.8.1 (2015-03-18)
-======================================
-
-* Disable registration by default. New users can be added using the command
-  ``register_new_matrix_user`` (see the example after this list) or by
-  enabling registration in the config.
-* Add metrics to synapse. To enable metrics use config options
- ``enable_metrics`` and ``metrics_port``.
-* Fix bug where banning only kicked the user.
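-
-For example, registering a user against a locally running homeserver
-(illustrative localpart; the exact prompts may differ)::
-
-    $ register_new_matrix_user -c homeserver.yaml https://localhost:8448
-    New user localpart: alice
-    Password:
-    Confirm password: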
-
-Changes in synapse v0.8.0 (2015-03-06)
-======================================
-
-General:
-
-* Add support for registration fallback. This is a page hosted on the server
- which allows a user to register for an account, regardless of what client
- they are using (e.g. mobile devices).
-
-* Added new default push rules and made them configurable by clients:
-
- * Suppress all notice messages.
- * Notify when invited to a new room.
- * Notify for messages that don't match any rule.
- * Notify on incoming call.
-
-Federation:
-
-* Added per host server side rate-limiting of incoming federation requests.
-* Added a ``/get_missing_events/`` API to federation to reduce number of
- ``/events/`` requests.
-
-Configuration:
-
-* Added configuration option to disable registration:
- ``disable_registration``.
-* Added configuration option to change soft limit of number of open file
- descriptors: ``soft_file_limit``.
-* Make ``tls_private_key_path`` optional when running with ``no_tls``.
-
-Application services:
-
-* Application services can now poll on the CS API ``/events`` for their events,
- by providing their application service ``access_token``.
-* Added exclusive namespace support to application services API.
-
-
-Changes in synapse v0.7.1 (2015-02-19)
-======================================
-
-* Initial alpha implementation of parts of the Application Services API.
- Including:
-
- - AS Registration / Unregistration
- - User Query API
- - Room Alias Query API
- - Push transport for receiving events.
- - User/Alias namespace admin control
-
-* Add cache when fetching events from remote servers to stop repeatedly
- fetching events with bad signatures.
-* Respect the per remote server retry scheme when fetching both events and
- server keys to reduce the number of times we send requests to dead servers.
-* Inform remote servers when the local server fails to handle a received event.
-* Turn off python bytecode generation due to problems experienced when
- upgrading from previous versions.
-
-Changes in synapse v0.7.0 (2015-02-12)
-======================================
-
-* Add initial implementation of the query auth federation API, allowing
- servers to agree on whether an event should be allowed or rejected.
-* Persist events we have rejected from federation, fixing the bug where
- servers would keep requesting the same events.
-* Various federation performance improvements, including:
-
- - Add in memory caches on queries such as:
-
- * Computing the state of a room at a point in time, used for
- authorization on federation requests.
- * Fetching events from the database.
- * User's room membership, used for authorizing presence updates.
-
- - Upgraded JSON library to improve parsing and serialisation speeds.
-
-* Add default avatars to new user accounts using pydenticon library.
-* Correctly time out federation requests.
-* Retry federation requests against different servers.
-* Add support for push and push rules.
-* Add alpha versions of proposed new CSv2 APIs, including ``/sync`` API.
-
-Changes in synapse 0.6.1 (2015-01-07)
-=====================================
-
-* Major optimizations to improve performance of initial sync and event sending
- in large rooms (by up to 10x)
-* Media repository now includes a Content-Length header on media downloads.
-* Improve quality of thumbnails by changing resizing algorithm.
-
-Changes in synapse 0.6.0 (2014-12-16)
-=====================================
-
-* Add new API for media upload and download that supports thumbnailing.
-* Replicate media uploads over multiple homeservers so media is always served
- to clients from their local homeserver. This obsoletes the
- --content-addr parameter and confusion over accessing content directly
- from remote homeservers.
-* Implement exponential backoff when retrying federation requests when
- sending to remote homeservers which are offline.
-* Implement typing notifications.
-* Fix bugs where we sent events with invalid signatures because we had
-  incorrectly persisted them.
-* Improve performance of database queries involving retrieving events.
-
-Changes in synapse 0.5.4a (2014-12-13)
-======================================
-
-* Fix bug while generating the error message when a file path specified in
- the config doesn't exist.
-
-Changes in synapse 0.5.4 (2014-12-03)
-=====================================
-
-* Fix presence bug where some rooms did not display presence updates for
- remote users.
-* Do not log SQL timing log lines when started with ``-v``.
-* Fix potential memory leak.
-
-Changes in synapse 0.5.3c (2014-12-02)
-======================================
-
-* Change the default value for the ``content_addr`` option to use the HTTP
-  listener, as by default the HTTPS listener will be using a self-signed
-  certificate.
-
-Changes in synapse 0.5.3 (2014-11-27)
-=====================================
-
-* Fix bug that caused joining a remote room to fail if a single event was not
- signed correctly.
-* Fix bug which caused servers to continuously try and fetch events from other
- servers.
-
-Changes in synapse 0.5.2 (2014-11-26)
-=====================================
-
-Fix major bug that caused rooms to disappear from people's initial sync.
-
-Changes in synapse 0.5.1 (2014-11-26)
-=====================================
-See UPGRADES.rst for specific instructions on how to upgrade.
-
- * Fix bug where we served up an Event that did not match its signatures.
- * Fix regression where we no longer correctly handled the case where a
-   homeserver receives an event for a room it doesn't recognise (but is in).
-
-Changes in synapse 0.5.0 (2014-11-19)
-=====================================
-This release includes changes to the federation protocol and client-server API
-that are not backwards compatible.
-
-This release also changes the internal database schemas and so requires servers to
-drop their current history. See UPGRADES.rst for details.
-
-Homeserver:
- * Add authentication and authorization to the federation protocol. Events are
- now signed by their originating homeservers.
- * Implement the new authorization model for rooms.
- * Split out web client into a separate repository: matrix-angular-sdk.
- * Change the structure of PDUs.
- * Fix bug where user could not join rooms via an alias containing 4-byte
- UTF-8 characters.
- * Merge concept of PDUs and Events internally.
- * Improve logging by adding request ids to log lines.
- * Implement a very basic room initial sync API.
- * Implement the new invite/join federation APIs.
-
-Webclient:
- * The webclient has been moved to a separate repository.
-
-Changes in synapse 0.4.2 (2014-10-31)
-=====================================
-
-Homeserver:
- * Fix bugs where we did not notify users of correct presence updates.
- * Fix bug where we did not handle sub second event stream timeouts.
-
-Webclient:
- * Add ability to click on messages to see JSON.
- * Add ability to redact messages.
- * Add ability to view and edit all room state JSON.
- * Handle incoming redactions.
- * Improve feedback on errors.
- * Fix bugs in mobile CSS.
- * Fix bugs with desktop notifications.
-
-Changes in synapse 0.4.1 (2014-10-17)
-=====================================
-Webclient:
- * Fix bug with display of timestamps.
-
-Changes in synapse 0.4.0 (2014-10-17)
-=====================================
-This release includes changes to the federation protocol and client-server API
-that are not backwards compatible.
-
-The Matrix specification has been moved to a separate git repository:
-http://github.com/matrix-org/matrix-doc
-
-You will also need an updated syutil and config. See UPGRADES.rst.
-
-Homeserver:
- * Sign federation transactions to assert strong identity over federation.
- * Rename timestamp keys in PDUs and events from 'ts' and 'hsob_ts' to 'origin_server_ts'.
-
-
-Changes in synapse 0.3.4 (2014-09-25)
-=====================================
-This version adds support for using a TURN server. See docs/turn-howto.rst on
-how to set one up.
-
-Homeserver:
- * Add support for redaction of messages.
- * Fix bug where inviting a user on a remote home server could take up to
- 20-30s.
- * Implement a get current room state API.
- * Add support for specifying and retrieving TURN server configuration.
-
-Webclient:
- * Add button to send messages to users from the home page.
- * Add support for using TURN for VoIP calls.
- * Show display name change messages.
- * Fix bug where the client didn't get the state of a newly joined room
- until after it has been refreshed.
- * Fix bugs with tab complete.
- * Fix bug where holding down the down arrow caused chrome to chew 100% CPU.
- * Fix bug where desktop notifications occasionally used "Undefined" as the
- display name.
- * Fix more places where we sometimes saw room IDs incorrectly.
- * Fix bug which caused lag when entering text in the text box.
-
-Changes in synapse 0.3.3 (2014-09-22)
-=====================================
-
-Homeserver:
- * Fix bug where you continued to get events for rooms you had left.
-
-Webclient:
- * Add support for video calls with basic UI.
- * Fix bug where one to one chats were named after your display name rather
- than the other person's.
- * Fix bug which caused lag when typing in the textarea.
- * Refuse to run on browsers we know won't work.
- * Trigger pagination when joining new rooms.
- * Fix bug where we sometimes didn't display invitations in recents.
- * Automatically join room when accepting a VoIP call.
- * Disable outgoing and reject incoming calls on browsers we don't support
- VoIP in.
- * Don't display desktop notifications for messages in a room while you are
-   active and speaking in it.
-
-Changes in synapse 0.3.2 (2014-09-18)
-=====================================
-
-Webclient:
- * Fix bug where an empty "bing words" list in old accounts didn't send
- notifications when it should have done.
-
-Changes in synapse 0.3.1 (2014-09-18)
-=====================================
-This is a hotfix release for v0.3.0, fixing two regressions.
-
-Webclient:
- * Fix a regression where we sometimes displayed duplicate events.
- * Fix a regression where we didn't immediately remove rooms you were
- banned in from the recents list.
-
-Changes in synapse 0.3.0 (2014-09-18)
-=====================================
-See UPGRADE for information about changes to the client server API, including
-breaking backwards compatibility with VoIP calls and registration API.
-
-Homeserver:
- * When a user changes their displayname or avatar the server will now update
- all their join states to reflect this.
- * The server now adds "age" key to events to indicate how old they are. This
- is clock independent, so at no point does any server or webclient have to
- assume their clock is in sync with everyone else.
- * Fix bug where we didn't correctly pull in missing PDUs.
- * Fix bug where prev_content key wasn't always returned.
- * Add support for password resets.
-
-Webclient:
- * Improve page content loading.
- * Join/parts now trigger desktop notifications.
- * Always show room aliases in the UI if one is present.
- * No longer show user-count in the recents side panel.
- * Add up & down arrow support to the text box for message sending to step
- through your sent history.
- * Don't display notifications for our own messages.
- * Emotes are now formatted correctly in desktop notifications.
- * The recents list now differentiates between public & private rooms.
- * Fix bug where when switching between rooms the pagination flickered before
- the view jumped to the bottom of the screen.
- * Add bing word support.
-
-Registration API:
- * The registration API has been overhauled to function like the login API. In
- practice, this means registration requests must now include the following:
- 'type':'m.login.password'. See UPGRADE for more information on this.
- * The 'user_id' key has been renamed to 'user' to better match the login API.
- * There is an additional login type: 'm.login.email.identity'.
- * The command client and web client have been updated to reflect these changes.
-
-Changes in synapse 0.2.3 (2014-09-12)
-=====================================
-
-Homeserver:
- * Fix bug where we stopped sending events to remote home servers if a
- user from that home server left, even if there were some still in the
- room.
- * Fix bugs in the state conflict resolution where it was incorrectly
- rejecting events.
-
-Webclient:
- * Display room names and topics.
- * Allow setting/editing of room names and topics.
- * Display information about rooms on the main page.
- * Handle ban and kick events in real time.
- * VoIP UI and reliability improvements.
- * Add glare support for VoIP.
- * Improvements to initial startup speed.
- * Don't display duplicate join events.
- * Local echo of messages.
- * Differentiate sending and sent of local echo.
- * Various minor bug fixes.
-
-Changes in synapse 0.2.2 (2014-09-06)
-=====================================
-
-Homeserver:
- * When the server returns state events it now also includes the previous
- content.
- * Add support for inviting people when creating a new room.
- * Make the homeserver inform the room via ``m.room.aliases`` when a new
-   alias is added for a room.
- * Validate ``m.room.power_level`` events.
-
-Webclient:
- * Add support for captchas on registration.
- * Handle `m.room.aliases` events.
- * Asynchronously send messages and show a local echo.
- * Inform the UI when a message failed to send.
- * Only autoscroll on receiving a new message if the user was already at the
- bottom of the screen.
- * Add support for ban/kick reasons.
-
-Changes in synapse 0.2.1 (2014-09-03)
-=====================================
-
-Homeserver:
- * Added support for signing up with a third party id.
- * Add synctl scripts.
- * Added rate limiting.
- * Add option to change the external address the content repo uses.
- * Presence bug fixes.
-
-Webclient:
- * Added support for signing up with a third party id.
- * Added support for banning and kicking users.
- * Added support for displaying and setting ops.
- * Added support for room names.
- * Fix bugs with room membership event display.
-
-Changes in synapse 0.2.0 (2014-09-02)
-=====================================
-This update changes many configuration options, updates the
-database schema and mandates SSL for server-server connections.
-
-Homeserver:
- * Require SSL for server-server connections.
- * Add SSL listener for client-server connections.
- * Add ability to use config files.
- * Add support for kicking/banning and power levels.
- * Allow setting of room names and topics on creation.
- * Change presence to include last seen time of the user.
- * Change url path prefix to /_matrix/...
- * Bug fixes to presence.
-
-Webclient:
- * Reskin the CSS for registration and login.
- * Various improvements to rooms CSS.
- * Support changes in client-server API.
- * Bug fixes to VOIP UI.
- * Various bug fixes to handling of changes to room member list.
-
-Changes in synapse 0.1.2 (2014-08-29)
-=====================================
-
-Webclient:
- * Add basic call state UI for VoIP calls.
-
-Changes in synapse 0.1.1 (2014-08-29)
-=====================================
-
-Homeserver:
- * Fix bug that caused the event stream to not notify some clients about
- changes.
-
-Changes in synapse 0.1.0 (2014-08-29)
-=====================================
-Presence has been reenabled in this release.
-
-Homeserver:
- * Update client to server API, including:
- - Use a more consistent url scheme.
- - Provide more useful information in the initial sync api.
- * Change the presence handling to be much more efficient.
- * Change the presence server to server API to not require explicit polling of
- all users who share a room with a user.
- * Fix races in the event streaming logic.
-
-Webclient:
- * Update to use new client to server API.
- * Add basic VOIP support.
- * Add idle timers that change your status to away.
- * Add recent rooms column when viewing a room.
- * Various network efficiency improvements.
- * Add basic mobile browser support.
- * Add a settings page.
-
-Changes in synapse 0.0.1 (2014-08-22)
-=====================================
-Presence has been disabled in this release due to a bug that caused the
-homeserver to spam other remote homeservers.
-
-Homeserver:
- * Completely change the database schema to support generic event types.
- * Improve presence reliability.
- * Improve reliability of joining remote rooms.
- * Fix bug where room join events were duplicated.
- * Improve initial sync API to return more information to the client.
- * Stop generating fake messages for room membership events.
-
-Webclient:
- * Add tab completion of names.
- * Add ability to upload and send images.
- * Add profile pages.
- * Improve CSS layout of room.
- * Disambiguate identical display names.
- * Don't get remote users display names and avatars individually.
- * Use the new initial sync API to reduce number of round trips to the homeserver.
- * Change url scheme to use room aliases instead of room ids where known.
- * Increase longpoll timeout.
-
-Changes in synapse 0.0.0 (2014-08-13)
-=====================================
-
- * Initial alpha release
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 6c295cfb..aa2738ee 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -51,7 +51,7 @@ makes it horribly hard to review otherwise.
Changelog
~~~~~~~~~
-All changes, even minor ones, need a corresponding changelog
+All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 565341fe..00000000
--- a/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM docker.io/python:2-alpine3.7
-
-RUN apk add --no-cache --virtual .nacl_deps su-exec build-base libffi-dev zlib-dev libressl-dev libjpeg-turbo-dev linux-headers postgresql-dev libxslt-dev
-
-COPY . /synapse
-
-# A wheel cache may be provided in ./cache for faster build
-RUN cd /synapse \
- && pip install --upgrade pip setuptools psycopg2 lxml \
- && mkdir -p /synapse/cache \
- && pip install -f /synapse/cache --upgrade --process-dependency-links . \
- && mv /synapse/contrib/docker/start.py /synapse/contrib/docker/conf / \
- && rm -rf setup.py setup.cfg synapse
-
-VOLUME ["/data"]
-
-EXPOSE 8008/tcp 8448/tcp
-
-ENTRYPOINT ["/start.py"]
diff --git a/MANIFEST.in b/MANIFEST.in
index 97f57f44..1ff98d95 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,6 +2,7 @@ include synctl
include LICENSE
include VERSION
include *.rst
+include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
@@ -34,3 +35,4 @@ recursive-include changelog.d *
prune .github
prune demo/etc
+prune docker
diff --git a/README.rst b/README.rst
index 4fe54b0c..4c5971d0 100644
--- a/README.rst
+++ b/README.rst
@@ -71,7 +71,7 @@ We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
-<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
+<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
Thanks for using Matrix!
@@ -157,12 +157,19 @@ if you prefer.
In case of problems, please see the _`Troubleshooting` section below.
-There is an offical synapse image available at https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with the docker-compose file available at `contrib/docker`. Further information on this including configuration options is available in `contrib/docker/README.md`.
+There is an official synapse image available at
+https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
+the docker-compose file available at `contrib/docker <contrib/docker>`_. Further information on
+this including configuration options is available in the README on
+hub.docker.com.
-Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a Dockerfile to automate a synapse server in a single Docker image, at https://hub.docker.com/r/avhost/docker-matrix/tags/
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
+Dockerfile to automate a synapse server in a single Docker image, at
+https://hub.docker.com/r/avhost/docker-matrix/tags/
Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
-tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
+tested with VirtualBox/AWS/DigitalOcean - see
+https://github.com/EMnify/matrix-synapse-auto-deploy
for details.
Configuring synapse
@@ -283,7 +290,7 @@ Connecting to Synapse from a client
The easiest way to try out your new Synapse installation is by connecting to it
from a web client. The easiest option is probably the one at
-http://riot.im/app. You will need to specify a "Custom server" when you log on
+https://riot.im/app. You will need to specify a "Custom server" when you log on
or register: set this to ``https://domain.tld`` if you setup a reverse proxy
following the recommended setup, or ``https://localhost:8448`` - remember to specify the
port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
@@ -329,7 +336,7 @@ Security Note
=============
Matrix serves raw user generated data in some APIs - specifically the `content
-repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
+repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
Whilst we have tried to mitigate against possible XSS attacks (e.g.
https://github.com/matrix-org/synapse/pull/1021) we recommend running
@@ -348,7 +355,7 @@ Platform-Specific Instructions
Debian
------
-Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
+Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)
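Once that repository is in your apt sources, installation should be the usual one-liner (a sketch; ``matrix-synapse`` is the package name published by the matrix.org repository)::

    sudo apt update
    sudo apt install matrix-synapse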
@@ -362,6 +369,19 @@ Synapse is in the Fedora repositories as ``matrix-synapse``::
Oleg Girko provides Fedora RPMs at
https://obs.infoserver.lv/project/monitor/matrix-synapse
+OpenSUSE
+--------
+
+Synapse is in the OpenSUSE repositories as ``matrix-synapse``::
+
+ sudo zypper install matrix-synapse
+
+SUSE Linux Enterprise Server
+----------------------------
+
+Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
+https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
+
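Adding that repository with ``zypper`` before installing should look roughly like the following (a sketch; the ``matrix`` alias is an arbitrary local name, not part of the repository)::

    sudo zypper addrepo https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/ matrix
    sudo zypper refresh
    sudo zypper install matrix-synapse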
ArchLinux
---------
@@ -524,7 +544,7 @@ Troubleshooting Running
-----------------------
If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
-to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
+to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
encryption and digital signatures.
Unfortunately PyNACL currently has a few issues
(https://github.com/pyca/pynacl/issues/53) and
@@ -672,8 +692,8 @@ useful just for development purposes. See `<demo/README>`_.
Using PostgreSQL
================
-As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
+As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
+alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
traditionally used for convenience and simplicity.
The advantages of Postgres include:
@@ -697,7 +717,7 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
-`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
+`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
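For example, a minimal nginx ``server`` block forwarding Matrix traffic to a locally-running Synapse might look like the following sketch (``matrix.example.com`` and the certificate paths are placeholders, and Synapse is assumed to be listening for plain HTTP on the default port 8008)::

    server {
        listen 443 ssl;
        server_name matrix.example.com;

        ssl_certificate     /path/to/fullchain.pem;
        ssl_certificate_key /path/to/privkey.pem;

        # Forward Matrix client-server traffic to the local Synapse
        location /_matrix {
            proxy_pass http://localhost:8008;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }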
diff --git a/contrib/docker/README.md b/contrib/docker/README.md
index 61592109..05254e51 100644
--- a/contrib/docker/README.md
+++ b/contrib/docker/README.md
@@ -1,29 +1,5 @@
# Synapse Docker
-The `matrixdotorg/synapse` Docker image will run Synapse as a single process. It does not provide a
-database server or a TURN server, you should run these separately.
-
-If you run a Postgres server, you should simply include it in the same Compose
-project or set the proper environment variables and the image will automatically
-use that server.
-
-## Build
-
-Build the docker image with the `docker build` command from the root of the synapse repository.
-
-```
-docker build -t docker.io/matrixdotorg/synapse .
-```
-
-The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
-
-You may have a local Python wheel cache available, in which case copy the relevant packages in the ``cache/`` directory at the root of the project.
-
-## Run
-
-This image is designed to run either with an automatically generated configuration
-file or with a custom configuration that requires manual edition.
-
### Automated configuration
It is recommended that you use Docker Compose to run your containers, including
@@ -60,94 +36,6 @@ Then, customize your configuration and run the server:
docker-compose up -d
```
-### Without Compose
-
-If you do not wish to use Compose, you may still run this image using plain
-Docker commands. Note that the following is just a guideline and you may need
-to add parameters to the docker run command to account for the network situation
-with your postgres database.
-
-```
-docker run \
- -d \
- --name synapse \
- -v ${DATA_PATH}:/data \
- -e SYNAPSE_SERVER_NAME=my.matrix.host \
- -e SYNAPSE_REPORT_STATS=yes \
- docker.io/matrixdotorg/synapse:latest
-```
-
-## Volumes
-
-The image expects a single volume, located at ``/data``, that will hold:
-
-* temporary files during uploads;
-* uploaded media and thumbnails;
-* the SQLite database if you do not configure postgres;
-* the appservices configuration.
-
-You are free to use separate volumes depending on storage endpoints at your
-disposal. For instance, ``/data/media`` coud be stored on a large but low
-performance hdd storage while other files could be stored on high performance
-endpoints.
-
-In order to setup an application service, simply create an ``appservices``
-directory in the data volume and write the application service Yaml
-configuration file there. Multiple application services are supported.
-
-## Environment
-
-Unless you specify a custom path for the configuration file, a very generic
-file will be generated, based on the following environment settings.
-These are a good starting point for setting up your own deployment.
-
-Global settings:
-
-* ``UID``, the user id Synapse will run as [default 991]
-* ``GID``, the group id Synapse will run as [default 991]
-* ``SYNAPSE_CONFIG_PATH``, path to a custom config file
-
-If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
-then customize it manually. No other environment variable is required.
-
-Otherwise, a dynamic configuration file will be used. The following environment
-variables are available for configuration:
-
-* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
-* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
- statistics reporting back to the Matrix project which helps us to get funding.
-* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
- you run your own TLS-capable reverse proxy).
-* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
- the Synapse instance.
-* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
-* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
-* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
-* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
- key in order to enable recaptcha upon registration.
-* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
- key in order to enable recaptcha upon registration.
-* ``SYNAPSE_TURN_URIS``, set this variable to the coma-separated list of TURN
- uris to enable TURN for this homeserver.
-* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
-
-Shared secrets, that will be initialized to random values if not set:
-
-* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registrering users if
- registration is disable.
-* ``SYNAPSE_MACAROON_SECRET_KEY`` secret for signing access tokens
- to the server.
-
-Database specific values (will use SQLite if not set):
-
-* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
-* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
-* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
-* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
-
-Mail server specific values (will not send emails if not set):
+### More information
-* ``SYNAPSE_SMTP_HOST``, hostname to the mail server.
-* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
-* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
-* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.
+For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md).
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 0b531949..3a8dfbae 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -6,6 +6,7 @@ version: '3'
services:
synapse:
+ build: ../..
image: docker.io/matrixdotorg/synapse:latest
# Since synapse does not retry to connect to the database, restart upon
# failure
diff --git a/contrib/grafana/README.md b/contrib/grafana/README.md
new file mode 100644
index 00000000..6a6cc0be
--- /dev/null
+++ b/contrib/grafana/README.md
@@ -0,0 +1,6 @@
+# Using the Synapse Grafana dashboard
+
+0. Set up Prometheus and Grafana (out of scope for this readme). Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
+1. Configure Prometheus to scrape your Synapse: https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
+2. Import the dashboard into Grafana: download `synapse.json`, import it, and select the correct Prometheus datasource: http://docs.grafana.org/reference/export_import/
+3. Set up additional recording rules (see the sketch below)
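For the recording rules in step 3: the dashboard queries at least one recorded series, `process_cpu_seconds:rate2m` (used by the CPU usage panel). A minimal recording-rules file, assuming the conventional reading of that name as a 2-minute `rate()` over the standard `process_cpu_seconds_total` counter, might look like:

```yaml
groups:
  - name: synapse
    rules:
      # Pre-computed CPU usage rate queried by the dashboard's CPU panel
      - record: process_cpu_seconds:rate2m
        expr: rate(process_cpu_seconds_total[2m])
```

Reference the file from `rule_files` in your `prometheus.yml` and reload Prometheus to pick it up.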
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
new file mode 100644
index 00000000..94a1de58
--- /dev/null
+++ b/contrib/grafana/synapse.json
@@ -0,0 +1,4961 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "5.2.0"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": "5.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "heatmap",
+ "name": "Heatmap",
+ "version": "5.0.0"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "5.0.0"
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "$datasource",
+ "enable": false,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Annotations & Alerts",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": null,
+ "iteration": 1533026624326,
+ "links": [
+ {
+ "asDropdown": true,
+ "icon": "external link",
+ "keepTime": true,
+ "tags": [
+ "matrix"
+ ],
+ "title": "Dashboards",
+ "type": "dashboards"
+ }
+ ],
+ "panels": [
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 73,
+ "panels": [],
+ "title": "Overview",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "id": 75,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "process_cpu_seconds:rate2m{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} ",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": "1",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "cards": {
+ "cardPadding": 0,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateSpectral",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 1
+ },
+ "heatmap": {},
+ "highlightCards": true,
+ "id": 85,
+ "legend": {
+ "show": false
+ },
+ "links": [],
+ "targets": [
+ {
+ "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)",
+ "format": "heatmap",
+ "intervalFactor": 1,
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Event Send Time",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": null,
+ "format": "s",
+ "logBase": 2,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 33,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 20,
+ "target": ""
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Events Persisted",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 17
+ },
+ "id": 54,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 18
+ },
+ "id": 34,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}} {{index}}",
+ "refId": "A",
+ "step": 20,
+ "target": ""
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 37,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/max$/",
+ "color": "#890F02",
+ "fill": 0,
+ "legend": false
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ },
+ {
+ "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "format": "time_series",
+ "hide": true,
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} max",
+ "refId": "B",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Open FDs",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "id": 48,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Avg time waiting for db conn",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "s",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "id": 49,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^up/",
+ "legend": false,
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Prometheus scrape time",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "decimals": 0,
+ "format": "none",
+ "label": "",
+ "logBase": 1,
+ "max": "0",
+ "min": "-1",
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 32
+ },
+ "id": 50,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Avg reactor tick time",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 32
+ },
+ "id": 5,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/user/"
+ },
+ {
+ "alias": "/system/"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} system ",
+ "metric": "",
+ "refId": "B",
+ "step": 20
+ },
+ {
+ "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} user",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "line": true,
+ "lineColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 0.5
+ },
+ {
+ "colorMode": "custom",
+ "line": true,
+ "lineColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 0.8
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "percentunit",
+ "label": "",
+ "logBase": 1,
+ "max": "1.2",
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 39
+ },
+ "id": 53,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Up",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "repeat": null,
+ "title": "Process info",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 18
+ },
+ "id": 56,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": 1,
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 49
+ },
+ "id": 40,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "D"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Events/s Local vs Remote",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": 1,
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 49
+ },
+ "id": 46,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{type}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Events/s by Type",
+ "tooltip": {
+ "shared": false,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {
+ "irc-freenode (local)": "#EAB839"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": 1,
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 56
+ },
+ "id": 44,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{origin_entity}} ({{origin_type}})",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Events/s by Origin",
+ "tooltip": {
+ "shared": false,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": 1,
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 56
+ },
+ "id": 45,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(synapse_storage_events_persisted_events_sep{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\"}[$bucket_size])) by (origin_type, origin_entity)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{origin_entity}} ({{origin_type}})",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memberships/s by Origin",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "repeat": null,
+ "title": "Event persist rates",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 19
+ },
+ "id": 57,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": null,
+ "editable": true,
+ "error": false,
+ "fill": 2,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 48
+ },
+ "id": 4,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 100
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 250
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Request Count by arrival time",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 48
+ },
+ "id": 32,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{method}} {{servlet}} {{job}}-{{index}}",
+ "refId": "A",
+ "step": 20,
+ "target": ""
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Top 10 Request Counts",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": null,
+ "editable": true,
+ "error": false,
+ "fill": 2,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 56
+ },
+ "id": 23,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 100,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 250,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total CPU Usage by Endpoint",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": null,
+ "editable": true,
+ "error": false,
+ "fill": 2,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 56
+ },
+ "id": 52,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "(rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_response_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 100
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 250
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Average CPU Usage by Endpoint",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 64
+ },
+ "id": 7,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_http_server_response_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "DB Usage by endpoint",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": null,
+ "editable": true,
+ "error": false,
+ "fill": 2,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 64
+ },
+ "id": 47,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])/rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Non-sync avg response time",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 72
+ },
+ "id": 103,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Requests in flight",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "repeat": null,
+ "title": "Requests",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 20
+ },
+ "id": 97,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 23
+ },
+ "id": 99,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU usage by background jobs",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 23
+ },
+ "id": 101,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "DB usage by background jobs",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "Background jobs",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 21
+ },
+ "id": 81,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "id": 79,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_federation_client_sent_transactions{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "txn rate",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Outgoing federation transaction rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "id": 83,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_federation_server_received_pdus{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "pdus",
+ "refId": "A"
+ },
+ {
+ "expr": "rate(synapse_federation_server_received_edus{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "edus",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Incoming PDU/EDU rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "Federation",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 22
+ },
+ "id": 60,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 23
+ },
+ "id": 51,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_push_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "processed {{job}}",
+ "refId": "A",
+ "step": 20
+ },
+ {
+ "expr": "rate(synapse_push_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "failed {{job}}",
+ "refId": "B",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "HTTP Push rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "repeat": null,
+ "title": "Pushes",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 58,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "id": 10,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{desc}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Top DB transactions by txn rate",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "id": 11,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "topk(5, rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "format": "time_series",
+ "instant": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{desc}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Top DB transactions by total txn time",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "repeat": null,
+ "title": "Database",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 59,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 13,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "id": 12,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{block_name}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total CPU Usage by Block",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 13,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "id": 26,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{block_name}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Average CPU Time per Block",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 13,
+ "w": 12,
+ "x": 0,
+ "y": 30
+ },
+ "id": 13,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}} {{block_name}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total DB Usage by Block",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 13,
+ "w": 12,
+ "x": 12,
+ "y": 30
+ },
+ "id": 27,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{block_name}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Average Database Time per Block",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 13,
+ "w": 12,
+ "x": 0,
+ "y": 43
+ },
+ "id": 28,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{block_name}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Average Transactions per Block",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 13,
+ "w": 12,
+ "x": 12,
+ "y": 43
+ },
+ "id": 25,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{block_name}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Average Wallclock Time per Block",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "repeat": null,
+ "title": "Per-block metrics",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 61,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 55
+ },
+ "id": 1,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{name}} {{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cache Hit Ratio",
+ "tooltip": {
+ "msResolution": true,
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "percentunit",
+ "label": "",
+ "logBase": 1,
+ "max": "1",
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 12,
+ "y": 55
+ },
+ "id": 8,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{name}} {{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cache Size",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 65
+ },
+ "id": 38,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{name}} {{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cache request rate",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "rps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 12,
+ "y": 65
+ },
+ "id": 39,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10, rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=\"$job\",instance=\"$instance\"}[$bucket_size]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{name}} {{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Top 10 cache misses",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "rps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 75
+ },
+ "id": 65,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{name}} {{job}}-{{index}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cache eviction rate",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "hertz",
+ "label": "entries / second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "repeat": null,
+ "title": "Caches",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 26
+ },
+ "id": 62,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 90
+ },
+ "id": 91,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} gen {{gen}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total GC time by bucket (10m smoothing)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 90
+ },
+ "id": 21,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}} {{index}} gen {{gen}} ",
+ "refId": "A",
+ "step": 20,
+ "target": ""
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Average GC Time Per Collection",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 99
+ },
+ "id": 89,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} gen {{gen}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Currently allocated objects",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 99
+ },
+ "id": 93,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} gen {{gen}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Object counts per collection",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 108
+ },
+ "id": 95,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} gen {{gen}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "GC frequency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "cards": {
+ "cardPadding": 0,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateSpectral",
+ "exponent": 0.5,
+ "max": null,
+ "min": 0,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 108
+ },
+ "heatmap": {},
+ "highlightCards": true,
+ "id": 87,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "targets": [
+ {
+ "expr": "sum(rate(python_gc_time_bucket[$bucket_size])) by (le)",
+ "format": "heatmap",
+ "intervalFactor": 1,
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "GC durations",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": null,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ }
+ ],
+ "repeat": null,
+ "title": "GC",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 63,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 19
+ },
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_replication_tcp_resource_user_sync{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "user started/stopped syncing",
+ "refId": "A",
+ "step": 20
+ },
+ {
+ "expr": "rate(synapse_replication_tcp_resource_federation_ack{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "federation ack",
+ "refId": "B",
+ "step": 20
+ },
+ {
+ "expr": "rate(synapse_replication_tcp_resource_remove_pusher{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "remove pusher",
+ "refId": "C",
+ "step": 20
+ },
+ {
+ "expr": "rate(synapse_replication_tcp_resource_invalidate_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "invalidate cache",
+ "refId": "D",
+ "step": 20
+ },
+ {
+ "expr": "rate(synapse_replication_tcp_resource_user_ip_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "user ip cache",
+ "refId": "E",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Rate of events on replication master",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 19
+ },
+ "id": 41,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{stream_name}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Outgoing stream updates",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 26
+ },
+ "id": 42,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{command}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Rate of incoming commands",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 26
+ },
+ "id": 43,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} {{command}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Rate of outgoing commands",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "title": "Replication",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 28
+ },
+ "id": 69,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 11
+ },
+ "id": 67,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": " synapse_event_persisted_position{instance=\"$instance\"} - ignoring(index, job, name) group_right(instance) synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Event processing lag",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "events",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 11
+ },
+ "id": 71,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{name}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Age of last processed event",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "title": "Event processing loop positions",
+ "type": "row"
+ }
+ ],
+ "refresh": "1m",
+ "schemaVersion": 16,
+ "style": "dark",
+ "tags": [
+ "matrix"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allFormat": "glob",
+ "auto": true,
+ "auto_count": 100,
+ "auto_min": "30s",
+ "current": {
+ "text": "auto",
+ "value": "$__auto_interval_bucket_size"
+ },
+ "datasource": null,
+ "hide": 0,
+ "includeAll": false,
+ "label": "Bucket Size",
+ "multi": false,
+ "multiFormat": "glob",
+ "name": "bucket_size",
+ "options": [
+ {
+ "selected": true,
+ "text": "auto",
+ "value": "$__auto_interval_bucket_size"
+ },
+ {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "2m",
+ "value": "2m"
+ },
+ {
+ "selected": false,
+ "text": "5m",
+ "value": "5m"
+ }
+ ],
+ "query": "30s,1m,2m,5m",
+ "refresh": 2,
+ "type": "interval"
+ },
+ {
+ "allValue": null,
+ "current": {},
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "instance",
+ "options": [],
+ "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, instance)",
+ "refresh": 2,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allFormat": "regex wildcard",
+ "allValue": "",
+ "current": {},
+ "datasource": "$datasource",
+ "hide": 0,
+ "hideLabel": false,
+ "includeAll": true,
+ "label": "Job",
+ "multi": true,
+ "multiFormat": "regex values",
+ "name": "job",
+ "options": [],
+ "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, job)",
+ "refresh": 2,
+ "refresh_on_load": false,
+ "regex": "",
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allFormat": "regex wildcard",
+ "allValue": ".*",
+ "current": {},
+ "datasource": "$datasource",
+ "hide": 0,
+ "hideLabel": false,
+ "includeAll": true,
+ "label": "",
+ "multi": true,
+ "multiFormat": "regex values",
+ "name": "index",
+ "options": [],
+ "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, index)",
+ "refresh": 2,
+ "refresh_on_load": false,
+ "regex": "",
+ "sort": 3,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "now": true,
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Synapse",
+ "uid": "000000012",
+ "version": 125
+}
\ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 00000000..26fb3a6b
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,35 @@
+FROM docker.io/python:2-alpine3.7
+
+RUN apk add --no-cache --virtual .nacl_deps \
+        build-base \
+        libffi-dev \
+        libjpeg-turbo-dev \
+        libressl-dev \
+        libxslt-dev \
+        linux-headers \
+        postgresql-dev \
+        su-exec \
+        zlib-dev
+
+COPY . /synapse
+
+# A wheel cache may be provided in ./cache for faster builds
+RUN cd /synapse \
+    && pip install --upgrade \
+        lxml \
+        pip \
+        psycopg2 \
+        setuptools \
+    && mkdir -p /synapse/cache \
+    && pip install -f /synapse/cache --upgrade --process-dependency-links . \
+    && mv /synapse/docker/start.py /synapse/docker/conf / \
+    && rm -rf \
+        setup.cfg \
+        setup.py \
+        synapse
+
+VOLUME ["/data"]
+
+EXPOSE 8008/tcp 8448/tcp
+
+ENTRYPOINT ["/start.py"]
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 00000000..038c78f7
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,124 @@
+# Synapse Docker
+
+This Docker image will run Synapse as a single process. It does not provide a
+database server or a TURN server; you should run these separately.
+
+## Run
+
+We do not currently offer a `latest` image, as this has somewhat undefined
+semantics. Instead we release only tagged versions, so that upgrading between
+releases is entirely within your control.
+
+### Using docker-compose (easier)
+
+This image is designed to run either with an automatically generated configuration
+file or with a custom configuration that requires manual editing.
+
+An easy way to make use of this image is via docker-compose. See the
+[contrib/docker](../contrib/docker)
+section of the synapse project for examples.
+
+### Without Compose (harder)
+
+If you do not wish to use Compose, you may still run this image using plain
+Docker commands. Note that the following is just a guideline, and you may need
+to add parameters to the `docker run` command to account for the network
+situation between the container and your postgres database.
+
+```
+docker run \
+    -d \
+    --name synapse \
+    -v ${DATA_PATH}:/data \
+    -e SYNAPSE_SERVER_NAME=my.matrix.host \
+    -e SYNAPSE_REPORT_STATS=yes \
+    docker.io/matrixdotorg/synapse:<version>
+```
+
+## Volumes
+
+The image expects a single volume, located at ``/data``, that will hold:
+
+* temporary files during uploads;
+* uploaded media and thumbnails;
+* the SQLite database if you do not configure postgres;
+* the appservices configuration.
+
+You are free to use separate volumes depending on the storage endpoints at your
+disposal. For instance, ``/data/media`` could be stored on large but
+low-performance HDD storage while other files could be stored on
+high-performance endpoints.
+
+In order to set up an application service, simply create an ``appservices``
+directory in the data volume and write the application service YAML
+configuration file there. Multiple application services are supported.
+
+## Environment
+
+Unless you specify a custom path for the configuration file, a very generic
+file will be generated based on the following environment settings.
+These are a good starting point for setting up your own deployment.
+
+Global settings:
+
+* ``UID``, the user id Synapse will run as [default 991]
+* ``GID``, the group id Synapse will run as [default 991]
+* ``SYNAPSE_CONFIG_PATH``, path to a custom config file
+
+If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file and
+then customize it manually. No other environment variable is required.
+
+Otherwise, a dynamic configuration file will be used. The following environment
+variables are available for configuration (a sample invocation is shown after
+these lists):
+
+* ``SYNAPSE_SERVER_NAME`` (mandatory), the public hostname of the server.
+* ``SYNAPSE_REPORT_STATS`` (mandatory, ``yes`` or ``no``), enables anonymous
+  statistics reporting back to the Matrix project, which helps us to get funding.
+* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
+ you run your own TLS-capable reverse proxy).
+* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
+ the Synapse instance.
+* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guests to join this server.
+* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
+* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
+* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
+ key in order to enable recaptcha upon registration.
+* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
+ key in order to enable recaptcha upon registration.
+* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
+  URIs to enable TURN for this homeserver.
+* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
+
+Shared secrets, which will be initialized to random values if not set:
+
+* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
+  registration is disabled.
+* ``SYNAPSE_MACAROON_SECRET_KEY``, secret for signing access tokens
+  to the server.
+
+Database specific values (will use SQLite if not set):
+
+* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
+* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
+* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
+* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
+
+Mail server specific values (will not send emails if not set):
+
+* ``SYNAPSE_SMTP_HOST``, hostname of the mail server.
+* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
+* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server, if any.
+* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server, if any.
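+
+As an example, a first run that generates a configuration, enables
+registration and uses a postgres database on the same Docker network might
+look like this (a sketch only: the hostnames, secrets and paths here are
+illustrative placeholders, not defaults):
+
+```
+docker run \
+    -d \
+    --name synapse \
+    -v ${DATA_PATH}:/data \
+    -e SYNAPSE_SERVER_NAME=my.matrix.host \
+    -e SYNAPSE_REPORT_STATS=yes \
+    -e SYNAPSE_ENABLE_REGISTRATION=yes \
+    -e POSTGRES_HOST=db \
+    -e POSTGRES_PASSWORD=changeme \
+    docker.io/matrixdotorg/synapse:<version>
+```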
+
+## Build
+
+Build the docker image with the `docker build` command from the root of the synapse repository.
+
+```
+docker build -t docker.io/matrixdotorg/synapse . -f docker/Dockerfile
+```
+
+The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
+
+You may have a local Python wheel cache available, in which case copy the
+relevant packages into the ``cache/`` directory at the root of the project.
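+
+Assuming `pip` is available on the build host, something like the following
+could pre-populate that cache (the package names are illustrative):
+
+```
+mkdir -p cache
+pip wheel --wheel-dir cache/ lxml psycopg2
+```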
diff --git a/contrib/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index 6bc25bb4..6bc25bb4 100644
--- a/contrib/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
diff --git a/contrib/docker/conf/log.config b/docker/conf/log.config
index 18519958..18519958 100644
--- a/contrib/docker/conf/log.config
+++ b/docker/conf/log.config
diff --git a/contrib/docker/start.py b/docker/start.py
index 90e8b9c5..90e8b9c5 100755
--- a/contrib/docker/start.py
+++ b/docker/start.py
diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst
new file mode 100644
index 00000000..209cd140
--- /dev/null
+++ b/docs/admin_api/register_api.rst
@@ -0,0 +1,63 @@
+Shared-Secret Registration
+==========================
+
+This API allows for the creation of users in an administrative and
+non-interactive way. This is generally used for bootstrapping a Synapse
+instance with administrator accounts.
+
+To authenticate yourself to the server, you will need both the shared secret
+(``registration_shared_secret`` in the homeserver configuration) and a
+one-time nonce. If the registration shared secret is not configured, this API
+is not enabled.
+
+To fetch the nonce, you need to request one from the API::
+
+ > GET /_matrix/client/r0/admin/register
+
+ < {"nonce": "thisisanonce"}
+
+Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
+body containing the nonce, username, password, whether they are an admin
+(optional, False by default), and an HMAC digest of the content.
+
+As an example::
+
+  > POST /_matrix/client/r0/admin/register
+  > {
+     "nonce": "thisisanonce",
+     "username": "pepper_roni",
+     "password": "pizza",
+     "admin": true,
+     "mac": "mac_digest_here"
+    }
+
+  < {
+     "access_token": "token_here",
+     "user_id": "@pepper_roni:test",
+     "home_server": "test",
+     "device_id": "device_id_here"
+    }
+
+The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
+the shared secret and the content being the nonce, user, password, and either
+the string "admin" or "notadmin", each separated by a NUL (``\x00``) byte. For
+an example of generation in Python::
+
+  import hmac, hashlib
+
+  def generate_mac(nonce, user, password, admin=False):
+      # shared_secret is the ``registration_shared_secret`` from the
+      # homeserver configuration; hmac requires it to be a bytestring.
+      mac = hmac.new(
+          key=shared_secret,
+          digestmod=hashlib.sha1,
+      )
+
+      mac.update(nonce.encode('utf8'))
+      mac.update(b"\x00")
+      mac.update(user.encode('utf8'))
+      mac.update(b"\x00")
+      mac.update(password.encode('utf8'))
+      mac.update(b"\x00")
+      mac.update(b"admin" if admin else b"notadmin")
+
+      return mac.hexdigest()
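+
+Putting it together, a complete registration round-trip might look like the
+following sketch. Assumptions: the third-party ``requests`` package is
+installed, the homeserver is reachable at ``http://localhost:8008``, and
+``shared_secret`` holds your ``registration_shared_secret``::
+
+  import requests
+
+  url = "http://localhost:8008/_matrix/client/r0/admin/register"
+
+  # Fetch a one-time nonce, MAC it together with the account details
+  # (using generate_mac above), then post the registration request.
+  nonce = requests.get(url).json()["nonce"]
+  mac = generate_mac(nonce, "pepper_roni", "pizza", admin=True)
+  resp = requests.post(url, json={
+      "nonce": nonce,
+      "username": "pepper_roni",
+      "password": "pizza",
+      "admin": True,
+      "mac": mac,
+  })
+  print(resp.json())  # on success: access_token, user_id, etc.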
diff --git a/docs/workers.rst b/docs/workers.rst
index 1d521b9e..c5b37c3d 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -206,6 +206,10 @@ Handles client API endpoints. It can handle REST endpoints matching the
following regular expressions::
^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
+ ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
+ ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
+ ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
+ ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pyproject.toml b/pyproject.toml
index d1603b5d..dd099dc9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,30 @@
[tool.towncrier]
package = "synapse"
- filename = "CHANGES.rst"
+ filename = "CHANGES.md"
directory = "changelog.d"
- issue_format = "`#{issue} <https://github.com/matrix-org/synapse/issues/{issue}>`_"
+ issue_format = "[\\#{issue}](https://github.com/matrix-org/synapse/issues/{issue})"
+
+ [[tool.towncrier.type]]
+ directory = "feature"
+ name = "Features"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "bugfix"
+ name = "Bugfixes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "doc"
+ name = "Improved Documentation"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "removal"
+ name = "Deprecations and Removals"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "misc"
+ name = "Internal Changes"
+ showcontent = true
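+
+  # With the sections above, towncrier collects changelog entries from files
+  # named "changelog.d/<issue>.<type>", e.g. changelog.d/1234.feature (the
+  # issue number is illustrative).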
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 12ed20d6..8c3d4293 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -26,11 +26,37 @@ import yaml
 def request_registration(user, password, server_location, shared_secret, admin=False):
+    req = urllib2.Request(
+        "%s/_matrix/client/r0/admin/register" % (server_location,),
+        headers={'Content-Type': 'application/json'}
+    )
+
+    try:
+        if sys.version_info[:3] >= (2, 7, 9):
+            # As of version 2.7.9, urllib2 now checks SSL certs
+            import ssl
+            f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
+        else:
+            f = urllib2.urlopen(req)
+        body = f.read()
+        f.close()
+        nonce = json.loads(body)["nonce"]
+    except urllib2.HTTPError as e:
+        print "ERROR! Received %d %s" % (e.code, e.reason,)
+        if 400 <= e.code < 500:
+            if e.info().type == "application/json":
+                resp = json.load(e)
+                if "error" in resp:
+                    print resp["error"]
+        sys.exit(1)
+
     mac = hmac.new(
         key=shared_secret,
         digestmod=hashlib.sha1,
     )
+    mac.update(nonce)
+    mac.update("\x00")
     mac.update(user)
     mac.update("\x00")
     mac.update(password)
@@ -40,10 +66,10 @@ def request_registration(user, password, server_location, shared_secret, admin=F
     mac = mac.hexdigest()
     data = {
-        "user": user,
+        "nonce": nonce,
+        "username": user,
         "password": password,
         "mac": mac,
-        "type": "org.matrix.login.shared_secret",
         "admin": admin,
     }
@@ -52,7 +78,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
print "Sending registration request..."
req = urllib2.Request(
- "%s/_matrix/client/api/v1/register" % (server_location,),
+ "%s/_matrix/client/r0/admin/register" % (server_location,),
data=json.dumps(data),
headers={'Content-Type': 'application/json'}
)
diff --git a/setup.cfg b/setup.cfg
index 9b5b75cd..c2620be6 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -14,12 +14,17 @@ ignore =
pylint.cfg
tox.ini
-[flake8]
+[pep8]
max-line-length = 90
-# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
-# E203 is contrary to PEP8.
+# W503 requires that binary operators be at the end, not start, of lines. Erik
+# doesn't like it. E203 is contrary to PEP8.
ignore = W503,E203
+[flake8]
+# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
+# pep8 to do those checks), but not the "max-line-length" setting
+max-line-length = 90
+
[isort]
line_length = 89
not_skip = __init__.py
@@ -31,3 +36,4 @@ known_compat = mock,six
known_twisted=twisted,OpenSSL
multi_line_output=3
include_trailing_comma=true
+combine_as_imports=true
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 5c0f2f83..a14d578e 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -17,4 +17,4 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.33.0"
+__version__ = "0.33.2"
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index bc629832..5bbbe8e2 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -65,8 +65,9 @@ class Auth(object):
@defer.inlineCallbacks
def check_from_context(self, event, context, do_sig_check=True):
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_events_ids = yield self.compute_auth_events(
- event, context.prev_state_ids, for_verification=True,
+ event, prev_state_ids, for_verification=True,
)
auth_events = yield self.store.get_events(auth_events_ids)
auth_events = {
@@ -251,10 +252,10 @@ class Auth(object):
if ip_address not in app_service.ip_range_whitelist:
defer.returnValue((None, None))
- if "user_id" not in request.args:
+ if b"user_id" not in request.args:
defer.returnValue((app_service.sender, app_service))
- user_id = request.args["user_id"][0]
+ user_id = request.args[b"user_id"][0].decode('utf8')
if app_service.sender == user_id:
defer.returnValue((app_service.sender, app_service))
@@ -544,7 +545,8 @@ class Auth(object):
@defer.inlineCallbacks
def add_auth_events(self, builder, context):
- auth_ids = yield self.compute_auth_events(builder, context.prev_state_ids)
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+ auth_ids = yield self.compute_auth_events(builder, prev_state_ids)
auth_events_entries = yield self.store.add_event_hashes(
auth_ids
@@ -737,3 +739,37 @@ class Auth(object):
)
return query_params[0]
+
+ @defer.inlineCallbacks
+ def check_in_room_or_world_readable(self, room_id, user_id):
+ """Checks that the user is or was in the room or the room is world
+ readable. If neither holds, an AuthError is raised.
+
+ Returns:
+ Deferred[tuple[str, str|None]]: Resolves to the current membership of
+ the user in the room and the membership event ID of the user. If
+ the user is not in the room and never has been, but the room is
+ world readable, then `(Membership.JOIN, None)` is returned.
+ """
+
+ try:
+ # check_user_was_in_room will return the most recent membership
+ # event for the user if:
+ # * The user is a non-guest user, and was ever in the room
+ # * The user is a guest user, and has joined the room
+ # else it will throw.
+ member_event = yield self.check_user_was_in_room(room_id, user_id)
+ defer.returnValue((member_event.membership, member_event.event_id))
+ except AuthError:
+ visibility = yield self.state.get_current_state(
+ room_id, EventTypes.RoomHistoryVisibility, ""
+ )
+ if (
+ visibility and
+ visibility.content["history_visibility"] == "world_readable"
+ ):
+ defer.returnValue((Membership.JOIN, None))
+ return
+ raise AuthError(
+ 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
+ )
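
The rule enforced here: a user may read the room if they are (or were) a member, or if the room's m.room.history_visibility state is "world_readable". A condensed synchronous sketch of the same decision, with plain arguments standing in for the store and state lookups:

    def membership_or_world_readable(membership_event, history_visibility):
        # membership_event: the user's most recent membership event, or None
        # history_visibility: value of the room's history_visibility state
        if membership_event is not None:
            return (membership_event["membership"], membership_event["event_id"])
        if history_visibility == "world_readable":
            # Peeking at a world-readable room: no membership event to report.
            return ("join", None)
        raise PermissionError("Guest access not allowed")
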
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 6074df29..b41d5950 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -55,6 +55,7 @@ class Codes(object):
SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
+ MAU_LIMIT_EXCEEDED = "M_MAU_LIMIT_EXCEEDED"
class CodeMessageException(RuntimeError):
@@ -69,20 +70,6 @@ class CodeMessageException(RuntimeError):
self.code = code
self.msg = msg
- def error_dict(self):
- return cs_error(self.msg)
-
-
-class MatrixCodeMessageException(CodeMessageException):
- """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
-
- Attributes:
- errcode (str): Matrix error code e.g 'M_FORBIDDEN'
- """
- def __init__(self, code, msg, errcode=Codes.UNKNOWN):
- super(MatrixCodeMessageException, self).__init__(code, msg)
- self.errcode = errcode
-
class SynapseError(CodeMessageException):
"""A base exception type for matrix errors which have an errcode and error
@@ -108,38 +95,28 @@ class SynapseError(CodeMessageException):
self.errcode,
)
- @classmethod
- def from_http_response_exception(cls, err):
- """Make a SynapseError based on an HTTPResponseException
-
- This is useful when a proxied request has failed, and we need to
- decide how to map the failure onto a matrix error to send back to the
- client.
-
- An attempt is made to parse the body of the http response as a matrix
- error. If that succeeds, the errcode and error message from the body
- are used as the errcode and error message in the new synapse error.
-
- Otherwise, the errcode is set to M_UNKNOWN, and the error message is
- set to the reason code from the HTTP response.
- Args:
- err (HttpResponseException):
+class ProxiedRequestError(SynapseError):
+ """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
- Returns:
- SynapseError:
- """
- # try to parse the body as json, to get better errcode/msg, but
- # default to M_UNKNOWN with the HTTP status as the error text
- try:
- j = json.loads(err.response)
- except ValueError:
- j = {}
- errcode = j.get('errcode', Codes.UNKNOWN)
- errmsg = j.get('error', err.msg)
+ Attributes:
+ errcode (str): Matrix error code e.g 'M_FORBIDDEN'
+ """
+ def __init__(self, code, msg, errcode=Codes.UNKNOWN, additional_fields=None):
+ super(ProxiedRequestError, self).__init__(
+ code, msg, errcode
+ )
+ if additional_fields is None:
+ self._additional_fields = {}
+ else:
+ self._additional_fields = dict(additional_fields)
- res = SynapseError(err.code, errmsg, errcode)
- return res
+ def error_dict(self):
+ return cs_error(
+ self.msg,
+ self.errcode,
+ **self._additional_fields
+ )
class ConsentNotGivenError(SynapseError):
@@ -308,14 +285,6 @@ class LimitExceededError(SynapseError):
)
-def cs_exception(exception):
- if isinstance(exception, CodeMessageException):
- return exception.error_dict()
- else:
- logger.error("Unknown exception type: %s", type(exception))
- return {}
-
-
def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
""" Utility method for constructing an error response for client-server
interactions.
@@ -372,7 +341,7 @@ class HttpResponseException(CodeMessageException):
Represents an HTTP-level failure of an outbound request
Attributes:
- response (str): body of response
+ response (bytes): body of response
"""
def __init__(self, code, msg, response):
"""
@@ -380,7 +349,39 @@ class HttpResponseException(CodeMessageException):
Args:
code (int): HTTP status code
msg (str): reason phrase from HTTP response status line
- response (str): body of response
+ response (bytes): body of response
"""
super(HttpResponseException, self).__init__(code, msg)
self.response = response
+
+ def to_synapse_error(self):
+ """Make a SynapseError based on an HTTPResponseException
+
+ This is useful when a proxied request has failed, and we need to
+ decide how to map the failure onto a matrix error to send back to the
+ client.
+
+ An attempt is made to parse the body of the http response as a matrix
+ error. If that succeeds, the errcode and error message from the body
+ are used as the errcode and error message in the new synapse error.
+
+ Otherwise, the errcode is set to M_UNKNOWN, and the error message is
+ set to the reason code from the HTTP response.
+
+ Returns:
+ SynapseError:
+ """
+ # try to parse the body as json, to get better errcode/msg, but
+ # default to M_UNKNOWN with the HTTP status as the error text
+ try:
+ j = json.loads(self.response)
+ except ValueError:
+ j = {}
+
+ if not isinstance(j, dict):
+ j = {}
+
+ errcode = j.pop('errcode', Codes.UNKNOWN)
+ errmsg = j.pop('error', self.msg)
+
+ return ProxiedRequestError(self.code, errmsg, errcode, j)
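
The body-to-error mapping is: parse the response as JSON, take "errcode" and "error" as the Matrix code and message, carry any remaining keys over as additional fields, and fall back to M_UNKNOWN plus the HTTP reason phrase when the body is not a JSON object. A standalone sketch of that mapping, independent of the Synapse classes:

    import json

    def body_to_matrix_error(status_code, reason, body):
        """Returns (status_code, errcode, message, additional_fields)."""
        try:
            j = json.loads(body)
        except ValueError:
            j = {}
        if not isinstance(j, dict):
            j = {}
        errcode = j.pop("errcode", "M_UNKNOWN")
        errmsg = j.pop("error", reason)
        return status_code, errcode, errmsg, j

    # body_to_matrix_error(429, "Too Many Requests",
    #     '{"errcode": "M_LIMIT_EXCEEDED", "error": "slow down",
    #       "retry_after_ms": 2000}')
    # -> (429, 'M_LIMIT_EXCEEDED', 'slow down', {'retry_after_ms': 2000})
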
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 25346baa..186831e1 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -113,7 +113,13 @@ ROOM_EVENT_FILTER_SCHEMA = {
},
"contains_url": {
"type": "boolean"
- }
+ },
+ "lazy_load_members": {
+ "type": "boolean"
+ },
+ "include_redundant_members": {
+ "type": "boolean"
+ },
}
}
@@ -261,6 +267,12 @@ class FilterCollection(object):
def ephemeral_limit(self):
return self._room_ephemeral_filter.limit()
+ def lazy_load_members(self):
+ return self._room_state_filter.lazy_load_members()
+
+ def include_redundant_members(self):
+ return self._room_state_filter.include_redundant_members()
+
def filter_presence(self, events):
return self._presence_filter.filter(events)
@@ -417,6 +429,12 @@ class Filter(object):
def limit(self):
return self.filter_json.get("limit", 10)
+ def lazy_load_members(self):
+ return self.filter_json.get("lazy_load_members", False)
+
+ def include_redundant_members(self):
+ return self.filter_json.get("include_redundant_members", False)
+
def _matches_wildcard(actual_value, filter_value):
if filter_value.endswith("*"):
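
Both new flags sit in the room state section of a /sync filter. A hedged example of a filter opting in, following the client-server filter shape validated above:

    # Example /sync filter enabling lazy-loaded room members.
    sync_filter = {
        "room": {
            "state": {
                "lazy_load_members": True,
                # set True to resend members already sent in an earlier sync:
                "include_redundant_members": False,
            }
        }
    }
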
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index b0ea26dc..e2c91123 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -31,6 +31,7 @@ from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.directory import DirectoryStore
@@ -40,7 +41,13 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.client.v1.room import PublicRoomListRestServlet
+from synapse.rest.client.v1.room import (
+ JoinedRoomMemberListRestServlet,
+ PublicRoomListRestServlet,
+ RoomEventContextServlet,
+ RoomMemberListRestServlet,
+ RoomStateRestServlet,
+)
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -52,6 +59,7 @@ logger = logging.getLogger("synapse.app.client_reader")
class ClientReaderSlavedStore(
+ SlavedAccountDataStore,
SlavedEventStore,
SlavedKeyStore,
RoomStore,
@@ -82,7 +90,13 @@ class ClientReaderServer(HomeServer):
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
+
PublicRoomListRestServlet(self).register(resource)
+ RoomMemberListRestServlet(self).register(resource)
+ JoinedRoomMemberListRestServlet(self).register(resource)
+ RoomStateRestServlet(self).register(resource)
+ RoomEventContextServlet(self).register(resource)
+
resources.update({
"/_matrix/client/r0": resource,
"/_matrix/client/unstable": resource,
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 14e6dca5..fba51c26 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -18,6 +18,10 @@ import logging
import os
import sys
+from six import iteritems
+
+from prometheus_client import Gauge
+
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, NoResource
@@ -47,6 +51,7 @@ from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import RootRedirect
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.module_api import ModuleApi
from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirements
@@ -297,6 +302,11 @@ class SynapseHomeServer(HomeServer):
quit_with_error(e.message)
+# Gauges to expose monthly active user control metrics
+current_mau_gauge = Gauge("synapse_admin_current_mau", "Current MAU")
+max_mau_value_gauge = Gauge("synapse_admin_max_mau_value", "MAU Limit")
+
+
def setup(config_options):
"""
Args:
@@ -425,6 +435,9 @@ def run(hs):
# currently either 0 or 1
stats_process = []
+ def start_phone_stats_home():
+ return run_as_background_process("phone_stats_home", phone_stats_home)
+
@defer.inlineCallbacks
def phone_stats_home():
logger.info("Gathering stats for reporting")
@@ -442,7 +455,7 @@ def run(hs):
stats["total_nonbridged_users"] = total_nonbridged_users
daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
- for name, count in daily_user_type_results.iteritems():
+ for name, count in iteritems(daily_user_type_results):
stats["daily_user_type_" + name] = count
room_count = yield hs.get_datastore().get_room_count()
@@ -453,7 +466,7 @@ def run(hs):
stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
r30_results = yield hs.get_datastore().count_r30_users()
- for name, count in r30_results.iteritems():
+ for name, count in iteritems(r30_results):
stats["r30_users_" + name] = count
daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
@@ -496,16 +509,31 @@ def run(hs):
)
def generate_user_daily_visit_stats():
- hs.get_datastore().generate_user_daily_visits()
+ return run_as_background_process(
+ "generate_user_daily_visits",
+ hs.get_datastore().generate_user_daily_visits,
+ )
# Rather than update on per session basis, batch up the requests.
# If you increase the loop period, the accuracy of user_daily_visits
# table will decrease
clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)
+ @defer.inlineCallbacks
+ def generate_monthly_active_users():
+ count = 0
+ if hs.config.limit_usage_by_mau:
+ count = yield hs.get_datastore().count_monthly_users()
+ current_mau_gauge.set(float(count))
+ max_mau_value_gauge.set(float(hs.config.max_mau_value))
+
+ generate_monthly_active_users()
+ if hs.config.limit_usage_by_mau:
+ clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
+
if hs.config.report_stats:
logger.info("Scheduling stats reporting for 3 hour intervals")
- clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
+ clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)
# We need to defer this init for the cases that we daemonize
# otherwise the process ID we get is that of the non-daemon process
@@ -513,7 +541,7 @@ def run(hs):
# We wait 5 minutes to send the first set of stats as the server can
# be quite busy the first few minutes
- clock.call_later(5 * 60, phone_stats_home)
+ clock.call_later(5 * 60, start_phone_stats_home)
if hs.config.daemonize and hs.config.print_pidfile:
print (hs.config.pid_file)
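
The MAU instrumentation pattern: two Prometheus gauges, refreshed periodically with the current monthly-active-user count and the configured ceiling. A self-contained sketch with prometheus_client and a plain timer (the names and helpers are illustrative, not Synapse's):

    import threading

    from prometheus_client import Gauge

    current_mau = Gauge("example_current_mau", "Current MAU")
    max_mau = Gauge("example_max_mau_value", "MAU Limit")

    def refresh_mau_gauges(count_users, mau_limit, interval=300):
        # count_users: callable returning the monthly-active-user count
        current_mau.set(float(count_users()))
        max_mau.set(float(mau_limit))
        t = threading.Timer(interval, refresh_mau_gauges,
                            args=(count_users, mau_limit, interval))
        t.daemon = True
        t.start()
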
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 26b9ec85..e201f18e 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -55,7 +55,6 @@ from synapse.rest.client.v2_alpha import sync
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
-from synapse.storage.roommember import RoomMemberStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
@@ -81,9 +80,7 @@ class SynchrotronSlavedStore(
RoomStore,
BaseSlavedStore,
):
- did_forget = (
- RoomMemberStore.__dict__["did_forget"]
- )
+ pass
UPDATE_SYNCING_USERS_MS = 10 * 1000
diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py
index 68acc15a..d658f967 100755
--- a/synapse/app/synctl.py
+++ b/synapse/app/synctl.py
@@ -25,6 +25,8 @@ import subprocess
import sys
import time
+from six import iteritems
+
import yaml
SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
@@ -173,7 +175,7 @@ def main():
os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
cache_factors = config.get("synctl_cache_factors", {})
- for cache_name, factor in cache_factors.iteritems():
+ for cache_name, factor in iteritems(cache_factors):
os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
worker_configfiles = []
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 18102656..6a471a0a 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -67,6 +67,14 @@ class ServerConfig(Config):
"block_non_admin_invites", False,
)
+ # Options to control access by tracking MAU
+ self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
+ if self.limit_usage_by_mau:
+ self.max_mau_value = config.get(
+ "max_mau_value", 0,
+ )
+ else:
+ self.max_mau_value = 0
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist = None
federation_domain_whitelist = config.get(
@@ -209,6 +217,8 @@ class ServerConfig(Config):
# different cores. See
# https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
#
+ # This setting requires the affinity package to be installed!
+ #
# cpu_affinity: 0xFFFFFFFF
# Whether to serve a web client from the HTTP/HTTPS root resource.
diff --git a/synapse/config/voip.py b/synapse/config/voip.py
index 3a4e16fa..d07bd24f 100644
--- a/synapse/config/voip.py
+++ b/synapse/config/voip.py
@@ -30,10 +30,10 @@ class VoipConfig(Config):
## Turn ##
# The public URIs of the TURN server to give to clients
- turn_uris: []
+ #turn_uris: []
# The shared secret used to compute passwords for the TURN server
- turn_shared_secret: "YOUR_SHARED_SECRET"
+ #turn_shared_secret: "YOUR_SHARED_SECRET"
# The Username and password if the TURN server needs them and
# does not use a token
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index bcd9bb59..368b5f6a 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -13,22 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from six import iteritems
+
from frozendict import frozendict
from twisted.internet import defer
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
+
class EventContext(object):
"""
Attributes:
- current_state_ids (dict[(str, str), str]):
- The current state map including the current event.
- (type, state_key) -> event_id
-
- prev_state_ids (dict[(str, str), str]):
- The current state map excluding the current event.
- (type, state_key) -> event_id
-
state_group (int|None): state group id, if the state has been stored
as a state group. This is usually only None if e.g. the event is
an outlier.
@@ -45,38 +41,77 @@ class EventContext(object):
prev_state_events (?): XXX: is this ever set to anything other than
the empty list?
+
+ _current_state_ids (dict[(str, str), str]|None):
+ The current state map including the current event. None if outlier
+ or we haven't fetched the state from DB yet.
+ (type, state_key) -> event_id
+
+ _prev_state_ids (dict[(str, str), str]|None):
+ The current state map excluding the current event. None if outlier
+ or we haven't fetched the state from DB yet.
+ (type, state_key) -> event_id
+
+ _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
+ been calculated. None if we haven't started calculating yet
+
+ _event_type (str): The type of the event the context is associated with.
+ Only set when state has not been fetched yet.
+
+ _event_state_key (str|None): The state_key of the event the context is
+ associated with. Only set when state has not been fetched yet.
+
+ _prev_state_id (str|None): If the event associated with the context is
+ a state event, then `_prev_state_id` is the event_id of the state
+ that was replaced.
+ Only set when state has not been fetched yet.
"""
__slots__ = [
- "current_state_ids",
- "prev_state_ids",
"state_group",
"rejected",
"prev_group",
"delta_ids",
"prev_state_events",
"app_service",
+ "_current_state_ids",
+ "_prev_state_ids",
+ "_prev_state_id",
+ "_event_type",
+ "_event_state_key",
+ "_fetching_state_deferred",
]
def __init__(self):
+ self.prev_state_events = []
+ self.rejected = False
+ self.app_service = None
+
+ @staticmethod
+ def with_state(state_group, current_state_ids, prev_state_ids,
+ prev_group=None, delta_ids=None):
+ context = EventContext()
+
# The current state including the current event
- self.current_state_ids = None
+ context._current_state_ids = current_state_ids
# The current state excluding the current event
- self.prev_state_ids = None
- self.state_group = None
+ context._prev_state_ids = prev_state_ids
+ context.state_group = state_group
- self.rejected = False
+ context._prev_state_id = None
+ context._event_type = None
+ context._event_state_key = None
+ context._fetching_state_deferred = defer.succeed(None)
# A previously persisted state group and a delta between that
# and this state.
- self.prev_group = None
- self.delta_ids = None
+ context.prev_group = prev_group
+ context.delta_ids = delta_ids
- self.prev_state_events = None
-
- self.app_service = None
+ return context
- def serialize(self, event):
+ @defer.inlineCallbacks
+ def serialize(self, event, store):
"""Converts self to a type that can be serialized as JSON, and then
deserialized by `deserialize`
@@ -92,11 +127,12 @@ class EventContext(object):
# the prev_state_ids, so if we're a state event we include the event
# id that we replaced in the state.
if event.is_state():
- prev_state_id = self.prev_state_ids.get((event.type, event.state_key))
+ prev_state_ids = yield self.get_prev_state_ids(store)
+ prev_state_id = prev_state_ids.get((event.type, event.state_key))
else:
prev_state_id = None
- return {
+ defer.returnValue({
"prev_state_id": prev_state_id,
"event_type": event.type,
"event_state_key": event.state_key if event.is_state() else None,
@@ -106,10 +142,9 @@ class EventContext(object):
"delta_ids": _encode_state_dict(self.delta_ids),
"prev_state_events": self.prev_state_events,
"app_service_id": self.app_service.id if self.app_service else None
- }
+ })
@staticmethod
- @defer.inlineCallbacks
def deserialize(store, input):
"""Converts a dict that was produced by `serialize` back into a
EventContext.
@@ -122,32 +157,115 @@ class EventContext(object):
EventContext
"""
context = EventContext()
+
+ # We use the state_group and prev_state_id stuff to pull the
+ # current_state_ids out of the DB and construct prev_state_ids.
+ context._prev_state_id = input["prev_state_id"]
+ context._event_type = input["event_type"]
+ context._event_state_key = input["event_state_key"]
+
+ context._current_state_ids = None
+ context._prev_state_ids = None
+ context._fetching_state_deferred = None
+
context.state_group = input["state_group"]
- context.rejected = input["rejected"]
context.prev_group = input["prev_group"]
context.delta_ids = _decode_state_dict(input["delta_ids"])
+
+ context.rejected = input["rejected"]
context.prev_state_events = input["prev_state_events"]
- # We use the state_group and prev_state_id stuff to pull the
- # current_state_ids out of the DB and construct prev_state_ids.
- prev_state_id = input["prev_state_id"]
- event_type = input["event_type"]
- event_state_key = input["event_state_key"]
+ app_service_id = input["app_service_id"]
+ if app_service_id:
+ context.app_service = store.get_app_service_by_id(app_service_id)
+
+ return context
+
+ @defer.inlineCallbacks
+ def get_current_state_ids(self, store):
+ """Gets the current state IDs
+
+ Returns:
+ Deferred[dict[(str, str), str]|None]: Returns None if state_group
+ is None, which happens when the associated event is an outlier.
+ """
+
+ if not self._fetching_state_deferred:
+ self._fetching_state_deferred = run_in_background(
+ self._fill_out_state, store,
+ )
+
+ yield make_deferred_yieldable(self._fetching_state_deferred)
+
+ defer.returnValue(self._current_state_ids)
+
+ @defer.inlineCallbacks
+ def get_prev_state_ids(self, store):
+ """Gets the prev state IDs
+
+ Returns:
+ Deferred[dict[(str, str), str]|None]: Returns None if state_group
+ is None, which happens when the associated event is an outlier.
+ """
+
+ if not self._fetching_state_deferred:
+ self._fetching_state_deferred = run_in_background(
+ self._fill_out_state, store,
+ )
+
+ yield make_deferred_yieldable(self._fetching_state_deferred)
- context.current_state_ids = yield store.get_state_ids_for_group(
- context.state_group,
+ defer.returnValue(self._prev_state_ids)
+
+ def get_cached_current_state_ids(self):
+ """Gets the current state IDs if we have them already cached.
+
+ Returns:
+ dict[(str, str), str]|None: Returns None if we haven't cached the
+ state or if state_group is None, which happens when the associated
+ event is an outlier.
+ """
+
+ return self._current_state_ids
+
+ @defer.inlineCallbacks
+ def _fill_out_state(self, store):
+ """Called to populate the _current_state_ids and _prev_state_ids
+ attributes by loading from the database.
+ """
+ if self.state_group is None:
+ return
+
+ self._current_state_ids = yield store.get_state_ids_for_group(
+ self.state_group,
)
- if prev_state_id and event_state_key:
- context.prev_state_ids = dict(context.current_state_ids)
- context.prev_state_ids[(event_type, event_state_key)] = prev_state_id
+ if self._prev_state_id and self._event_state_key is not None:
+ self._prev_state_ids = dict(self._current_state_ids)
+
+ key = (self._event_type, self._event_state_key)
+ self._prev_state_ids[key] = self._prev_state_id
else:
- context.prev_state_ids = context.current_state_ids
+ self._prev_state_ids = self._current_state_ids
- app_service_id = input["app_service_id"]
- if app_service_id:
- context.app_service = store.get_app_service_by_id(app_service_id)
+ @defer.inlineCallbacks
+ def update_state(self, state_group, prev_state_ids, current_state_ids,
+ prev_group, delta_ids):
+ """Replace the state in the context
+ """
+
+ # We need to make sure we wait for any ongoing fetching of state
+ # to complete so that the updated state doesn't get clobbered
+ if self._fetching_state_deferred:
+ yield make_deferred_yieldable(self._fetching_state_deferred)
+
+ self.state_group = state_group
+ self._prev_state_ids = prev_state_ids
+ self.prev_group = prev_group
+ self._current_state_ids = current_state_ids
+ self.delta_ids = delta_ids
- defer.returnValue(context)
+ # We need to ensure that we've marked the state as having been fetched
+ self._fetching_state_deferred = defer.succeed(None)
def _encode_state_dict(state_dict):
@@ -159,7 +277,7 @@ def _encode_state_dict(state_dict):
return [
(etype, state_key, v)
- for (etype, state_key), v in state_dict.iteritems()
+ for (etype, state_key), v in iteritems(state_dict)
]
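
The EventContext rework replaces eagerly populated state maps with on-demand loading: the first caller of get_current_state_ids/get_prev_state_ids starts a single database fetch and stashes its Deferred, and every later caller waits on that same Deferred. A stripped-down sketch of the memoised-Deferred pattern (a stub store, not the Synapse class):

    from twisted.internet import defer

    class LazyState(object):
        def __init__(self, state_group):
            self.state_group = state_group
            self._state_ids = None
            self._fetching = None  # Deferred; set once a fetch has started

        @defer.inlineCallbacks
        def get_state_ids(self, store):
            if self._fetching is None:
                # First caller triggers the fetch; later callers share it.
                self._fetching = self._fill_out_state(store)
            yield self._fetching
            defer.returnValue(self._state_ids)

        @defer.inlineCallbacks
        def _fill_out_state(self, store):
            self._state_ids = yield store.get_state_ids_for_group(
                self.state_group,
            )

    # Usage with a stub store that resolves synchronously:
    # class Store(object):
    #     def get_state_ids_for_group(self, group):
    #         return defer.succeed({("m.room.member", "@u:hs"): "$event"})
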
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 62d7ed13..7550e11b 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -48,6 +48,13 @@ sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["t
PDU_RETRY_TIME_MS = 1 * 60 * 1000
+class InvalidResponseError(RuntimeError):
+ """Helper for _try_destination_list: indicates that the server returned a response
+ we couldn't parse
+ """
+ pass
+
+
class FederationClient(FederationBase):
def __init__(self, hs):
super(FederationClient, self).__init__(hs)
@@ -458,6 +465,61 @@ class FederationClient(FederationBase):
defer.returnValue(signed_auth)
@defer.inlineCallbacks
+ def _try_destination_list(self, description, destinations, callback):
+ """Try an operation on a series of servers, until it succeeds
+
+ Args:
+ description (unicode): description of the operation we're doing, for logging
+
+ destinations (Iterable[unicode]): list of server_names to try
+
+ callback (callable): Function to run for each server. Passed a single
+ argument: the server_name to try. May return a deferred.
+
+ If the callback raises a CodeMessageException with a 300/400 code,
+ attempts to perform the operation stop immediately and the exception is
+ reraised.
+
+ Otherwise, if the callback raises an Exception the error is logged and the
+ next server tried. Normally the stacktrace is logged but this is
+ suppressed if the exception is an InvalidResponseError.
+
+ Returns:
+ The [Deferred] result of callback, if it succeeds
+
+ Raises:
+ SynapseError if the chosen remote server returns a 300/400 code.
+
+ RuntimeError if no servers were reachable.
+ """
+ for destination in destinations:
+ if destination == self.server_name:
+ continue
+
+ try:
+ res = yield callback(destination)
+ defer.returnValue(res)
+ except InvalidResponseError as e:
+ logger.warn(
+ "Failed to %s via %s: %s",
+ description, destination, e,
+ )
+ except HttpResponseException as e:
+ if not 500 <= e.code < 600:
+ raise e.to_synapse_error()
+ else:
+ logger.warn(
+ "Failed to %s via %s: %i %s",
+ description, destination, e.code, e.message,
+ )
+ except Exception:
+ logger.warn(
+ "Failed to %s via %s",
+ description, destination, exc_info=1,
+ )
+
+ raise RuntimeError("Failed to %s via any server" % (description,))
+
def make_membership_event(self, destinations, room_id, user_id, membership,
content={},):
"""
@@ -481,7 +543,7 @@ class FederationClient(FederationBase):
Deferred: resolves to a tuple of (origin (str), event (object))
where origin is the remote homeserver which generated the event.
- Fails with a ``CodeMessageException`` if the chosen remote server
+ Fails with a ``SynapseError`` if the chosen remote server
returns a 300/400 code.
Fails with a ``RuntimeError`` if no servers were reachable.
@@ -492,50 +554,35 @@ class FederationClient(FederationBase):
"make_membership_event called with membership='%s', must be one of %s" %
(membership, ",".join(valid_memberships))
)
- for destination in destinations:
- if destination == self.server_name:
- continue
- try:
- ret = yield self.transport_layer.make_membership_event(
- destination, room_id, user_id, membership
- )
+ @defer.inlineCallbacks
+ def send_request(destination):
+ ret = yield self.transport_layer.make_membership_event(
+ destination, room_id, user_id, membership
+ )
- pdu_dict = ret["event"]
+ pdu_dict = ret["event"]
- logger.debug("Got response to make_%s: %s", membership, pdu_dict)
+ logger.debug("Got response to make_%s: %s", membership, pdu_dict)
- pdu_dict["content"].update(content)
+ pdu_dict["content"].update(content)
- # The protoevent received over the JSON wire may not have all
- # the required fields. Lets just gloss over that because
- # there's some we never care about
- if "prev_state" not in pdu_dict:
- pdu_dict["prev_state"] = []
+ # The protoevent received over the JSON wire may not have all
+ # the required fields. Lets just gloss over that because
+ # there's some we never care about
+ if "prev_state" not in pdu_dict:
+ pdu_dict["prev_state"] = []
- ev = builder.EventBuilder(pdu_dict)
+ ev = builder.EventBuilder(pdu_dict)
- defer.returnValue(
- (destination, ev)
- )
- break
- except CodeMessageException as e:
- if not 500 <= e.code < 600:
- raise
- else:
- logger.warn(
- "Failed to make_%s via %s: %s",
- membership, destination, e.message
- )
- except Exception as e:
- logger.warn(
- "Failed to make_%s via %s: %s",
- membership, destination, e.message
- )
+ defer.returnValue(
+ (destination, ev)
+ )
- raise RuntimeError("Failed to send to any server.")
+ return self._try_destination_list(
+ "make_" + membership, destinations, send_request,
+ )
- @defer.inlineCallbacks
def send_join(self, destinations, pdu):
"""Sends a join event to one of a list of homeservers.
@@ -552,103 +599,91 @@ class FederationClient(FederationBase):
giving the server the event was sent to, ``state`` (?) and
``auth_chain``.
- Fails with a ``CodeMessageException`` if the chosen remote server
+ Fails with a ``SynapseError`` if the chosen remote server
returns a 300/400 code.
Fails with a ``RuntimeError`` if no servers were reachable.
"""
- for destination in destinations:
- if destination == self.server_name:
- continue
-
- try:
- time_now = self._clock.time_msec()
- _, content = yield self.transport_layer.send_join(
- destination=destination,
- room_id=pdu.room_id,
- event_id=pdu.event_id,
- content=pdu.get_pdu_json(time_now),
- )
+ @defer.inlineCallbacks
+ def send_request(destination):
+ time_now = self._clock.time_msec()
+ _, content = yield self.transport_layer.send_join(
+ destination=destination,
+ room_id=pdu.room_id,
+ event_id=pdu.event_id,
+ content=pdu.get_pdu_json(time_now),
+ )
- logger.debug("Got content: %s", content)
+ logger.debug("Got content: %s", content)
- state = [
- event_from_pdu_json(p, outlier=True)
- for p in content.get("state", [])
- ]
+ state = [
+ event_from_pdu_json(p, outlier=True)
+ for p in content.get("state", [])
+ ]
- auth_chain = [
- event_from_pdu_json(p, outlier=True)
- for p in content.get("auth_chain", [])
- ]
+ auth_chain = [
+ event_from_pdu_json(p, outlier=True)
+ for p in content.get("auth_chain", [])
+ ]
- pdus = {
- p.event_id: p
- for p in itertools.chain(state, auth_chain)
- }
+ pdus = {
+ p.event_id: p
+ for p in itertools.chain(state, auth_chain)
+ }
- valid_pdus = yield self._check_sigs_and_hash_and_fetch(
- destination, list(pdus.values()),
- outlier=True,
- )
+ valid_pdus = yield self._check_sigs_and_hash_and_fetch(
+ destination, list(pdus.values()),
+ outlier=True,
+ )
- valid_pdus_map = {
- p.event_id: p
- for p in valid_pdus
- }
-
- # NB: We *need* to copy to ensure that we don't have multiple
- # references being passed on, as that causes... issues.
- signed_state = [
- copy.copy(valid_pdus_map[p.event_id])
- for p in state
- if p.event_id in valid_pdus_map
- ]
+ valid_pdus_map = {
+ p.event_id: p
+ for p in valid_pdus
+ }
- signed_auth = [
- valid_pdus_map[p.event_id]
- for p in auth_chain
- if p.event_id in valid_pdus_map
- ]
+ # NB: We *need* to copy to ensure that we don't have multiple
+ # references being passed on, as that causes... issues.
+ signed_state = [
+ copy.copy(valid_pdus_map[p.event_id])
+ for p in state
+ if p.event_id in valid_pdus_map
+ ]
- # NB: We *need* to copy to ensure that we don't have multiple
- # references being passed on, as that causes... issues.
- for s in signed_state:
- s.internal_metadata = copy.deepcopy(s.internal_metadata)
+ signed_auth = [
+ valid_pdus_map[p.event_id]
+ for p in auth_chain
+ if p.event_id in valid_pdus_map
+ ]
- auth_chain.sort(key=lambda e: e.depth)
+ # NB: We *need* to copy to ensure that we don't have multiple
+ # references being passed on, as that causes... issues.
+ for s in signed_state:
+ s.internal_metadata = copy.deepcopy(s.internal_metadata)
- defer.returnValue({
- "state": signed_state,
- "auth_chain": signed_auth,
- "origin": destination,
- })
- except CodeMessageException as e:
- if not 500 <= e.code < 600:
- raise
- else:
- logger.exception(
- "Failed to send_join via %s: %s",
- destination, e.message
- )
- except Exception as e:
- logger.exception(
- "Failed to send_join via %s: %s",
- destination, e.message
- )
+ auth_chain.sort(key=lambda e: e.depth)
- raise RuntimeError("Failed to send to any server.")
+ defer.returnValue({
+ "state": signed_state,
+ "auth_chain": signed_auth,
+ "origin": destination,
+ })
+ return self._try_destination_list("send_join", destinations, send_request)
@defer.inlineCallbacks
def send_invite(self, destination, room_id, event_id, pdu):
time_now = self._clock.time_msec()
- code, content = yield self.transport_layer.send_invite(
- destination=destination,
- room_id=room_id,
- event_id=event_id,
- content=pdu.get_pdu_json(time_now),
- )
+ try:
+ code, content = yield self.transport_layer.send_invite(
+ destination=destination,
+ room_id=room_id,
+ event_id=event_id,
+ content=pdu.get_pdu_json(time_now),
+ )
+ except HttpResponseException as e:
+ if e.code == 403:
+ raise e.to_synapse_error()
+ raise
pdu_dict = content["event"]
@@ -663,7 +698,6 @@ class FederationClient(FederationBase):
defer.returnValue(pdu)
- @defer.inlineCallbacks
def send_leave(self, destinations, pdu):
"""Sends a leave event to one of a list of homeservers.
@@ -680,35 +714,25 @@ class FederationClient(FederationBase):
Return:
Deferred: resolves to None.
- Fails with a ``CodeMessageException`` if the chosen remote server
- returns a non-200 code.
+ Fails with a ``SynapseError`` if the chosen remote server
+ returns a 300/400 code.
Fails with a ``RuntimeError`` if no servers were reachable.
"""
- for destination in destinations:
- if destination == self.server_name:
- continue
-
- try:
- time_now = self._clock.time_msec()
- _, content = yield self.transport_layer.send_leave(
- destination=destination,
- room_id=pdu.room_id,
- event_id=pdu.event_id,
- content=pdu.get_pdu_json(time_now),
- )
+ @defer.inlineCallbacks
+ def send_request(destination):
+ time_now = self._clock.time_msec()
+ _, content = yield self.transport_layer.send_leave(
+ destination=destination,
+ room_id=pdu.room_id,
+ event_id=pdu.event_id,
+ content=pdu.get_pdu_json(time_now),
+ )
- logger.debug("Got content: %s", content)
- defer.returnValue(None)
- except CodeMessageException:
- raise
- except Exception as e:
- logger.exception(
- "Failed to send_leave via %s: %s",
- destination, e.message
- )
+ logger.debug("Got content: %s", content)
+ defer.returnValue(None)
- raise RuntimeError("Failed to send to any server.")
+ return self._try_destination_list("send_leave", destinations, send_request)
def get_public_rooms(self, destination, limit=None, since_token=None,
search_filter=None, include_all_networks=False,
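
_try_destination_list factors the fallback loop out of make_membership_event, send_join and send_leave: try each server in turn, re-raise immediately on a 3xx/4xx (the request itself is at fault), and log and move on for anything else. A plain-Python sketch of the control flow, with a hypothetical HttpError standing in for Synapse's exception types:

    import logging

    logger = logging.getLogger(__name__)

    class HttpError(Exception):
        def __init__(self, code, msg):
            super(HttpError, self).__init__(msg)
            self.code = code

    def try_destinations(description, destinations, callback, own_name):
        for destination in destinations:
            if destination == own_name:
                continue  # never federate with ourselves
            try:
                return callback(destination)
            except HttpError as e:
                if not 500 <= e.code < 600:
                    # 3xx/4xx: retrying elsewhere won't help
                    raise
                logger.warning("Failed to %s via %s: %i",
                               description, destination, e.code)
            except Exception:
                logger.warning("Failed to %s via %s",
                               description, destination, exc_info=True)
        raise RuntimeError("Failed to %s via any server" % (description,))
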
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 48f26db6..bf89d568 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -24,6 +24,7 @@ from prometheus_client import Counter
from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
+from twisted.python import failure
from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError, FederationError, NotFoundError, SynapseError
@@ -186,8 +187,12 @@ class FederationServer(FederationBase):
logger.warn("Error handling PDU %s: %s", event_id, e)
pdu_results[event_id] = {"error": str(e)}
except Exception as e:
+ f = failure.Failure()
pdu_results[event_id] = {"error": str(e)}
- logger.exception("Failed to handle PDU %s", event_id)
+ logger.error(
+ "Failed to handle PDU %s: %s",
+ event_id, f.getTraceback().rstrip(),
+ )
yield async.concurrently_execute(
process_pdus_for_room, pdus_by_room.keys(),
@@ -202,10 +207,6 @@ class FederationServer(FederationBase):
edu.content
)
- pdu_failures = getattr(transaction, "pdu_failures", [])
- for failure in pdu_failures:
- logger.info("Got failure %r", failure)
-
response = {
"pdus": pdu_results,
}
@@ -425,6 +426,7 @@ class FederationServer(FederationBase):
ret = yield self.handler.on_query_auth(
origin,
event_id,
+ room_id,
signed_auth,
content.get("rejects", []),
content.get("missing", []),
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 5157c386..0bb46838 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -62,8 +62,6 @@ class FederationRemoteSendQueue(object):
self.edus = SortedDict() # stream position -> Edu
- self.failures = SortedDict() # stream position -> (destination, Failure)
-
self.device_messages = SortedDict() # stream position -> destination
self.pos = 1
@@ -79,7 +77,7 @@ class FederationRemoteSendQueue(object):
for queue_name in [
"presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
- "edus", "failures", "device_messages", "pos_time",
+ "edus", "device_messages", "pos_time",
]:
register(queue_name, getattr(self, queue_name))
@@ -149,12 +147,6 @@ class FederationRemoteSendQueue(object):
for key in keys[:i]:
del self.edus[key]
- # Delete things out of failure map
- keys = self.failures.keys()
- i = self.failures.bisect_left(position_to_delete)
- for key in keys[:i]:
- del self.failures[key]
-
# Delete things out of device map
keys = self.device_messages.keys()
i = self.device_messages.bisect_left(position_to_delete)
@@ -204,13 +196,6 @@ class FederationRemoteSendQueue(object):
self.notifier.on_new_replication_data()
- def send_failure(self, failure, destination):
- """As per TransactionQueue"""
- pos = self._next_pos()
-
- self.failures[pos] = (destination, str(failure))
- self.notifier.on_new_replication_data()
-
def send_device_messages(self, destination):
"""As per TransactionQueue"""
pos = self._next_pos()
@@ -285,17 +270,6 @@ class FederationRemoteSendQueue(object):
for (pos, edu) in edus:
rows.append((pos, EduRow(edu)))
- # Fetch changed failures
- i = self.failures.bisect_right(from_token)
- j = self.failures.bisect_right(to_token) + 1
- failures = self.failures.items()[i:j]
-
- for (pos, (destination, failure)) in failures:
- rows.append((pos, FailureRow(
- destination=destination,
- failure=failure,
- )))
-
# Fetch changed device messages
i = self.device_messages.bisect_right(from_token)
j = self.device_messages.bisect_right(to_token) + 1
@@ -417,34 +391,6 @@ class EduRow(BaseFederationRow, namedtuple("EduRow", (
buff.edus.setdefault(self.edu.destination, []).append(self.edu)
-class FailureRow(BaseFederationRow, namedtuple("FailureRow", (
- "destination", # str
- "failure",
-))):
- """Streams failures to a remote server. Failures are issued when there was
- something wrong with a transaction the remote sent us, e.g. it included
- an event that was invalid.
- """
-
- TypeId = "f"
-
- @staticmethod
- def from_data(data):
- return FailureRow(
- destination=data["destination"],
- failure=data["failure"],
- )
-
- def to_data(self):
- return {
- "destination": self.destination,
- "failure": self.failure,
- }
-
- def add_to_buffer(self, buff):
- buff.failures.setdefault(self.destination, []).append(self.failure)
-
-
class DeviceRow(BaseFederationRow, namedtuple("DeviceRow", (
"destination", # str
))):
@@ -471,7 +417,6 @@ TypeToRow = {
PresenceRow,
KeyedEduRow,
EduRow,
- FailureRow,
DeviceRow,
)
}
@@ -481,7 +426,6 @@ ParsedFederationStreamData = namedtuple("ParsedFederationStreamData", (
"presence", # list(UserPresenceState)
"keyed_edus", # dict of destination -> { key -> Edu }
"edus", # dict of destination -> [Edu]
- "failures", # dict of destination -> [failures]
"device_destinations", # set of destinations
))
@@ -503,7 +447,6 @@ def process_rows_for_federation(transaction_queue, rows):
presence=[],
keyed_edus={},
edus={},
- failures={},
device_destinations=set(),
)
@@ -532,9 +475,5 @@ def process_rows_for_federation(transaction_queue, rows):
edu.destination, edu.edu_type, edu.content, key=None,
)
- for destination, failure_list in iteritems(buff.failures):
- for failure in failure_list:
- transaction_queue.send_failure(destination, failure)
-
for destination in buff.device_destinations:
transaction_queue.send_device_messages(destination)
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index 5a956ecf..78f9d40a 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -30,7 +30,8 @@ from synapse.metrics import (
sent_edus_counter,
sent_transactions_counter,
)
-from synapse.util import PreserveLoggingContext, logcontext
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import logcontext
from synapse.util.metrics import measure_func
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
@@ -115,9 +116,6 @@ class TransactionQueue(object):
),
)
- # destination -> list of tuple(failure, deferred)
- self.pending_failures_by_dest = {}
-
# destination -> stream_id of last successfully sent to-device message.
# NB: may be a long or an int.
self.last_device_stream_id_by_dest = {}
@@ -165,10 +163,11 @@ class TransactionQueue(object):
if self._is_processing:
return
- # fire off a processing loop in the background. It's likely it will
- # outlast the current request, so run it in the sentinel logcontext.
- with PreserveLoggingContext():
- self._process_event_queue_loop()
+ # fire off a processing loop in the background
+ run_as_background_process(
+ "process_event_queue_for_federation",
+ self._process_event_queue_loop,
+ )
@defer.inlineCallbacks
def _process_event_queue_loop(self):
@@ -380,19 +379,6 @@ class TransactionQueue(object):
self._attempt_new_transaction(destination)
- def send_failure(self, failure, destination):
- if destination == self.server_name or destination == "localhost":
- return
-
- if not self.can_send_to(destination):
- return
-
- self.pending_failures_by_dest.setdefault(
- destination, []
- ).append(failure)
-
- self._attempt_new_transaction(destination)
-
def send_device_messages(self, destination):
if destination == self.server_name or destination == "localhost":
return
@@ -432,14 +418,11 @@ class TransactionQueue(object):
logger.debug("TX [%s] Starting transaction loop", destination)
- # Drop the logcontext before starting the transaction. It doesn't
- # really make sense to log all the outbound transactions against
- # whatever path led us to this point: that's pretty arbitrary really.
- #
- # (this also means we can fire off _perform_transaction without
- # yielding)
- with logcontext.PreserveLoggingContext():
- self._transaction_transmission_loop(destination)
+ run_as_background_process(
+ "federation_transaction_transmission_loop",
+ self._transaction_transmission_loop,
+ destination,
+ )
@defer.inlineCallbacks
def _transaction_transmission_loop(self, destination):
@@ -470,7 +453,6 @@ class TransactionQueue(object):
pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
pending_edus = self.pending_edus_by_dest.pop(destination, [])
pending_presence = self.pending_presence_by_dest.pop(destination, {})
- pending_failures = self.pending_failures_by_dest.pop(destination, [])
pending_edus.extend(
self.pending_edus_keyed_by_dest.pop(destination, {}).values()
@@ -498,7 +480,7 @@ class TransactionQueue(object):
logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
destination, len(pending_pdus))
- if not pending_pdus and not pending_edus and not pending_failures:
+ if not pending_pdus and not pending_edus:
logger.debug("TX [%s] Nothing to send", destination)
self.last_device_stream_id_by_dest[destination] = (
device_stream_id
@@ -508,7 +490,7 @@ class TransactionQueue(object):
# END CRITICAL SECTION
success = yield self._send_new_transaction(
- destination, pending_pdus, pending_edus, pending_failures,
+ destination, pending_pdus, pending_edus,
)
if success:
sent_transactions_counter.inc()
@@ -585,14 +567,12 @@ class TransactionQueue(object):
@measure_func("_send_new_transaction")
@defer.inlineCallbacks
- def _send_new_transaction(self, destination, pending_pdus, pending_edus,
- pending_failures):
+ def _send_new_transaction(self, destination, pending_pdus, pending_edus):
# Sort based on the order field
pending_pdus.sort(key=lambda t: t[1])
pdus = [x[0] for x in pending_pdus]
edus = pending_edus
- failures = [x.get_dict() for x in pending_failures]
success = True
@@ -602,11 +582,10 @@ class TransactionQueue(object):
logger.debug(
"TX [%s] {%s} Attempting new transaction"
- " (pdus: %d, edus: %d, failures: %d)",
+ " (pdus: %d, edus: %d)",
destination, txn_id,
len(pdus),
len(edus),
- len(failures)
)
logger.debug("TX [%s] Persisting transaction...", destination)
@@ -618,7 +597,6 @@ class TransactionQueue(object):
destination=destination,
pdus=pdus,
edus=edus,
- pdu_failures=failures,
)
self._next_txn_id += 1
@@ -628,12 +606,11 @@ class TransactionQueue(object):
logger.debug("TX [%s] Persisted transaction", destination)
logger.info(
"TX [%s] {%s} Sending transaction [%s],"
- " (PDUs: %d, EDUs: %d, failures: %d)",
+ " (PDUs: %d, EDUs: %d)",
destination, txn_id,
transaction.transaction_id,
len(pdus),
len(edus),
- len(failures),
)
# Actually send the transaction
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index c9beca27..eae5f2b4 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -165,7 +165,7 @@ def _parse_auth_header(header_bytes):
param_dict = dict(kv.split("=") for kv in params)
def strip_quotes(value):
- if value.startswith(b"\""):
+ if value.startswith("\""):
return value[1:-1]
else:
return value
@@ -283,11 +283,10 @@ class FederationSendServlet(BaseFederationServlet):
)
logger.info(
- "Received txn %s from %s. (PDUs: %d, EDUs: %d, failures: %d)",
+ "Received txn %s from %s. (PDUs: %d, EDUs: %d)",
transaction_id, origin,
len(transaction_data.get("pdus", [])),
len(transaction_data.get("edus", [])),
- len(transaction_data.get("failures", [])),
)
# We should ideally be getting this from the security layer.
@@ -404,10 +403,10 @@ class FederationMakeLeaveServlet(BaseFederationServlet):
class FederationSendLeaveServlet(BaseFederationServlet):
- PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<txid>[^/]*)"
+ PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
@defer.inlineCallbacks
- def on_PUT(self, origin, content, query, room_id, txid):
+ def on_PUT(self, origin, content, query, room_id, event_id):
content = yield self.handler.on_send_leave_request(origin, content)
defer.returnValue((200, content))
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index bb1b3b13..c5ab1431 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -73,7 +73,6 @@ class Transaction(JsonEncodedObject):
"previous_ids",
"pdus",
"edus",
- "pdu_failures",
]
internal_keys = [
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index 47452700..b04f4234 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -43,6 +43,7 @@ from signedjson.sign import sign_json
from twisted.internet import defer
from synapse.api.errors import SynapseError
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util.logcontext import run_in_background
@@ -129,7 +130,7 @@ class GroupAttestionRenewer(object):
self.attestations = hs.get_groups_attestation_signing()
self._renew_attestations_loop = self.clock.looping_call(
- self._renew_attestations, 30 * 60 * 1000,
+ self._start_renew_attestations, 30 * 60 * 1000,
)
@defer.inlineCallbacks
@@ -151,6 +152,9 @@ class GroupAttestionRenewer(object):
defer.returnValue({})
+ def _start_renew_attestations(self):
+ return run_as_background_process("renew_attestations", self._renew_attestations)
+
@defer.inlineCallbacks
def _renew_attestations(self):
"""Called periodically to check if we need to update any of our attestations
diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py
index 4b9923d8..413425fe 100644
--- a/synapse/handlers/__init__.py
+++ b/synapse/handlers/__init__.py
@@ -17,9 +17,7 @@ from .admin import AdminHandler
from .directory import DirectoryHandler
from .federation import FederationHandler
from .identity import IdentityHandler
-from .message import MessageHandler
from .register import RegistrationHandler
-from .room import RoomContextHandler
from .search import SearchHandler
@@ -44,10 +42,8 @@ class Handlers(object):
def __init__(self, hs):
self.registration_handler = RegistrationHandler(hs)
- self.message_handler = MessageHandler(hs)
self.federation_handler = FederationHandler(hs)
self.directory_handler = DirectoryHandler(hs)
self.admin_handler = AdminHandler(hs)
self.identity_handler = IdentityHandler(hs)
self.search_handler = SearchHandler(hs)
- self.room_context_handler = RoomContextHandler(hs)
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index b6a8b3aa..704181d2 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -112,8 +112,9 @@ class BaseHandler(object):
guest_access = event.content.get("guest_access", "forbidden")
if guest_access != "can_join":
if context:
+ current_state_ids = yield context.get_current_state_ids(self.store)
current_state = yield self.store.get_events(
- list(context.current_state_ids.values())
+ list(current_state_ids.values())
)
else:
current_state = yield self.state_handler.get_current_state(
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index ec9fe01a..ee41aed6 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -23,6 +23,7 @@ from twisted.internet import defer
import synapse
from synapse.api.constants import EventTypes
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.metrics import Measure
@@ -106,7 +107,9 @@ class ApplicationServicesHandler(object):
yield self._check_user_exists(event.state_key)
if not self.started_scheduler:
- self.scheduler.start().addErrback(log_failure)
+ def start_scheduler():
+ return self.scheduler.start().addErrback(log_failure)
+ run_as_background_process("as_scheduler", start_scheduler)
self.started_scheduler = True
# Fork off pushes to these services
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 402e44cd..184eef09 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -15,6 +15,7 @@
# limitations under the License.
import logging
+import unicodedata
import attr
import bcrypt
@@ -519,6 +520,7 @@ class AuthHandler(BaseHandler):
"""
logger.info("Logging in user %s on device %s", user_id, device_id)
access_token = yield self.issue_access_token(user_id, device_id)
+ yield self._check_mau_limits()
# the device *should* have been registered before we got here; however,
# it's possible we raced against a DELETE operation. The thing we
@@ -626,6 +628,7 @@ class AuthHandler(BaseHandler):
# special case to check for "password" for the check_password interface
# for the auth providers
password = login_submission.get("password")
+
if login_type == LoginType.PASSWORD:
if not self._password_enabled:
raise SynapseError(400, "Password login has been disabled.")
@@ -707,9 +710,10 @@ class AuthHandler(BaseHandler):
multiple inexact matches.
Args:
- user_id (str): complete @user:id
+ user_id (unicode): complete @user:id
+ password (unicode): the provided password
Returns:
- (str) the canonical_user_id, or None if unknown user / bad password
+ (unicode) the canonical_user_id, or None if unknown user / bad password
"""
lookupres = yield self._find_user_id_and_pwd_hash(user_id)
if not lookupres:
@@ -728,15 +732,18 @@ class AuthHandler(BaseHandler):
device_id)
defer.returnValue(access_token)
+ @defer.inlineCallbacks
def validate_short_term_login_token_and_get_user_id(self, login_token):
+ yield self._check_mau_limits()
auth_api = self.hs.get_auth()
+ user_id = None
try:
macaroon = pymacaroons.Macaroon.deserialize(login_token)
user_id = auth_api.get_user_id_from_macaroon(macaroon)
auth_api.validate_macaroon(macaroon, "login", True, user_id)
- return user_id
except Exception:
raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
+ defer.returnValue(user_id)
@defer.inlineCallbacks
def delete_access_token(self, access_token):
@@ -849,14 +856,19 @@ class AuthHandler(BaseHandler):
"""Computes a secure hash of password.
Args:
- password (str): Password to hash.
+ password (unicode): Password to hash.
Returns:
- Deferred(str): Hashed password.
+ Deferred(unicode): Hashed password.
"""
def _do_hash():
- return bcrypt.hashpw(password.encode('utf8') + self.hs.config.password_pepper,
- bcrypt.gensalt(self.bcrypt_rounds))
+ # Normalise the Unicode in the password
+ pw = unicodedata.normalize("NFKC", password)
+
+ return bcrypt.hashpw(
+ pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"),
+ bcrypt.gensalt(self.bcrypt_rounds),
+ ).decode('ascii')
return make_deferred_yieldable(
threads.deferToThreadPool(
@@ -868,16 +880,19 @@ class AuthHandler(BaseHandler):
"""Validates that self.hash(password) == stored_hash.
Args:
- password (str): Password to hash.
- stored_hash (str): Expected hash value.
+ password (unicode): Password to hash.
+ stored_hash (unicode): Expected hash value.
Returns:
Deferred(bool): Whether self.hash(password) == stored_hash.
"""
def _do_validate_hash():
+ # Normalise the Unicode in the password
+ pw = unicodedata.normalize("NFKC", password)
+
return bcrypt.checkpw(
- password.encode('utf8') + self.hs.config.password_pepper,
+ pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"),
stored_hash.encode('utf8')
)
@@ -892,6 +907,19 @@ class AuthHandler(BaseHandler):
else:
return defer.succeed(False)
+ @defer.inlineCallbacks
+ def _check_mau_limits(self):
+ """
+ Ensure that if mau blocking is enabled that invalid users cannot
+ log in.
+ """
+ if self.hs.config.limit_usage_by_mau is True:
+ current_mau = yield self.store.count_monthly_users()
+ if current_mau >= self.hs.config.max_mau_value:
+ raise AuthError(
+ 403, "MAU Limit Exceeded", errcode=Codes.MAU_LIMIT_EXCEEDED
+ )
+
@attr.s
class MacaroonGenerator(object):
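
Password checking now NFKC-normalises the password before hashing, so visually identical Unicode inputs (precomposed vs. combining characters) verify consistently, and the pepper is explicitly encoded before concatenation. A minimal sketch of the same scheme with the bcrypt library (the pepper value is illustrative; Synapse takes it from config):

    import unicodedata

    import bcrypt

    PEPPER = "example-pepper"

    def hash_password(password, rounds=12):
        pw = unicodedata.normalize("NFKC", password)
        return bcrypt.hashpw(
            pw.encode("utf8") + PEPPER.encode("utf8"),
            bcrypt.gensalt(rounds),
        ).decode("ascii")

    def verify_password(password, stored_hash):
        pw = unicodedata.normalize("NFKC", password)
        return bcrypt.checkpw(
            pw.encode("utf8") + PEPPER.encode("utf8"),
            stored_hash.encode("utf8"),
        )
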
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index c3f2d7fe..f772e62c 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -19,10 +19,12 @@ import random
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
+from synapse.api.errors import AuthError
from synapse.events import EventBase
from synapse.events.utils import serialize_event
from synapse.types import UserID
from synapse.util.logutils import log_function
+from synapse.visibility import filter_events_for_client
from ._base import BaseHandler
@@ -129,11 +131,13 @@ class EventStreamHandler(BaseHandler):
class EventHandler(BaseHandler):
@defer.inlineCallbacks
- def get_event(self, user, event_id):
+ def get_event(self, user, room_id, event_id):
"""Retrieve a single specified event.
Args:
user (synapse.types.UserID): The user requesting the event
+ room_id (str|None): The expected room id. We'll return None if the
+ event's room does not match.
event_id (str): The event ID to obtain.
Returns:
dict: An event, or None if there is no event matching this ID.
@@ -142,13 +146,26 @@ class EventHandler(BaseHandler):
AuthError if the user does not have the rights to inspect this
event.
"""
- event = yield self.store.get_event(event_id)
+ event = yield self.store.get_event(event_id, check_room_id=room_id)
if not event:
defer.returnValue(None)
return
- if hasattr(event, "room_id"):
- yield self.auth.check_joined_room(event.room_id, user.to_string())
+ users = yield self.store.get_users_in_room(event.room_id)
+ is_peeking = user.to_string() not in users
+
+ filtered = yield filter_events_for_client(
+ self.store,
+ user.to_string(),
+ [event],
+ is_peeking=is_peeking
+ )
+
+ if not filtered:
+ raise AuthError(
+ 403,
+ "You don't have permission to access that event."
+ )
defer.returnValue(event)
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 20fb46fc..533b82c7 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -21,8 +21,8 @@ import logging
import sys
import six
-from six import iteritems
-from six.moves import http_client
+from six import iteritems, itervalues
+from six.moves import http_client, zip
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
@@ -76,7 +76,7 @@ class FederationHandler(BaseHandler):
self.hs = hs
self.store = hs.get_datastore()
- self.replication_layer = hs.get_federation_client()
+ self.federation_client = hs.get_federation_client()
self.state_handler = hs.get_state_handler()
self.server_name = hs.hostname
self.keyring = hs.get_keyring()
@@ -255,7 +255,7 @@ class FederationHandler(BaseHandler):
# know about
for p in prevs - seen:
state, got_auth_chain = (
- yield self.replication_layer.get_state_for_room(
+ yield self.federation_client.get_state_for_room(
origin, pdu.room_id, p
)
)
@@ -338,7 +338,7 @@ class FederationHandler(BaseHandler):
#
# see https://github.com/matrix-org/synapse/pull/1744
- missing_events = yield self.replication_layer.get_missing_events(
+ missing_events = yield self.federation_client.get_missing_events(
origin,
pdu.room_id,
earliest_events_ids=list(latest),
@@ -400,7 +400,7 @@ class FederationHandler(BaseHandler):
)
try:
- event_stream_id, max_stream_id = yield self._persist_auth_tree(
+ yield self._persist_auth_tree(
origin, auth_chain, state, event
)
except AuthError as e:
@@ -444,7 +444,7 @@ class FederationHandler(BaseHandler):
yield self._handle_new_events(origin, event_infos)
try:
- context, event_stream_id, max_stream_id = yield self._handle_new_event(
+ context = yield self._handle_new_event(
origin,
event,
state=state,
@@ -469,24 +469,16 @@ class FederationHandler(BaseHandler):
except StoreError:
logger.exception("Failed to store room.")
- extra_users = []
- if event.type == EventTypes.Member:
- target_user_id = event.state_key
- target_user = UserID.from_string(target_user_id)
- extra_users.append(target_user)
-
- self.notifier.on_new_room_event(
- event, event_stream_id, max_stream_id,
- extra_users=extra_users
- )
-
if event.type == EventTypes.Member:
if event.membership == Membership.JOIN:
# Only fire user_joined_room if the user has actually
# joined the room. Don't bother if the user is just
# changing their profile info.
newly_joined = True
- prev_state_id = context.prev_state_ids.get(
+
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+
+ prev_state_id = prev_state_ids.get(
(event.type, event.state_key)
)
if prev_state_id:
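# The EventContext now hands out its state maps lazily (they may need a
# round-trip to the state store), so call sites yield for them rather than
# reading attributes directly. A minimal sketch of the recurring pattern:
from twisted.internet import defer

@defer.inlineCallbacks
def get_prev_membership_id(context, store, event):
    prev_state_ids = yield context.get_prev_state_ids(store)
    defer.returnValue(prev_state_ids.get((event.type, event.state_key)))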
@@ -498,7 +490,7 @@ class FederationHandler(BaseHandler):
if newly_joined:
user = UserID.from_string(event.state_key)
- yield user_joined_room(self.distributor, user, event.room_id)
+ yield self.user_joined_room(user, event.room_id)
@log_function
@defer.inlineCallbacks
@@ -519,7 +511,7 @@ class FederationHandler(BaseHandler):
if dest == self.server_name:
raise SynapseError(400, "Can't backfill from self.")
- events = yield self.replication_layer.backfill(
+ events = yield self.federation_client.backfill(
dest,
room_id,
limit=limit,
@@ -567,7 +559,7 @@ class FederationHandler(BaseHandler):
state_events = {}
events_to_state = {}
for e_id in edges:
- state, auth = yield self.replication_layer.get_state_for_room(
+ state, auth = yield self.federation_client.get_state_for_room(
destination=dest,
room_id=room_id,
event_id=e_id
@@ -609,7 +601,7 @@ class FederationHandler(BaseHandler):
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
logcontext.run_in_background(
- self.replication_layer.get_pdu,
+ self.federation_client.get_pdu,
[dest],
event_id,
outlier=True,
@@ -731,7 +723,7 @@ class FederationHandler(BaseHandler):
"""
joined_users = [
(state_key, int(event.depth))
- for (e_type, state_key), event in state.iteritems()
+ for (e_type, state_key), event in iteritems(state)
if e_type == EventTypes.Member
and event.membership == Membership.JOIN
]
@@ -748,7 +740,7 @@ class FederationHandler(BaseHandler):
except Exception:
pass
- return sorted(joined_domains.iteritems(), key=lambda d: d[1])
+ return sorted(joined_domains.items(), key=lambda d: d[1])
curr_domains = get_domains_from_state(curr_state)
@@ -811,7 +803,7 @@ class FederationHandler(BaseHandler):
tried_domains = set(likely_domains)
tried_domains.add(self.server_name)
- event_ids = list(extremities.iterkeys())
+ event_ids = list(extremities.keys())
logger.debug("calling resolve_state_groups in _maybe_backfill")
resolve = logcontext.preserve_fn(
@@ -827,15 +819,15 @@ class FederationHandler(BaseHandler):
states = dict(zip(event_ids, [s.state for s in states]))
state_map = yield self.store.get_events(
- [e_id for ids in states.itervalues() for e_id in ids.itervalues()],
+ [e_id for ids in itervalues(states) for e_id in itervalues(ids)],
get_prev_content=False
)
states = {
key: {
k: state_map[e_id]
- for k, e_id in state_dict.iteritems()
+ for k, e_id in iteritems(state_dict)
if e_id in state_map
- } for key, state_dict in states.iteritems()
+ } for key, state_dict in iteritems(states)
}
for e_id, _ in sorted_extremeties_tuple:
@@ -890,7 +882,7 @@ class FederationHandler(BaseHandler):
Invites must be signed by the invitee's server before distribution.
"""
- pdu = yield self.replication_layer.send_invite(
+ pdu = yield self.federation_client.send_invite(
destination=target_host,
room_id=event.room_id,
event_id=event.event_id,
@@ -906,16 +898,6 @@ class FederationHandler(BaseHandler):
[auth_id for auth_id, _ in event.auth_events],
include_given=True
)
-
- for event in auth:
- event.signatures.update(
- compute_event_signature(
- event,
- self.hs.hostname,
- self.hs.config.signing_key[0]
- )
- )
-
defer.returnValue([e for e in auth])
@log_function
@@ -949,7 +931,7 @@ class FederationHandler(BaseHandler):
self.room_queues[room_id] = []
- yield self.store.clean_room_for_join(room_id)
+ yield self._clean_room_for_join(room_id)
handled_events = set()
@@ -962,7 +944,7 @@ class FederationHandler(BaseHandler):
target_hosts.insert(0, origin)
except ValueError:
pass
- ret = yield self.replication_layer.send_join(target_hosts, event)
+ ret = yield self.federation_client.send_join(target_hosts, event)
origin = ret["origin"]
state = ret["state"]
@@ -988,15 +970,10 @@ class FederationHandler(BaseHandler):
# FIXME
pass
- event_stream_id, max_stream_id = yield self._persist_auth_tree(
+ yield self._persist_auth_tree(
origin, auth_chain, state, event
)
- self.notifier.on_new_room_event(
- event, event_stream_id, max_stream_id,
- extra_users=[joinee]
- )
-
logger.debug("Finished joining %s to %s", joinee, room_id)
finally:
room_queue = self.room_queues[room_id]
@@ -1091,7 +1068,7 @@ class FederationHandler(BaseHandler):
# would introduce the danger of backwards-compatibility problems.
event.internal_metadata.send_on_behalf_of = origin
- context, event_stream_id, max_stream_id = yield self._handle_new_event(
+ context = yield self._handle_new_event(
origin, event
)
@@ -1101,25 +1078,17 @@ class FederationHandler(BaseHandler):
event.signatures,
)
- extra_users = []
- if event.type == EventTypes.Member:
- target_user_id = event.state_key
- target_user = UserID.from_string(target_user_id)
- extra_users.append(target_user)
-
- self.notifier.on_new_room_event(
- event, event_stream_id, max_stream_id, extra_users=extra_users
- )
-
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.JOIN:
user = UserID.from_string(event.state_key)
- yield user_joined_room(self.distributor, user, event.room_id)
+ yield self.user_joined_room(user, event.room_id)
+
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
- state_ids = list(context.prev_state_ids.values())
+ state_ids = list(prev_state_ids.values())
auth_chain = yield self.store.get_auth_chain(state_ids)
- state = yield self.store.get_events(list(context.prev_state_ids.values()))
+ state = yield self.store.get_events(list(prev_state_ids.values()))
defer.returnValue({
"state": list(state.values()),
@@ -1181,17 +1150,7 @@ class FederationHandler(BaseHandler):
)
context = yield self.state_handler.compute_event_context(event)
-
- event_stream_id, max_stream_id = yield self.store.persist_event(
- event,
- context=context,
- )
-
- target_user = UserID.from_string(event.state_key)
- self.notifier.on_new_room_event(
- event, event_stream_id, max_stream_id,
- extra_users=[target_user],
- )
+ yield self._persist_events([(event, context)])
defer.returnValue(event)
@@ -1216,30 +1175,20 @@ class FederationHandler(BaseHandler):
except ValueError:
pass
- yield self.replication_layer.send_leave(
+ yield self.federation_client.send_leave(
target_hosts,
event
)
context = yield self.state_handler.compute_event_context(event)
-
- event_stream_id, max_stream_id = yield self.store.persist_event(
- event,
- context=context,
- )
-
- target_user = UserID.from_string(event.state_key)
- self.notifier.on_new_room_event(
- event, event_stream_id, max_stream_id,
- extra_users=[target_user],
- )
+ yield self._persist_events([(event, context)])
defer.returnValue(event)
@defer.inlineCallbacks
def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
content={},):
- origin, pdu = yield self.replication_layer.make_membership_event(
+ origin, pdu = yield self.federation_client.make_membership_event(
target_hosts,
room_id,
user_id,
@@ -1284,7 +1233,7 @@ class FederationHandler(BaseHandler):
@log_function
def on_make_leave_request(self, room_id, user_id):
""" We've received a /make_leave/ request, so we create a partial
- join event for the room and return that. We do *not* persist or
+ leave event for the room and return that. We do *not* persist or
process it until the other server has signed it and sent it back.
"""
builder = self.event_builder_factory.new({
@@ -1323,7 +1272,7 @@ class FederationHandler(BaseHandler):
event.internal_metadata.outlier = False
- context, event_stream_id, max_stream_id = yield self._handle_new_event(
+ yield self._handle_new_event(
origin, event
)
@@ -1333,22 +1282,17 @@ class FederationHandler(BaseHandler):
event.signatures,
)
- extra_users = []
- if event.type == EventTypes.Member:
- target_user_id = event.state_key
- target_user = UserID.from_string(target_user_id)
- extra_users.append(target_user)
-
- self.notifier.on_new_room_event(
- event, event_stream_id, max_stream_id, extra_users=extra_users
- )
-
defer.returnValue(None)
@defer.inlineCallbacks
def get_state_for_pdu(self, room_id, event_id):
"""Returns the state at the event. i.e. not including said event.
"""
+
+ event = yield self.store.get_event(
+ event_id, allow_none=False, check_room_id=room_id,
+ )
+
state_groups = yield self.store.get_state_groups(
room_id, [event_id]
)
@@ -1359,8 +1303,7 @@ class FederationHandler(BaseHandler):
(e.type, e.state_key): e for e in state
}
- event = yield self.store.get_event(event_id)
- if event and event.is_state():
+ if event.is_state():
# Get previous state
if "replaces_state" in event.unsigned:
prev_id = event.unsigned["replaces_state"]
@@ -1371,18 +1314,6 @@ class FederationHandler(BaseHandler):
del results[(event.type, event.state_key)]
res = list(results.values())
- for event in res:
- # We sign these again because there was a bug where we
- # incorrectly signed things the first time round
- if self.is_mine_id(event.event_id):
- event.signatures.update(
- compute_event_signature(
- event,
- self.hs.hostname,
- self.hs.config.signing_key[0]
- )
- )
-
defer.returnValue(res)
else:
defer.returnValue([])
@@ -1391,6 +1322,10 @@ class FederationHandler(BaseHandler):
def get_state_ids_for_pdu(self, room_id, event_id):
"""Returns the state at the event. i.e. not including said event.
"""
+ event = yield self.store.get_event(
+ event_id, allow_none=False, check_room_id=room_id,
+ )
+
state_groups = yield self.store.get_state_groups_ids(
room_id, [event_id]
)
@@ -1399,8 +1334,7 @@ class FederationHandler(BaseHandler):
_, state = state_groups.items().pop()
results = state
- event = yield self.store.get_event(event_id)
- if event and event.is_state():
+ if event.is_state():
# Get previous state
if "replaces_state" in event.unsigned:
prev_id = event.unsigned["replaces_state"]
@@ -1454,18 +1388,6 @@ class FederationHandler(BaseHandler):
)
if event:
- if self.is_mine_id(event.event_id):
- # FIXME: This is a temporary work around where we occasionally
- # return events slightly differently than when they were
- # originally signed
- event.signatures.update(
- compute_event_signature(
- event,
- self.hs.hostname,
- self.hs.config.signing_key[0]
- )
- )
-
in_room = yield self.auth.check_host_in_room(
event.room_id,
origin
@@ -1501,9 +1423,8 @@ class FederationHandler(BaseHandler):
event, context
)
- event_stream_id, max_stream_id = yield self.store.persist_event(
- event,
- context=context,
+ yield self._persist_events(
+ [(event, context)],
backfilled=backfilled,
)
except: # noqa: E722, as we reraise the exception this is fine.
@@ -1516,15 +1437,7 @@ class FederationHandler(BaseHandler):
six.reraise(tp, value, tb)
- if not backfilled:
- # this intentionally does not yield: we don't care about the result
- # and don't need to wait for it.
- logcontext.run_in_background(
- self.pusher_pool.on_new_notifications,
- event_stream_id, max_stream_id,
- )
-
- defer.returnValue((context, event_stream_id, max_stream_id))
+ defer.returnValue(context)
@defer.inlineCallbacks
def _handle_new_events(self, origin, event_infos, backfilled=False):
@@ -1532,6 +1445,8 @@ class FederationHandler(BaseHandler):
should not depend on one another, e.g. this should be used to persist
a bunch of outliers, but not a chunk of individual events that depend
on each other for state calculations.
+
+ Notifies about the events where appropriate.
"""
contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
@@ -1546,10 +1461,10 @@ class FederationHandler(BaseHandler):
], consumeErrors=True,
))
- yield self.store.persist_events(
+ yield self._persist_events(
[
(ev_info["event"], context)
- for ev_info, context in itertools.izip(event_infos, contexts)
+ for ev_info, context in zip(event_infos, contexts)
],
backfilled=backfilled,
)
@@ -1558,7 +1473,8 @@ class FederationHandler(BaseHandler):
def _persist_auth_tree(self, origin, auth_events, state, event):
"""Checks the auth chain is valid (and passes auth checks) for the
state and event. Then persists the auth chain and state atomically.
- Persists the event seperately.
+ Persists the event separately. Notifies about the persisted events
+ where appropriate.
Will attempt to fetch missing auth events.
@@ -1569,8 +1485,7 @@ class FederationHandler(BaseHandler):
event (Event)
Returns:
- 2-tuple of (event_stream_id, max_stream_id) from the persist_event
- call for `event`
+ Deferred
"""
events_to_context = {}
for e in itertools.chain(auth_events, state):
@@ -1596,7 +1511,7 @@ class FederationHandler(BaseHandler):
missing_auth_events.add(e_id)
for e_id in missing_auth_events:
- m_ev = yield self.replication_layer.get_pdu(
+ m_ev = yield self.federation_client.get_pdu(
[origin],
e_id,
outlier=True,
@@ -1634,7 +1549,7 @@ class FederationHandler(BaseHandler):
raise
events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
- yield self.store.persist_events(
+ yield self._persist_events(
[
(e, events_to_context[e.event_id])
for e in itertools.chain(auth_events, state)
@@ -1645,12 +1560,10 @@ class FederationHandler(BaseHandler):
event, old_state=state
)
- event_stream_id, max_stream_id = yield self.store.persist_event(
- event, new_event_context,
+ yield self._persist_events(
+ [(event, new_event_context)],
)
- defer.returnValue((event_stream_id, max_stream_id))
-
@defer.inlineCallbacks
def _prep_event(self, origin, event, state=None, auth_events=None):
"""
@@ -1669,8 +1582,9 @@ class FederationHandler(BaseHandler):
)
if not auth_events:
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_events_ids = yield self.auth.compute_auth_events(
- event, context.prev_state_ids, for_verification=True,
+ event, prev_state_ids, for_verification=True,
)
auth_events = yield self.store.get_events(auth_events_ids)
auth_events = {
@@ -1706,8 +1620,19 @@ class FederationHandler(BaseHandler):
defer.returnValue(context)
@defer.inlineCallbacks
- def on_query_auth(self, origin, event_id, remote_auth_chain, rejects,
+ def on_query_auth(self, origin, event_id, room_id, remote_auth_chain, rejects,
missing):
+ in_room = yield self.auth.check_host_in_room(
+ room_id,
+ origin
+ )
+ if not in_room:
+ raise AuthError(403, "Host not in room.")
+
+ event = yield self.store.get_event(
+ event_id, allow_none=False, check_room_id=room_id
+ )
+
# Just go through and process each event in `remote_auth_chain`. We
# don't want to fall into the trap of `missing` being wrong.
for e in remote_auth_chain:
@@ -1717,7 +1642,6 @@ class FederationHandler(BaseHandler):
pass
# Now get the current auth_chain for the event.
- event = yield self.store.get_event(event_id)
local_auth_chain = yield self.store.get_auth_chain(
[auth_id for auth_id, _ in event.auth_events],
include_given=True
@@ -1730,15 +1654,6 @@ class FederationHandler(BaseHandler):
local_auth_chain, remote_auth_chain
)
- for event in ret["auth_chain"]:
- event.signatures.update(
- compute_event_signature(
- event,
- self.hs.hostname,
- self.hs.config.signing_key[0]
- )
- )
-
logger.debug("on_query_auth returning: %s", ret)
defer.returnValue(ret)
@@ -1814,7 +1729,7 @@ class FederationHandler(BaseHandler):
logger.info("Missing auth: %s", missing_auth)
# If we don't have all the auth events, we need to get them.
try:
- remote_auth_chain = yield self.replication_layer.get_event_auth(
+ remote_auth_chain = yield self.federation_client.get_event_auth(
origin, event.room_id, event.event_id
)
@@ -1919,9 +1834,10 @@ class FederationHandler(BaseHandler):
break
if do_resolution:
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
# 1. Get what we think is the auth chain.
auth_ids = yield self.auth.compute_auth_events(
- event, context.prev_state_ids
+ event, prev_state_ids
)
local_auth_chain = yield self.store.get_auth_chain(
auth_ids, include_given=True
@@ -1929,7 +1845,7 @@ class FederationHandler(BaseHandler):
try:
# 2. Get remote difference.
- result = yield self.replication_layer.query_auth(
+ result = yield self.federation_client.query_auth(
origin,
event.room_id,
event.event_id,
@@ -2011,21 +1927,34 @@ class FederationHandler(BaseHandler):
k: a.event_id for k, a in iteritems(auth_events)
if k != event_key
}
- context.current_state_ids = dict(context.current_state_ids)
- context.current_state_ids.update(state_updates)
- if context.delta_ids is not None:
- context.delta_ids = dict(context.delta_ids)
- context.delta_ids.update(state_updates)
- context.prev_state_ids = dict(context.prev_state_ids)
- context.prev_state_ids.update({
+ current_state_ids = yield context.get_current_state_ids(self.store)
+ current_state_ids = dict(current_state_ids)
+
+ current_state_ids.update(state_updates)
+
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+ prev_state_ids = dict(prev_state_ids)
+
+ prev_state_ids.update({
k: a.event_id for k, a in iteritems(auth_events)
})
- context.state_group = yield self.store.store_state_group(
+
+ # create a new state group as a delta from the existing one.
+ prev_group = context.state_group
+ state_group = yield self.store.store_state_group(
event.event_id,
event.room_id,
- prev_group=context.prev_group,
- delta_ids=context.delta_ids,
- current_state_ids=context.current_state_ids,
+ prev_group=prev_group,
+ delta_ids=state_updates,
+ current_state_ids=current_state_ids,
+ )
+
+ yield context.update_state(
+ state_group=state_group,
+ current_state_ids=current_state_ids,
+ prev_state_ids=prev_state_ids,
+ prev_group=prev_group,
+ delta_ids=state_updates,
)
@defer.inlineCallbacks
@@ -2215,7 +2144,7 @@ class FederationHandler(BaseHandler):
yield member_handler.send_membership_event(None, event, context)
else:
destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id))
- yield self.replication_layer.forward_third_party_invite(
+ yield self.federation_client.forward_third_party_invite(
destinations,
room_id,
event_dict,
@@ -2265,7 +2194,8 @@ class FederationHandler(BaseHandler):
event.content["third_party_invite"]["signed"]["token"]
)
original_invite = None
- original_invite_id = context.prev_state_ids.get(key)
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+ original_invite_id = prev_state_ids.get(key)
if original_invite_id:
original_invite = yield self.store.get_event(
original_invite_id, allow_none=True
@@ -2307,7 +2237,8 @@ class FederationHandler(BaseHandler):
signed = event.content["third_party_invite"]["signed"]
token = signed["token"]
- invite_event_id = context.prev_state_ids.get(
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+ invite_event_id = prev_state_ids.get(
(EventTypes.ThirdPartyInvite, token,)
)
@@ -2368,3 +2299,69 @@ class FederationHandler(BaseHandler):
)
if "valid" not in response or not response["valid"]:
raise AuthError(403, "Third party certificate was invalid")
+
+ @defer.inlineCallbacks
+ def _persist_events(self, event_and_contexts, backfilled=False):
+ """Persists events and tells the notifier/pushers about them, if
+ necessary.
+
+ Args:
+ event_and_contexts(list[tuple[FrozenEvent, EventContext]])
+ backfilled (bool): Whether these events are a result of
+ backfilling or not
+
+ Returns:
+ Deferred
+ """
+ max_stream_id = yield self.store.persist_events(
+ event_and_contexts,
+ backfilled=backfilled,
+ )
+
+ if not backfilled: # Never notify for backfilled events
+ for event, _ in event_and_contexts:
+ self._notify_persisted_event(event, max_stream_id)
+
+ def _notify_persisted_event(self, event, max_stream_id):
+ """Checks to see if notifier/pushers should be notified about the
+ event or not.
+
+ Args:
+ event (FrozenEvent)
+ max_stream_id (int): The max_stream_id returned by persist_events
+ """
+
+ extra_users = []
+ if event.type == EventTypes.Member:
+ target_user_id = event.state_key
+
+ # We notify for memberships if it's an invite for one of our
+ # users
+ if event.internal_metadata.is_outlier():
+ if event.membership != Membership.INVITE:
+ if not self.is_mine_id(target_user_id):
+ return
+
+ target_user = UserID.from_string(target_user_id)
+ extra_users.append(target_user)
+ elif event.internal_metadata.is_outlier():
+ return
+
+ event_stream_id = event.internal_metadata.stream_ordering
+ self.notifier.on_new_room_event(
+ event, event_stream_id, max_stream_id,
+ extra_users=extra_users
+ )
+
+ logcontext.run_in_background(
+ self.pusher_pool.on_new_notifications,
+ event_stream_id, max_stream_id,
+ )
+
+ def _clean_room_for_join(self, room_id):
+ return self.store.clean_room_for_join(room_id)
+
+ def user_joined_room(self, user, room_id):
+ """Called when a new user has joined the room
+ """
+ return user_joined_room(self.distributor, user, room_id)
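# A pure-function restatement of the rule buried in the nested conditionals
# of _notify_persisted_event above (parameter names are ours):
def should_notify(is_member_event, is_outlier, is_invite, target_is_local):
    if not is_outlier:
        return True  # ordinary, in-DAG events always notify
    if is_member_event:
        # outlier memberships only notify for invites, or for events
        # about one of our own users
        return is_invite or target_is_local
    return False  # other outliers never wake the notifier/pushers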
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 8c8aedb2..1d36d967 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -26,7 +26,7 @@ from twisted.internet import defer
from synapse.api.errors import (
CodeMessageException,
Codes,
- MatrixCodeMessageException,
+ HttpResponseException,
SynapseError,
)
@@ -85,7 +85,6 @@ class IdentityHandler(BaseHandler):
)
defer.returnValue(None)
- data = {}
try:
data = yield self.http_client.get_json(
"https://%s%s" % (
@@ -94,11 +93,9 @@ class IdentityHandler(BaseHandler):
),
{'sid': creds['sid'], 'client_secret': client_secret}
)
- except MatrixCodeMessageException as e:
+ except HttpResponseException as e:
logger.info("getValidated3pid failed with Matrix error: %r", e)
- raise SynapseError(e.code, e.msg, e.errcode)
- except CodeMessageException as e:
- data = json.loads(e.msg)
+ raise e.to_synapse_error()
if 'medium' in data:
defer.returnValue(data)
@@ -136,7 +133,7 @@ class IdentityHandler(BaseHandler):
)
logger.debug("bound threepid %r to %s", creds, mxid)
except CodeMessageException as e:
- data = json.loads(e.msg)
+ data = json.loads(e.msg) # XXX WAT?
defer.returnValue(data)
@defer.inlineCallbacks
@@ -209,12 +206,9 @@ class IdentityHandler(BaseHandler):
params
)
defer.returnValue(data)
- except MatrixCodeMessageException as e:
- logger.info("Proxied requestToken failed with Matrix error: %r", e)
- raise SynapseError(e.code, e.msg, e.errcode)
- except CodeMessageException as e:
+ except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
- raise e
+ raise e.to_synapse_error()
@defer.inlineCallbacks
def requestMsisdnToken(
@@ -244,9 +238,6 @@ class IdentityHandler(BaseHandler):
params
)
defer.returnValue(data)
- except MatrixCodeMessageException as e:
- logger.info("Proxied requestToken failed with Matrix error: %r", e)
- raise SynapseError(e.code, e.msg, e.errcode)
- except CodeMessageException as e:
+ except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
- raise e
+ raise e.to_synapse_error()
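# A sketch of the proxying pattern now used throughout this handler:
# HttpResponseException carries the remote HTTP error, and to_synapse_error()
# re-raises it with the remote code/errcode intact. The helper name and
# arguments here are illustrative; post_json_get_json is the HTTP client
# method used by these handlers.
import logging

from twisted.internet import defer

from synapse.api.errors import HttpResponseException

logger = logging.getLogger(__name__)

@defer.inlineCallbacks
def proxied_request(http_client, uri, body):
    try:
        data = yield http_client.post_json_get_json(uri, body)
    except HttpResponseException as e:
        logger.info("Proxied request failed: %r", e)
        raise e.to_synapse_error()
    defer.returnValue(data)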
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index fb11716e..40e7580a 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -148,13 +148,15 @@ class InitialSyncHandler(BaseHandler):
try:
if event.membership == Membership.JOIN:
room_end_token = now_token.room_key
- deferred_room_state = self.state_handler.get_current_state(
- event.room_id
+ deferred_room_state = run_in_background(
+ self.state_handler.get_current_state,
+ event.room_id,
)
elif event.membership == Membership.LEAVE:
room_end_token = "s%d" % (event.stream_ordering,)
- deferred_room_state = self.store.get_state_for_events(
- [event.event_id], None
+ deferred_room_state = run_in_background(
+ self.store.get_state_for_events,
+ [event.event_id], None,
)
deferred_room_state.addCallback(
lambda states: states[event.event_id]
@@ -387,19 +389,21 @@ class InitialSyncHandler(BaseHandler):
receipts = []
defer.returnValue(receipts)
- presence, receipts, (messages, token) = yield defer.gatherResults(
- [
- run_in_background(get_presence),
- run_in_background(get_receipts),
- run_in_background(
- self.store.get_recent_events_for_room,
- room_id,
- limit=limit,
- end_token=now_token.room_key,
- )
- ],
- consumeErrors=True,
- ).addErrback(unwrapFirstError)
+ presence, receipts, (messages, token) = yield make_deferred_yieldable(
+ defer.gatherResults(
+ [
+ run_in_background(get_presence),
+ run_in_background(get_receipts),
+ run_in_background(
+ self.store.get_recent_events_for_room,
+ room_id,
+ limit=limit,
+ end_token=now_token.room_key,
+ )
+ ],
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError),
+ )
messages = yield filter_events_for_client(
self.store, user_id, messages, is_peeking=is_peeking,
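# The logcontext-safe fan-out pattern used above, reduced to a sketch: each
# job starts under run_in_background, and the combined Deferred is awaited
# via make_deferred_yieldable so the caller's logcontext is restored
# afterwards. `tasks` (a list of zero-arg callables) is illustrative.
from twisted.internet import defer

from synapse.util import unwrapFirstError
from synapse.util.logcontext import make_deferred_yieldable, run_in_background

@defer.inlineCallbacks
def gather(tasks):
    results = yield make_deferred_yieldable(
        defer.gatherResults(
            [run_in_background(f) for f in tasks],
            consumeErrors=True,
        ).addErrback(unwrapFirstError)
    )
    defer.returnValue(results)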
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index a39b852c..39d77247 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -23,7 +23,6 @@ from canonicaljson import encode_canonical_json, json
from twisted.internet import defer
from twisted.internet.defer import succeed
-from twisted.python.failure import Failure
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import AuthError, Codes, ConsentNotGivenError, SynapseError
@@ -32,247 +31,26 @@ from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.replication.http.send_event import send_event_to_master
-from synapse.types import RoomAlias, RoomStreamToken, UserID
-from synapse.util.async import Limiter, ReadWriteLock
+from synapse.types import RoomAlias, UserID
+from synapse.util.async import Linearizer
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.logcontext import run_in_background
from synapse.util.metrics import measure_func
-from synapse.util.stringutils import random_string
-from synapse.visibility import filter_events_for_client
from ._base import BaseHandler
logger = logging.getLogger(__name__)
-class PurgeStatus(object):
- """Object tracking the status of a purge request
-
- This class contains information on the progress of a purge request, for
- return by get_purge_status.
-
- Attributes:
- status (int): Tracks whether this request has completed. One of
- STATUS_{ACTIVE,COMPLETE,FAILED}
+class MessageHandler(object):
+ """Contains some read only APIs to get state about a room
"""
- STATUS_ACTIVE = 0
- STATUS_COMPLETE = 1
- STATUS_FAILED = 2
-
- STATUS_TEXT = {
- STATUS_ACTIVE: "active",
- STATUS_COMPLETE: "complete",
- STATUS_FAILED: "failed",
- }
-
- def __init__(self):
- self.status = PurgeStatus.STATUS_ACTIVE
-
- def asdict(self):
- return {
- "status": PurgeStatus.STATUS_TEXT[self.status]
- }
-
-
-class MessageHandler(BaseHandler):
-
def __init__(self, hs):
- super(MessageHandler, self).__init__(hs)
- self.hs = hs
- self.state = hs.get_state_handler()
+ self.auth = hs.get_auth()
self.clock = hs.get_clock()
-
- self.pagination_lock = ReadWriteLock()
- self._purges_in_progress_by_room = set()
- # map from purge id to PurgeStatus
- self._purges_by_id = {}
-
- def start_purge_history(self, room_id, token,
- delete_local_events=False):
- """Start off a history purge on a room.
-
- Args:
- room_id (str): The room to purge from
-
- token (str): topological token to delete events before
- delete_local_events (bool): True to delete local events as well as
- remote ones
-
- Returns:
- str: unique ID for this purge transaction.
- """
- if room_id in self._purges_in_progress_by_room:
- raise SynapseError(
- 400,
- "History purge already in progress for %s" % (room_id, ),
- )
-
- purge_id = random_string(16)
-
- # we log the purge_id here so that it can be tied back to the
- # request id in the log lines.
- logger.info("[purge] starting purge_id %s", purge_id)
-
- self._purges_by_id[purge_id] = PurgeStatus()
- run_in_background(
- self._purge_history,
- purge_id, room_id, token, delete_local_events,
- )
- return purge_id
-
- @defer.inlineCallbacks
- def _purge_history(self, purge_id, room_id, token,
- delete_local_events):
- """Carry out a history purge on a room.
-
- Args:
- purge_id (str): The id for this purge
- room_id (str): The room to purge from
- token (str): topological token to delete events before
- delete_local_events (bool): True to delete local events as well as
- remote ones
-
- Returns:
- Deferred
- """
- self._purges_in_progress_by_room.add(room_id)
- try:
- with (yield self.pagination_lock.write(room_id)):
- yield self.store.purge_history(
- room_id, token, delete_local_events,
- )
- logger.info("[purge] complete")
- self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
- except Exception:
- logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
- self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
- finally:
- self._purges_in_progress_by_room.discard(room_id)
-
- # remove the purge from the list 24 hours after it completes
- def clear_purge():
- del self._purges_by_id[purge_id]
- self.hs.get_reactor().callLater(24 * 3600, clear_purge)
-
- def get_purge_status(self, purge_id):
- """Get the current status of an active purge
-
- Args:
- purge_id (str): purge_id returned by start_purge_history
-
- Returns:
- PurgeStatus|None
- """
- return self._purges_by_id.get(purge_id)
-
- @defer.inlineCallbacks
- def get_messages(self, requester, room_id=None, pagin_config=None,
- as_client_event=True, event_filter=None):
- """Get messages in a room.
-
- Args:
- requester (Requester): The user requesting messages.
- room_id (str): The room they want messages from.
- pagin_config (synapse.api.streams.PaginationConfig): The pagination
- config rules to apply, if any.
- as_client_event (bool): True to get events in client-server format.
- event_filter (Filter): Filter to apply to results or None
- Returns:
- dict: Pagination API results
- """
- user_id = requester.user.to_string()
-
- if pagin_config.from_token:
- room_token = pagin_config.from_token.room_key
- else:
- pagin_config.from_token = (
- yield self.hs.get_event_sources().get_current_token_for_room(
- room_id=room_id
- )
- )
- room_token = pagin_config.from_token.room_key
-
- room_token = RoomStreamToken.parse(room_token)
-
- pagin_config.from_token = pagin_config.from_token.copy_and_replace(
- "room_key", str(room_token)
- )
-
- source_config = pagin_config.get_source_config("room")
-
- with (yield self.pagination_lock.read(room_id)):
- membership, member_event_id = yield self._check_in_room_or_world_readable(
- room_id, user_id
- )
-
- if source_config.direction == 'b':
- # if we're going backwards, we might need to backfill. This
- # requires that we have a topo token.
- if room_token.topological:
- max_topo = room_token.topological
- else:
- max_topo = yield self.store.get_max_topological_token(
- room_id, room_token.stream
- )
-
- if membership == Membership.LEAVE:
- # If they have left the room then clamp the token to be before
- # they left the room, to save the effort of loading from the
- # database.
- leave_token = yield self.store.get_topological_token_for_event(
- member_event_id
- )
- leave_token = RoomStreamToken.parse(leave_token)
- if leave_token.topological < max_topo:
- source_config.from_key = str(leave_token)
-
- yield self.hs.get_handlers().federation_handler.maybe_backfill(
- room_id, max_topo
- )
-
- events, next_key = yield self.store.paginate_room_events(
- room_id=room_id,
- from_key=source_config.from_key,
- to_key=source_config.to_key,
- direction=source_config.direction,
- limit=source_config.limit,
- event_filter=event_filter,
- )
-
- next_token = pagin_config.from_token.copy_and_replace(
- "room_key", next_key
- )
-
- if not events:
- defer.returnValue({
- "chunk": [],
- "start": pagin_config.from_token.to_string(),
- "end": next_token.to_string(),
- })
-
- if event_filter:
- events = event_filter.filter(events)
-
- events = yield filter_events_for_client(
- self.store,
- user_id,
- events,
- is_peeking=(member_event_id is None),
- )
-
- time_now = self.clock.time_msec()
-
- chunk = {
- "chunk": [
- serialize_event(e, time_now, as_client_event)
- for e in events
- ],
- "start": pagin_config.from_token.to_string(),
- "end": next_token.to_string(),
- }
-
- defer.returnValue(chunk)
+ self.state = hs.get_state_handler()
+ self.store = hs.get_datastore()
@defer.inlineCallbacks
def get_room_data(self, user_id=None, room_id=None,
@@ -286,12 +64,12 @@ class MessageHandler(BaseHandler):
Raises:
SynapseError if something went wrong.
"""
- membership, membership_event_id = yield self._check_in_room_or_world_readable(
+ membership, membership_event_id = yield self.auth.check_in_room_or_world_readable(
room_id, user_id
)
if membership == Membership.JOIN:
- data = yield self.state_handler.get_current_state(
+ data = yield self.state.get_current_state(
room_id, event_type, state_key
)
elif membership == Membership.LEAVE:
@@ -304,31 +82,6 @@ class MessageHandler(BaseHandler):
defer.returnValue(data)
@defer.inlineCallbacks
- def _check_in_room_or_world_readable(self, room_id, user_id):
- try:
- # check_user_was_in_room will return the most recent membership
- # event for the user if:
- # * The user is a non-guest user, and was ever in the room
- # * The user is a guest user, and has joined the room
- # else it will throw.
- member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
- defer.returnValue((member_event.membership, member_event.event_id))
- return
- except AuthError:
- visibility = yield self.state_handler.get_current_state(
- room_id, EventTypes.RoomHistoryVisibility, ""
- )
- if (
- visibility and
- visibility.content["history_visibility"] == "world_readable"
- ):
- defer.returnValue((Membership.JOIN, None))
- return
- raise AuthError(
- 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
- )
-
- @defer.inlineCallbacks
def get_state_events(self, user_id, room_id, is_guest=False):
"""Retrieve all state events for a given room. If the user is
joined to the room then return the current state. If the user has
@@ -340,12 +93,12 @@ class MessageHandler(BaseHandler):
Returns:
A list of dicts representing state events. [{}, {}, {}]
"""
- membership, membership_event_id = yield self._check_in_room_or_world_readable(
+ membership, membership_event_id = yield self.auth.check_in_room_or_world_readable(
room_id, user_id
)
if membership == Membership.JOIN:
- room_state = yield self.state_handler.get_current_state(room_id)
+ room_state = yield self.state.get_current_state(room_id)
elif membership == Membership.LEAVE:
room_state = yield self.store.get_state_for_events(
[membership_event_id], None
@@ -373,7 +126,7 @@ class MessageHandler(BaseHandler):
if not requester.app_service:
# We check AS auth after fetching the room membership, as it
# requires us to pull out all joined members anyway.
- membership, _ = yield self._check_in_room_or_world_readable(
+ membership, _ = yield self.auth.check_in_room_or_world_readable(
room_id, user_id
)
if membership != Membership.JOIN:
@@ -427,7 +180,7 @@ class EventCreationHandler(object):
# We arbitrarily limit concurrent event creation for a room to 5.
# This is to stop us from diverging history *too* much.
- self.limiter = Limiter(max_count=5)
+ self.limiter = Linearizer(max_count=5, name="room_event_creation_limit")
self.action_generator = hs.get_action_generator()
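# A sketch of the Limiter -> Linearizer swap above: a Linearizer with
# max_count=5 admits at most five concurrent entrants per key (here the
# room_id), queueing the rest; `name` feeds the metrics.
from twisted.internet import defer

from synapse.util.async import Linearizer

limiter = Linearizer(max_count=5, name="room_event_creation_limit")

@defer.inlineCallbacks
def create_event(room_id):
    with (yield limiter.queue(room_id)):
        pass  # at most five event-creation paths run here per room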
@@ -630,7 +383,8 @@ class EventCreationHandler(object):
If so, returns the version of the event in context.
Otherwise, returns None.
"""
- prev_event_id = context.prev_state_ids.get((event.type, event.state_key))
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+ prev_event_id = prev_state_ids.get((event.type, event.state_key))
prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
if not prev_event:
return
@@ -752,8 +506,8 @@ class EventCreationHandler(object):
event = builder.build()
logger.debug(
- "Created event %s with state: %s",
- event.event_id, context.prev_state_ids,
+ "Created event %s",
+ event.event_id,
)
defer.returnValue(
@@ -806,8 +560,9 @@ class EventCreationHandler(object):
# If we're a worker we need to hit out to the master.
if self.config.worker_app:
yield send_event_to_master(
- self.hs.get_clock(),
- self.http_client,
+ clock=self.hs.get_clock(),
+ store=self.store,
+ client=self.http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
@@ -884,9 +639,11 @@ class EventCreationHandler(object):
e.sender == event.sender
)
+ current_state_ids = yield context.get_current_state_ids(self.store)
+
state_to_include_ids = [
e_id
- for k, e_id in iteritems(context.current_state_ids)
+ for k, e_id in iteritems(current_state_ids)
if k[0] in self.hs.config.room_invite_state_types
or k == (EventTypes.Member, event.sender)
]
@@ -922,8 +679,9 @@ class EventCreationHandler(object):
)
if event.type == EventTypes.Redaction:
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_events_ids = yield self.auth.compute_auth_events(
- event, context.prev_state_ids, for_verification=True,
+ event, prev_state_ids, for_verification=True,
)
auth_events = yield self.store.get_events(auth_events_ids)
auth_events = {
@@ -943,11 +701,13 @@ class EventCreationHandler(object):
"You don't have permission to redact events"
)
- if event.type == EventTypes.Create and context.prev_state_ids:
- raise AuthError(
- 403,
- "Changing the room create event is forbidden",
- )
+ if event.type == EventTypes.Create:
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+ if prev_state_ids:
+ raise AuthError(
+ 403,
+ "Changing the room create event is forbidden",
+ )
(event_stream_id, max_stream_id) = yield self.store.persist_event(
event, context=context
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
new file mode 100644
index 00000000..b2849783
--- /dev/null
+++ b/synapse/handlers/pagination.py
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 - 2016 OpenMarket Ltd
+# Copyright 2017 - 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from twisted.internet import defer
+from twisted.python.failure import Failure
+
+from synapse.api.constants import Membership
+from synapse.api.errors import SynapseError
+from synapse.events.utils import serialize_event
+from synapse.types import RoomStreamToken
+from synapse.util.async import ReadWriteLock
+from synapse.util.logcontext import run_in_background
+from synapse.util.stringutils import random_string
+from synapse.visibility import filter_events_for_client
+
+logger = logging.getLogger(__name__)
+
+
+class PurgeStatus(object):
+ """Object tracking the status of a purge request
+
+ This class contains information on the progress of a purge request, for
+ return by get_purge_status.
+
+ Attributes:
+ status (int): Tracks whether this request has completed. One of
+ STATUS_{ACTIVE,COMPLETE,FAILED}
+ """
+
+ STATUS_ACTIVE = 0
+ STATUS_COMPLETE = 1
+ STATUS_FAILED = 2
+
+ STATUS_TEXT = {
+ STATUS_ACTIVE: "active",
+ STATUS_COMPLETE: "complete",
+ STATUS_FAILED: "failed",
+ }
+
+ def __init__(self):
+ self.status = PurgeStatus.STATUS_ACTIVE
+
+ def asdict(self):
+ return {
+ "status": PurgeStatus.STATUS_TEXT[self.status]
+ }
+
+
+class PaginationHandler(object):
+ """Handles pagination and purge history requests.
+
+ These are in the same handler because we need to block clients from
+ paginating during a purge.
+ """
+
+ def __init__(self, hs):
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+
+ self.pagination_lock = ReadWriteLock()
+ self._purges_in_progress_by_room = set()
+ # map from purge id to PurgeStatus
+ self._purges_by_id = {}
+
+ def start_purge_history(self, room_id, token,
+ delete_local_events=False):
+ """Start off a history purge on a room.
+
+ Args:
+ room_id (str): The room to purge from
+
+ token (str): topological token to delete events before
+ delete_local_events (bool): True to delete local events as well as
+ remote ones
+
+ Returns:
+ str: unique ID for this purge transaction.
+ """
+ if room_id in self._purges_in_progress_by_room:
+ raise SynapseError(
+ 400,
+ "History purge already in progress for %s" % (room_id, ),
+ )
+
+ purge_id = random_string(16)
+
+ # we log the purge_id here so that it can be tied back to the
+ # request id in the log lines.
+ logger.info("[purge] starting purge_id %s", purge_id)
+
+ self._purges_by_id[purge_id] = PurgeStatus()
+ run_in_background(
+ self._purge_history,
+ purge_id, room_id, token, delete_local_events,
+ )
+ return purge_id
+
+ @defer.inlineCallbacks
+ def _purge_history(self, purge_id, room_id, token,
+ delete_local_events):
+ """Carry out a history purge on a room.
+
+ Args:
+ purge_id (str): The id for this purge
+ room_id (str): The room to purge from
+ token (str): topological token to delete events before
+ delete_local_events (bool): True to delete local events as well as
+ remote ones
+
+ Returns:
+ Deferred
+ """
+ self._purges_in_progress_by_room.add(room_id)
+ try:
+ with (yield self.pagination_lock.write(room_id)):
+ yield self.store.purge_history(
+ room_id, token, delete_local_events,
+ )
+ logger.info("[purge] complete")
+ self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
+ except Exception:
+ logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
+ self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
+ finally:
+ self._purges_in_progress_by_room.discard(room_id)
+
+ # remove the purge from the list 24 hours after it completes
+ def clear_purge():
+ del self._purges_by_id[purge_id]
+ self.hs.get_reactor().callLater(24 * 3600, clear_purge)
+
+ def get_purge_status(self, purge_id):
+ """Get the current status of an active purge
+
+ Args:
+ purge_id (str): purge_id returned by start_purge_history
+
+ Returns:
+ PurgeStatus|None
+ """
+ return self._purges_by_id.get(purge_id)
+
+ @defer.inlineCallbacks
+ def get_messages(self, requester, room_id=None, pagin_config=None,
+ as_client_event=True, event_filter=None):
+ """Get messages in a room.
+
+ Args:
+ requester (Requester): The user requesting messages.
+ room_id (str): The room they want messages from.
+ pagin_config (synapse.api.streams.PaginationConfig): The pagination
+ config rules to apply, if any.
+ as_client_event (bool): True to get events in client-server format.
+ event_filter (Filter): Filter to apply to results or None
+ Returns:
+ dict: Pagination API results
+ """
+ user_id = requester.user.to_string()
+
+ if pagin_config.from_token:
+ room_token = pagin_config.from_token.room_key
+ else:
+ pagin_config.from_token = (
+ yield self.hs.get_event_sources().get_current_token_for_room(
+ room_id=room_id
+ )
+ )
+ room_token = pagin_config.from_token.room_key
+
+ room_token = RoomStreamToken.parse(room_token)
+
+ pagin_config.from_token = pagin_config.from_token.copy_and_replace(
+ "room_key", str(room_token)
+ )
+
+ source_config = pagin_config.get_source_config("room")
+
+ with (yield self.pagination_lock.read(room_id)):
+ membership, member_event_id = yield self.auth.check_in_room_or_world_readable(
+ room_id, user_id
+ )
+
+ if source_config.direction == 'b':
+ # if we're going backwards, we might need to backfill. This
+ # requires that we have a topo token.
+ if room_token.topological:
+ max_topo = room_token.topological
+ else:
+ max_topo = yield self.store.get_max_topological_token(
+ room_id, room_token.stream
+ )
+
+ if membership == Membership.LEAVE:
+ # If they have left the room then clamp the token to be before
+ # they left the room, to save the effort of loading from the
+ # database.
+ leave_token = yield self.store.get_topological_token_for_event(
+ member_event_id
+ )
+ leave_token = RoomStreamToken.parse(leave_token)
+ if leave_token.topological < max_topo:
+ source_config.from_key = str(leave_token)
+
+ yield self.hs.get_handlers().federation_handler.maybe_backfill(
+ room_id, max_topo
+ )
+
+ events, next_key = yield self.store.paginate_room_events(
+ room_id=room_id,
+ from_key=source_config.from_key,
+ to_key=source_config.to_key,
+ direction=source_config.direction,
+ limit=source_config.limit,
+ event_filter=event_filter,
+ )
+
+ next_token = pagin_config.from_token.copy_and_replace(
+ "room_key", next_key
+ )
+
+ if not events:
+ defer.returnValue({
+ "chunk": [],
+ "start": pagin_config.from_token.to_string(),
+ "end": next_token.to_string(),
+ })
+
+ if event_filter:
+ events = event_filter.filter(events)
+
+ events = yield filter_events_for_client(
+ self.store,
+ user_id,
+ events,
+ is_peeking=(member_event_id is None),
+ )
+
+ time_now = self.clock.time_msec()
+
+ chunk = {
+ "chunk": [
+ serialize_event(e, time_now, as_client_event)
+ for e in events
+ ],
+ "start": pagin_config.from_token.to_string(),
+ "end": next_token.to_string(),
+ }
+
+ defer.returnValue(chunk)
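# Why pagination and purging now share one handler, in miniature: both go
# through this per-room ReadWriteLock. Paginators take the read lock, purges
# the write lock, so a purge waits out in-flight paginations and excludes
# new ones. Function names here are illustrative.
from twisted.internet import defer

from synapse.util.async import ReadWriteLock

pagination_lock = ReadWriteLock()

@defer.inlineCallbacks
def paginate(room_id):
    with (yield pagination_lock.read(room_id)):
        pass  # many paginators may hold the read lock concurrently

@defer.inlineCallbacks
def purge(room_id):
    with (yield pagination_lock.write(room_id)):
        pass  # exclusive: safe to delete history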
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 859f6d2b..9af2e8f8 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -17,7 +17,14 @@ import logging
from twisted.internet import defer
-from synapse.api.errors import AuthError, CodeMessageException, SynapseError
+from synapse.api.errors import (
+ AuthError,
+ CodeMessageException,
+ Codes,
+ StoreError,
+ SynapseError,
+)
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID, get_domain_from_id
from ._base import BaseHandler
@@ -41,19 +48,24 @@ class ProfileHandler(BaseHandler):
if hs.config.worker_app is None:
self.clock.looping_call(
- self._update_remote_profile_cache, self.PROFILE_UPDATE_MS,
+ self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
)
@defer.inlineCallbacks
def get_profile(self, user_id):
target_user = UserID.from_string(user_id)
if self.hs.is_mine(target_user):
- displayname = yield self.store.get_profile_displayname(
- target_user.localpart
- )
- avatar_url = yield self.store.get_profile_avatar_url(
- target_user.localpart
- )
+ try:
+ displayname = yield self.store.get_profile_displayname(
+ target_user.localpart
+ )
+ avatar_url = yield self.store.get_profile_avatar_url(
+ target_user.localpart
+ )
+ except StoreError as e:
+ if e.code == 404:
+ raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+ raise
defer.returnValue({
"displayname": displayname,
@@ -73,7 +85,6 @@ class ProfileHandler(BaseHandler):
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get displayname")
-
raise
@defer.inlineCallbacks
@@ -84,12 +95,17 @@ class ProfileHandler(BaseHandler):
"""
target_user = UserID.from_string(user_id)
if self.hs.is_mine(target_user):
- displayname = yield self.store.get_profile_displayname(
- target_user.localpart
- )
- avatar_url = yield self.store.get_profile_avatar_url(
- target_user.localpart
- )
+ try:
+ displayname = yield self.store.get_profile_displayname(
+ target_user.localpart
+ )
+ avatar_url = yield self.store.get_profile_avatar_url(
+ target_user.localpart
+ )
+ except StoreError as e:
+ if e.code == 404:
+ raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+ raise
defer.returnValue({
"displayname": displayname,
@@ -102,9 +118,14 @@ class ProfileHandler(BaseHandler):
@defer.inlineCallbacks
def get_displayname(self, target_user):
if self.hs.is_mine(target_user):
- displayname = yield self.store.get_profile_displayname(
- target_user.localpart
- )
+ try:
+ displayname = yield self.store.get_profile_displayname(
+ target_user.localpart
+ )
+ except StoreError as e:
+ if e.code == 404:
+ raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+ raise
defer.returnValue(displayname)
else:
@@ -121,7 +142,6 @@ class ProfileHandler(BaseHandler):
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get displayname")
-
raise
except Exception:
logger.exception("Failed to get displayname")
@@ -156,10 +176,14 @@ class ProfileHandler(BaseHandler):
@defer.inlineCallbacks
def get_avatar_url(self, target_user):
if self.hs.is_mine(target_user):
- avatar_url = yield self.store.get_profile_avatar_url(
- target_user.localpart
- )
-
+ try:
+ avatar_url = yield self.store.get_profile_avatar_url(
+ target_user.localpart
+ )
+ except StoreError as e:
+ if e.code == 404:
+ raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+ raise
defer.returnValue(avatar_url)
else:
try:
@@ -212,16 +236,20 @@ class ProfileHandler(BaseHandler):
just_field = args.get("field", None)
response = {}
+ try:
+ if just_field is None or just_field == "displayname":
+ response["displayname"] = yield self.store.get_profile_displayname(
+ user.localpart
+ )
- if just_field is None or just_field == "displayname":
- response["displayname"] = yield self.store.get_profile_displayname(
- user.localpart
- )
-
- if just_field is None or just_field == "avatar_url":
- response["avatar_url"] = yield self.store.get_profile_avatar_url(
- user.localpart
- )
+ if just_field is None or just_field == "avatar_url":
+ response["avatar_url"] = yield self.store.get_profile_avatar_url(
+ user.localpart
+ )
+ except StoreError as e:
+ if e.code == 404:
+ raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
+ raise
defer.returnValue(response)
@@ -254,6 +282,12 @@ class ProfileHandler(BaseHandler):
room_id, str(e.message)
)
+ def _start_update_remote_profile_cache(self):
+ return run_as_background_process(
+ "Update remote profile", self._update_remote_profile_cache,
+ )
+
+ @defer.inlineCallbacks
def _update_remote_profile_cache(self):
"""Called periodically to check profiles of remote users we haven't
checked in a while.
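# A sketch of the wrapper pattern above: looping_call fires a plain function
# that hands the real coroutine to run_as_background_process, giving it its
# own logcontext and an entry in the background-process metrics. The metric
# name and interval here are illustrative.
from synapse.metrics.background_process_metrics import run_as_background_process

def start_profile_cache_updates(clock, update_fn, interval_ms):
    clock.looping_call(
        lambda: run_as_background_process("update_profiles", update_fn),
        interval_ms,
    )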
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 7caff0cb..289704b2 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -45,7 +45,7 @@ class RegistrationHandler(BaseHandler):
hs (synapse.server.HomeServer):
"""
super(RegistrationHandler, self).__init__(hs)
-
+ self.hs = hs
self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler()
self.profile_handler = hs.get_profile_handler()
@@ -131,7 +131,7 @@ class RegistrationHandler(BaseHandler):
Args:
localpart : The local part of the user ID to register. If None,
one will be generated.
- password (str) : The password to assign to this user so they can
+ password (unicode) : The password to assign to this user so they can
login again. This can be None which means they cannot login again
via a password (e.g. the user is an application service user).
generate_token (bool): Whether a new access token should be
@@ -144,6 +144,7 @@ class RegistrationHandler(BaseHandler):
Raises:
RegistrationError if there was a problem registering.
"""
+ yield self._check_mau_limits()
password_hash = None
if password:
password_hash = yield self.auth_handler().hash(password)
@@ -288,6 +289,7 @@ class RegistrationHandler(BaseHandler):
400,
"User ID can only contain characters a-z, 0-9, or '=_-./'",
)
+ yield self._check_mau_limits()
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
@@ -437,7 +439,7 @@ class RegistrationHandler(BaseHandler):
"""
if localpart is None:
raise SynapseError(400, "Request must include user id")
-
+ yield self._check_mau_limits()
need_register = True
try:
@@ -531,3 +533,16 @@ class RegistrationHandler(BaseHandler):
remote_room_hosts=remote_room_hosts,
action="join",
)
+
+ @defer.inlineCallbacks
+ def _check_mau_limits(self):
+ """
+ Do not accept registrations if the monthly active user limit has been
+ exceeded and limiting is enabled.
+ """
+ if self.hs.config.limit_usage_by_mau is True:
+ current_mau = yield self.store.count_monthly_users()
+ if current_mau >= self.hs.config.max_mau_value:
+ raise RegistrationError(
+ 403, "MAU Limit Exceeded", Codes.MAU_LIMIT_EXCEEDED
+ )
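# The same MAU gate now guards both login (raising AuthError) and
# registration (raising RegistrationError). A parameterised restatement,
# with names of our own choosing:
from twisted.internet import defer

from synapse.api.errors import Codes

@defer.inlineCallbacks
def assert_mau_headroom(store, config, error_cls):
    if config.limit_usage_by_mau:
        current_mau = yield store.count_monthly_users()
        if current_mau >= config.max_mau_value:
            raise error_cls(
                403, "MAU Limit Exceeded", errcode=Codes.MAU_LIMIT_EXCEEDED,
            )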
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index f6751207..7b7804d9 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -15,6 +15,7 @@
# limitations under the License.
"""Contains functions for performing events on rooms."""
+import itertools
import logging
import math
import string
@@ -24,7 +25,7 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
-from synapse.types import RoomAlias, RoomID, RoomStreamToken, UserID
+from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
from synapse.util import stringutils
from synapse.visibility import filter_events_for_client
@@ -395,9 +396,13 @@ class RoomCreationHandler(BaseHandler):
)
-class RoomContextHandler(BaseHandler):
+class RoomContextHandler(object):
+ def __init__(self, hs):
+ self.hs = hs
+ self.store = hs.get_datastore()
+
@defer.inlineCallbacks
- def get_event_context(self, user, room_id, event_id, limit):
+ def get_event_context(self, user, room_id, event_id, limit, event_filter):
"""Retrieves events, pagination tokens and state around a given event
in a room.
@@ -407,6 +412,8 @@ class RoomContextHandler(BaseHandler):
event_id (str)
limit (int): The maximum number of events to return in total
(excluding state).
+ event_filter (Filter|None): the filter to apply to the events returned
+ (excluding the target event_id)
Returns:
dict, or None if the event isn't found
@@ -414,8 +421,6 @@ class RoomContextHandler(BaseHandler):
before_limit = math.floor(limit / 2.)
after_limit = limit - before_limit
- now_token = yield self.hs.get_event_sources().get_current_token()
-
users = yield self.store.get_users_in_room(room_id)
is_peeking = user.to_string() not in users
@@ -441,7 +446,7 @@ class RoomContextHandler(BaseHandler):
)
results = yield self.store.get_events_around(
- room_id, event_id, before_limit, after_limit
+ room_id, event_id, before_limit, after_limit, event_filter
)
results["events_before"] = yield filter_evts(results["events_before"])
@@ -453,16 +458,35 @@ class RoomContextHandler(BaseHandler):
else:
last_event_id = event_id
+ types = None
+ filtered_types = None
+ if event_filter and event_filter.lazy_load_members():
+ members = set(ev.sender for ev in itertools.chain(
+ results["events_before"],
+ (results["event"],),
+ results["events_after"],
+ ))
+ filtered_types = [EventTypes.Member]
+ types = [(EventTypes.Member, member) for member in members]
+
+ # XXX: why do we return the state as of the last event rather than the
+ # first? Shouldn't we be consistent with /sync?
+ # https://github.com/matrix-org/matrix-doc/issues/687
+
state = yield self.store.get_state_for_events(
- [last_event_id], None
+ [last_event_id], types, filtered_types=filtered_types,
)
results["state"] = list(state[last_event_id].values())
- results["start"] = now_token.copy_and_replace(
+ # We use a dummy token here as we only care about the room portion of
+ # the token, which we replace.
+ token = StreamToken.START
+
+ results["start"] = token.copy_and_replace(
"room_key", results["start"]
).to_string()
- results["end"] = now_token.copy_and_replace(
+ results["end"] = token.copy_and_replace(
"room_key", results["end"]
).to_string()
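When the /context filter enables lazy-loading of members, the handler above narrows the state query to the members who actually sent the returned events. A sketch of that selection on its own (`results` is the dict returned by `get_events_around`):

    import itertools

    from synapse.api.constants import EventTypes

    def lazy_member_state_filter(results):
        # Collect every sender appearing in the returned timeline chunk...
        members = set(ev.sender for ev in itertools.chain(
            results["events_before"],
            (results["event"],),
            results["events_after"],
        ))
        # ...and restrict only m.room.member state to those senders,
        # leaving all other state types unfiltered.
        filtered_types = [EventTypes.Member]
        types = [(EventTypes.Member, member) for member in members]
        return types, filtered_types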
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 00f2e279..0d4a3f46 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -201,7 +201,9 @@ class RoomMemberHandler(object):
ratelimit=ratelimit,
)
- prev_member_event_id = context.prev_state_ids.get(
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+
+ prev_member_event_id = prev_state_ids.get(
(EventTypes.Member, target.to_string()),
None
)
@@ -496,9 +498,10 @@ class RoomMemberHandler(object):
if prev_event is not None:
return
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
if event.membership == Membership.JOIN:
if requester.is_guest:
- guest_can_join = yield self._can_guest_join(context.prev_state_ids)
+ guest_can_join = yield self._can_guest_join(prev_state_ids)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
@@ -517,7 +520,7 @@ class RoomMemberHandler(object):
ratelimit=ratelimit,
)
- prev_member_event_id = context.prev_state_ids.get(
+ prev_member_event_id = prev_state_ids.get(
(EventTypes.Member, event.state_key),
None
)
@@ -705,6 +708,10 @@ class RoomMemberHandler(object):
inviter_display_name = member_event.content.get("displayname", "")
inviter_avatar_url = member_event.content.get("avatar_url", "")
+ # if user has no display name, default to their MXID
+ if not inviter_display_name:
+ inviter_display_name = user.to_string()
+
canonical_room_alias = ""
canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, ""))
if canonical_alias_event:
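The membership changes above all move from reading `context.prev_state_ids` directly to fetching it via `context.get_prev_state_ids(store)`, which now returns a Deferred (per the snapshot.py changes in this release). A minimal sketch of the new access pattern:

    from twisted.internet import defer

    from synapse.api.constants import EventTypes

    @defer.inlineCallbacks
    def get_prev_membership_event_id(context, store, user_id):
        # prev_state_ids is now loaded lazily from the store rather than
        # being a plain attribute on the event context.
        prev_state_ids = yield context.get_prev_state_ids(store)
        defer.returnValue(
            prev_state_ids.get((EventTypes.Member, user_id), None)
        )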
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 69ae9731..c464adbd 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -287,7 +287,7 @@ class SearchHandler(BaseHandler):
contexts = {}
for event in allowed_events:
res = yield self.store.get_events_around(
- event.room_id, event.event_id, before_limit, after_limit
+ event.room_id, event.event_id, before_limit, after_limit,
)
logger.info(
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index c24e3536..dff1f67d 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
-# Copyright 2015 - 2016 OpenMarket Ltd
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,6 +26,8 @@ from synapse.api.constants import EventTypes, Membership
from synapse.push.clientformat import format_push_rules_for_user
from synapse.types import RoomStreamToken
from synapse.util.async import concurrently_execute
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import LoggingContext
from synapse.util.metrics import Measure, measure_func
@@ -32,6 +35,14 @@ from synapse.visibility import filter_events_for_client
logger = logging.getLogger(__name__)
+# Store the cache that tracks which lazy-loaded members have been sent to a given
+# client for no more than 30 minutes.
+LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000
+
+# Remember the last 100 members we sent to a client, so that we avoid
+# redundantly re-sending the same lazy-loaded members to that client
+LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100
+
SyncConfig = collections.namedtuple("SyncConfig", [
"user",
@@ -181,6 +192,12 @@ class SyncHandler(object):
self.response_cache = ResponseCache(hs, "sync")
self.state = hs.get_state_handler()
+ # ExpiringCache((User, Device)) -> LruCache(state_key => event_id)
+ self.lazy_loaded_members_cache = ExpiringCache(
+ "lazy_loaded_members_cache", self.clock,
+ max_len=0, expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE,
+ )
+
def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
full_state=False):
"""Get the sync for a client if we have new data for it now. Otherwise
@@ -416,29 +433,44 @@ class SyncHandler(object):
))
@defer.inlineCallbacks
- def get_state_after_event(self, event):
+ def get_state_after_event(self, event, types=None, filtered_types=None):
"""
Get the room state after the given event
Args:
event(synapse.events.EventBase): event of interest
+ types(list[(str, str|None)]|None): List of (type, state_key) tuples
+ which are used to filter the state fetched. If `state_key` is None,
+ all events of the given type are returned. The list itself may be
+ None, in which case no filtering is applied.
+ filtered_types(list[str]|None): Only apply filtering via `types` to this
+ list of event types. Other types of events are returned unfiltered.
+ If None, `types` filtering is applied to all events.
Returns:
A Deferred map from ((type, state_key)->Event)
"""
- state_ids = yield self.store.get_state_ids_for_event(event.event_id)
+ state_ids = yield self.store.get_state_ids_for_event(
+ event.event_id, types, filtered_types=filtered_types,
+ )
if event.is_state():
state_ids = state_ids.copy()
state_ids[(event.type, event.state_key)] = event.event_id
defer.returnValue(state_ids)
@defer.inlineCallbacks
- def get_state_at(self, room_id, stream_position):
+ def get_state_at(self, room_id, stream_position, types=None, filtered_types=None):
""" Get the room state at a particular stream position
Args:
room_id(str): room for which to get state
stream_position(StreamToken): point at which to get state
+ types(list[(str, str|None)]|None): List of (type, state_key) tuples
+ which are used to filter the state fetched. If `state_key` is None,
+ all events of the given type are returned.
+ filtered_types(list[str]|None): Only apply filtering via `types` to this
+ list of event types. Other types of events are returned unfiltered.
+ If None, `types` filtering is applied to all events.
Returns:
A Deferred map from ((type, state_key)->Event)
@@ -453,7 +485,9 @@ class SyncHandler(object):
if last_events:
last_event = last_events[-1]
- state = yield self.get_state_after_event(last_event)
+ state = yield self.get_state_after_event(
+ last_event, types, filtered_types=filtered_types,
+ )
else:
# no events in this room - so presumably no state
@@ -485,59 +519,129 @@ class SyncHandler(object):
# TODO(mjark) Check for new redactions in the state events.
with Measure(self.clock, "compute_state_delta"):
+
+ types = None
+ filtered_types = None
+
+ lazy_load_members = sync_config.filter_collection.lazy_load_members()
+ include_redundant_members = (
+ sync_config.filter_collection.include_redundant_members()
+ )
+
+ if lazy_load_members:
+ # We only request state for the members needed to display the
+ # timeline:
+
+ types = [
+ (EventTypes.Member, state_key)
+ for state_key in set(
+ event.sender # FIXME: we also care about invite targets etc.
+ for event in batch.events
+ )
+ ]
+
+ # only apply the filtering to room members
+ filtered_types = [EventTypes.Member]
+
+ timeline_state = {
+ (event.type, event.state_key): event.event_id
+ for event in batch.events if event.is_state()
+ }
+
if full_state:
if batch:
current_state_ids = yield self.store.get_state_ids_for_event(
- batch.events[-1].event_id
+ batch.events[-1].event_id, types=types,
+ filtered_types=filtered_types,
)
state_ids = yield self.store.get_state_ids_for_event(
- batch.events[0].event_id
+ batch.events[0].event_id, types=types,
+ filtered_types=filtered_types,
)
+
else:
current_state_ids = yield self.get_state_at(
- room_id, stream_position=now_token
+ room_id, stream_position=now_token, types=types,
+ filtered_types=filtered_types,
)
state_ids = current_state_ids
- timeline_state = {
- (event.type, event.state_key): event.event_id
- for event in batch.events if event.is_state()
- }
-
state_ids = _calculate_state(
timeline_contains=timeline_state,
timeline_start=state_ids,
previous={},
current=current_state_ids,
+ lazy_load_members=lazy_load_members,
)
elif batch.limited:
state_at_previous_sync = yield self.get_state_at(
- room_id, stream_position=since_token
+ room_id, stream_position=since_token, types=types,
+ filtered_types=filtered_types,
)
current_state_ids = yield self.store.get_state_ids_for_event(
- batch.events[-1].event_id
+ batch.events[-1].event_id, types=types,
+ filtered_types=filtered_types,
)
state_at_timeline_start = yield self.store.get_state_ids_for_event(
- batch.events[0].event_id
+ batch.events[0].event_id, types=types,
+ filtered_types=filtered_types,
)
- timeline_state = {
- (event.type, event.state_key): event.event_id
- for event in batch.events if event.is_state()
- }
-
state_ids = _calculate_state(
timeline_contains=timeline_state,
timeline_start=state_at_timeline_start,
previous=state_at_previous_sync,
current=current_state_ids,
+ lazy_load_members=lazy_load_members,
)
else:
state_ids = {}
+ if lazy_load_members:
+ if types:
+ state_ids = yield self.store.get_state_ids_for_event(
+ batch.events[0].event_id, types=types,
+ filtered_types=filtered_types,
+ )
+
+ if lazy_load_members and not include_redundant_members:
+ cache_key = (sync_config.user.to_string(), sync_config.device_id)
+ cache = self.lazy_loaded_members_cache.get(cache_key)
+ if cache is None:
+ logger.debug("creating LruCache for %r", cache_key)
+ cache = LruCache(LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE)
+ self.lazy_loaded_members_cache[cache_key] = cache
+ else:
+ logger.debug("found LruCache for %r", cache_key)
+
+ # if it's a new sync sequence, then assume the client has had
+ # amnesia and doesn't want any recent lazy-loaded members
+ # de-duplicated.
+ if since_token is None:
+ logger.debug("clearing LruCache for %r", cache_key)
+ cache.clear()
+ else:
+ # only send members which aren't in our LruCache (either
+ # because they're new to this client or have been pushed out
+ # of the cache)
+ logger.debug("filtering state from %r...", state_ids)
+ state_ids = {
+ t: event_id
+ for t, event_id in state_ids.iteritems()
+ if cache.get(t[1]) != event_id
+ }
+ logger.debug("...to %r", state_ids)
+
+ # add any member IDs we are about to send into our LruCache
+ for t, event_id in itertools.chain(
+ state_ids.items(),
+ timeline_state.items(),
+ ):
+ if t[0] == EventTypes.Member:
+ cache.set(t[1], event_id)
state = {}
if state_ids:
@@ -1448,7 +1552,9 @@ def _action_has_highlight(actions):
return False
-def _calculate_state(timeline_contains, timeline_start, previous, current):
+def _calculate_state(
+ timeline_contains, timeline_start, previous, current, lazy_load_members,
+):
"""Works out what state to include in a sync response.
Args:
@@ -1457,6 +1563,9 @@ def _calculate_state(timeline_contains, timeline_start, previous, current):
previous (dict): state at the end of the previous sync (or empty dict
if this is an initial sync)
current (dict): state at the end of the timeline
+ lazy_load_members (bool): whether to return members from timeline_start
+ or not. Assumes that timeline_start has already been filtered to
+ include only the members the client needs to know about.
Returns:
dict
@@ -1472,9 +1581,25 @@ def _calculate_state(timeline_contains, timeline_start, previous, current):
}
c_ids = set(e for e in current.values())
- tc_ids = set(e for e in timeline_contains.values())
- p_ids = set(e for e in previous.values())
ts_ids = set(e for e in timeline_start.values())
+ p_ids = set(e for e in previous.values())
+ tc_ids = set(e for e in timeline_contains.values())
+
+ # If we are lazyloading room members, we explicitly add the membership events
+ # for the senders in the timeline into the state block returned by /sync,
+ # as we may not have sent them to the client before. We find these membership
+ # events by filtering them out of timeline_start, which has already been filtered
+ # to only include membership events for the senders in the timeline.
+ # In practice, we can do this by removing them from the p_ids list,
+ # which is the list of relevant state we know we have already sent to the client.
+ # see https://github.com/matrix-org/synapse/pull/2970
+ # /files/efcdacad7d1b7f52f879179701c7e0d9b763511f#r204732809
+
+ if lazy_load_members:
+ p_ids.difference_update(
+ e for t, e in timeline_start.iteritems()
+ if t[0] == EventTypes.Member
+ )
state_ids = ((c_ids | ts_ids) - p_ids) - tc_ids
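The selection in `_calculate_state` is pure set algebra over event IDs; a plain-Python restatement of it (using the `m.room.member` literal in place of the EventTypes constant):

    def calculate_state_ids(timeline_contains, timeline_start, previous,
                            current, lazy_load_members):
        c_ids = set(current.values())
        ts_ids = set(timeline_start.values())
        p_ids = set(previous.values())
        tc_ids = set(timeline_contains.values())

        # With lazy-loading, membership events at the timeline start must
        # be re-sent even if a previous sync included them, so drop them
        # from the "already sent" set.
        if lazy_load_members:
            p_ids.difference_update(
                e for t, e in timeline_start.items()
                if t[0] == "m.room.member"
            )

        # State that is current or was at the timeline start, minus what
        # the client already has, minus what the timeline itself carries.
        return ((c_ids | ts_ids) - p_ids) - tc_ids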
diff --git a/synapse/http/client.py b/synapse/http/client.py
index d6a0d75b..3771e0b3 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -26,9 +26,11 @@ from OpenSSL.SSL import VERIFY_NONE
from twisted.internet import defer, protocol, reactor, ssl, task
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.web._newclient import ResponseDone
-from twisted.web.client import Agent, BrowserLikeRedirectAgent, ContentDecoderAgent
-from twisted.web.client import FileBodyProducer as TwistedFileBodyProducer
from twisted.web.client import (
+ Agent,
+ BrowserLikeRedirectAgent,
+ ContentDecoderAgent,
+ FileBodyProducer as TwistedFileBodyProducer,
GzipDecoder,
HTTPConnectionPool,
PartialDownloadError,
@@ -37,12 +39,7 @@ from twisted.web.client import (
from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers
-from synapse.api.errors import (
- CodeMessageException,
- Codes,
- MatrixCodeMessageException,
- SynapseError,
-)
+from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.http import cancelled_to_request_timed_out_error, redact_uri
from synapse.http.endpoint import SpiderEndpoint
from synapse.util.async import add_timeout_to_deferred
@@ -130,6 +127,11 @@ class SimpleHttpClient(object):
Returns:
Deferred[object]: parsed json
+
+ Raises:
+ HttpResponseException: On a non-2xx HTTP response.
+
+ ValueError: if the response was not JSON
"""
# TODO: Do we ever want to log message contents?
@@ -153,7 +155,10 @@ class SimpleHttpClient(object):
body = yield make_deferred_yieldable(readBody(response))
- defer.returnValue(json.loads(body))
+ if 200 <= response.code < 300:
+ defer.returnValue(json.loads(body))
+ else:
+ raise HttpResponseException(response.code, response.phrase, body)
@defer.inlineCallbacks
def post_json_get_json(self, uri, post_json, headers=None):
@@ -167,6 +172,11 @@ class SimpleHttpClient(object):
Returns:
Deferred[object]: parsed json
+
+ Raises:
+ HttpResponseException: On a non-2xx HTTP response.
+
+ ValueError: if the response was not JSON
"""
json_str = encode_canonical_json(post_json)
@@ -191,9 +201,7 @@ class SimpleHttpClient(object):
if 200 <= response.code < 300:
defer.returnValue(json.loads(body))
else:
- raise self._exceptionFromFailedRequest(response, body)
-
- defer.returnValue(json.loads(body))
+ raise HttpResponseException(response.code, response.phrase, body)
@defer.inlineCallbacks
def get_json(self, uri, args={}, headers=None):
@@ -211,14 +219,12 @@ class SimpleHttpClient(object):
Deferred: Succeeds when we get *any* 2xx HTTP response, with the
HTTP body as JSON.
Raises:
- On a non-2xx HTTP response. The response body will be used as the
- error message.
+ HttpResponseException: On a non-2xx HTTP response.
+
+ ValueError: if the response was not JSON
"""
- try:
- body = yield self.get_raw(uri, args, headers=headers)
- defer.returnValue(json.loads(body))
- except CodeMessageException as e:
- raise self._exceptionFromFailedRequest(e.code, e.msg)
+ body = yield self.get_raw(uri, args, headers=headers)
+ defer.returnValue(json.loads(body))
@defer.inlineCallbacks
def put_json(self, uri, json_body, args={}, headers=None):
@@ -237,7 +243,9 @@ class SimpleHttpClient(object):
Deferred: Succeeds when we get *any* 2xx HTTP response, with the
HTTP body as JSON.
Raises:
- On a non-2xx HTTP response.
+ HttpResponseException: On a non-2xx HTTP response.
+
+ ValueError: if the response was not JSON
"""
if len(args):
query_bytes = urllib.urlencode(args, True)
@@ -264,10 +272,7 @@ class SimpleHttpClient(object):
if 200 <= response.code < 300:
defer.returnValue(json.loads(body))
else:
- # NB: This is explicitly not json.loads(body)'d because the contract
- # of CodeMessageException is a *string* message. Callers can always
- # load it into JSON if they want.
- raise CodeMessageException(response.code, body)
+ raise HttpResponseException(response.code, response.phrase, body)
@defer.inlineCallbacks
def get_raw(self, uri, args={}, headers=None):
@@ -285,8 +290,7 @@ class SimpleHttpClient(object):
Deferred: Succeeds when we get *any* 2xx HTTP response, with the
HTTP body at text.
Raises:
- On a non-2xx HTTP response. The response body will be used as the
- error message.
+ HttpResponseException: On a non-2xx HTTP response.
"""
if len(args):
query_bytes = urllib.urlencode(args, True)
@@ -309,16 +313,7 @@ class SimpleHttpClient(object):
if 200 <= response.code < 300:
defer.returnValue(body)
else:
- raise CodeMessageException(response.code, body)
-
- def _exceptionFromFailedRequest(self, response, body):
- try:
- jsonBody = json.loads(body)
- errcode = jsonBody['errcode']
- error = jsonBody['error']
- return MatrixCodeMessageException(response.code, error, errcode)
- except (ValueError, KeyError):
- return CodeMessageException(response.code, body)
+ raise HttpResponseException(response.code, response.phrase, body)
# XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
# The two should be factored out.
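With CodeMessageException gone from the client, callers that want to surface the master's error to the end user now catch HttpResponseException and convert it explicitly, as the replication code below does. The pattern in isolation:

    from twisted.internet import defer

    from synapse.api.errors import HttpResponseException

    @defer.inlineCallbacks
    def post_to_master(client, uri, payload):
        try:
            result = yield client.post_json_get_json(uri, payload)
        except HttpResponseException as e:
            # Re-raise as the SynapseError the master originally threw,
            # rather than leaking a stack trace to the client.
            raise e.to_synapse_error()
        defer.returnValue(result)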
diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py
index f24b4b94..588e2805 100644
--- a/synapse/http/request_metrics.py
+++ b/synapse/http/request_metrics.py
@@ -38,7 +38,8 @@ outgoing_responses_counter = Counter(
)
response_timer = Histogram(
- "synapse_http_server_response_time_seconds", "sec", ["method", "servlet", "tag"]
+ "synapse_http_server_response_time_seconds", "sec",
+ ["method", "servlet", "tag", "code"],
)
response_ru_utime = Counter(
@@ -171,11 +172,13 @@ class RequestMetrics(object):
)
return
- outgoing_responses_counter.labels(request.method, str(request.code)).inc()
+ response_code = str(request.code)
+
+ outgoing_responses_counter.labels(request.method, response_code).inc()
response_count.labels(request.method, self.name, tag).inc()
- response_timer.labels(request.method, self.name, tag).observe(
+ response_timer.labels(request.method, self.name, tag, response_code).observe(
time_sec - self.start
)
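Adding "code" to the histogram's labels means response-time quantiles can be broken down by HTTP status. An illustrative observation (the label values here are examples only):

    from prometheus_client import Histogram

    response_timer = Histogram(
        "synapse_http_server_response_time_seconds", "sec",
        ["method", "servlet", "tag", "code"],
    )

    # Record a 23ms response for a GET handled by the sync servlet.
    response_timer.labels("GET", "sync", "", "200").observe(0.023)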
diff --git a/synapse/http/server.py b/synapse/http/server.py
index c70fdbdf..6dacb310 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -13,12 +13,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
import cgi
import collections
import logging
-import urllib
-from six.moves import http_client
+from six import PY3
+from six.moves import http_client, urllib
from canonicaljson import encode_canonical_json, encode_pretty_printed_json, json
@@ -35,7 +36,6 @@ from synapse.api.errors import (
Codes,
SynapseError,
UnrecognizedRequestError,
- cs_exception,
)
from synapse.http.request_metrics import requests_counter
from synapse.util.caches import intern_dict
@@ -76,16 +76,13 @@ def wrap_json_request_handler(h):
def wrapped_request_handler(self, request):
try:
yield h(self, request)
- except CodeMessageException as e:
+ except SynapseError as e:
code = e.code
- if isinstance(e, SynapseError):
- logger.info(
- "%s SynapseError: %s - %s", request, code, e.msg
- )
- else:
- logger.exception(e)
+ logger.info(
+ "%s SynapseError: %s - %s", request, code, e.msg
+ )
respond_with_json(
- request, code, cs_exception(e), send_cors=True,
+ request, code, e.error_dict(), send_cors=True,
pretty_print=_request_user_agent_is_curl(request),
)
@@ -264,6 +261,7 @@ class JsonResource(HttpServer, resource.Resource):
self.hs = hs
def register_paths(self, method, path_patterns, callback):
+ method = method.encode("utf-8") # method is bytes on py3
for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method, []).append(
@@ -296,8 +294,19 @@ class JsonResource(HttpServer, resource.Resource):
# here. If it throws an exception, that is handled by the wrapper
# installed by @request_handler.
+ def _unquote(s):
+ if PY3:
+ # On Python 3, unquote is unicode -> unicode
+ return urllib.parse.unquote(s)
+ else:
+ # On Python 2, unquote is bytes -> bytes. We need to encode the
+ # URL again (as it was decoded by _get_handler_for_request), as
+ # ASCII because it's a URL, and then decode it to get the UTF-8
+ # characters that were quoted.
+ return urllib.parse.unquote(s.encode('ascii')).decode('utf8')
+
kwargs = intern_dict({
- name: urllib.unquote(value).decode("UTF-8") if value else value
+ name: _unquote(value) if value else value
for name, value in group_dict.items()
})
@@ -313,9 +322,9 @@ class JsonResource(HttpServer, resource.Resource):
request (twisted.web.http.Request):
Returns:
- Tuple[Callable, dict[str, str]]: callback method, and the dict
- mapping keys to path components as specified in the handler's
- path match regexp.
+ Tuple[Callable, dict[unicode, unicode]]: callback method, and the
+ dict mapping keys to path components as specified in the
+ handler's path match regexp.
The callback will normally be a method registered via
register_paths, so will return (possibly via Deferred) either
@@ -327,7 +336,7 @@ class JsonResource(HttpServer, resource.Resource):
# Loop through all the registered callbacks to check if the method
# and path regex match
for path_entry in self.path_regexs.get(request.method, []):
- m = path_entry.pattern.match(request.path)
+ m = path_entry.pattern.match(request.path.decode('ascii'))
if m:
# We found a match!
return path_entry.callback, m.groupdict()
@@ -383,7 +392,7 @@ class RootRedirect(resource.Resource):
self.url = path
def render_GET(self, request):
- return redirectTo(self.url, request)
+ return redirectTo(self.url.encode('ascii'), request)
def getChild(self, name, request):
if len(name) == 0:
@@ -404,12 +413,14 @@ def respond_with_json(request, code, json_object, send_cors=False,
return
if pretty_print:
- json_bytes = encode_pretty_printed_json(json_object) + "\n"
+ json_bytes = (encode_pretty_printed_json(json_object) + "\n"
+ ).encode("utf-8")
else:
if canonical_json or synapse.events.USE_FROZEN_DICTS:
+ # canonicaljson already encodes to bytes
json_bytes = encode_canonical_json(json_object)
else:
- json_bytes = json.dumps(json_object)
+ json_bytes = json.dumps(json_object).encode("utf-8")
return respond_with_json_bytes(
request, code, json_bytes,
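The `_unquote` helper encapsulates the Python 2/3 split in `unquote`; the same behaviour as a standalone function, with an example input:

    from six import PY3
    from six.moves import urllib

    def unquote_path_component(s):
        if PY3:
            # On Python 3, unquote maps unicode -> unicode directly.
            return urllib.parse.unquote(s)
        # On Python 2, unquote maps bytes -> bytes: re-encode the component
        # as ASCII (it came from a URL), unquote it, then decode the result
        # as UTF-8 to recover the quoted characters.
        return urllib.parse.unquote(s.encode('ascii')).decode('utf8')

    # On both Python 2 and 3 this yields the check-mark character u"\u2713":
    unquote_path_component(u"%E2%9C%93")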
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 882816dc..69f70852 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -171,8 +171,16 @@ def parse_json_value_from_request(request, allow_empty_body=False):
if not content_bytes and allow_empty_body:
return None
+ # Decode to Unicode so that simplejson will return Unicode strings on
+ # Python 2
try:
- content = json.loads(content_bytes)
+ content_unicode = content_bytes.decode('utf8')
+ except UnicodeDecodeError:
+ logger.warn("Unable to decode UTF-8")
+ raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
+
+ try:
+ content = json.loads(content_unicode)
except Exception as e:
logger.warn("Unable to parse JSON: %s", e)
raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
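Decoding the body before json.loads matters on Python 2, where simplejson can hand back native str for ASCII-only content when parsing a byte string; decoding first keeps everything unicode. A small illustration of the decode-then-parse order used above:

    from canonicaljson import json

    body = b'{"user": "alice"}'
    try:
        content_unicode = body.decode('utf8')
    except UnicodeDecodeError:
        raise ValueError("Content not valid UTF-8")
    # Keys and values now come back as unicode on both Python 2 and 3.
    content = json.loads(content_unicode)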
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
new file mode 100644
index 00000000..ce678d5f
--- /dev/null
+++ b/synapse/metrics/background_process_metrics.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+
+from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily
+
+from twisted.internet import defer
+
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+
+_background_process_start_count = Counter(
+ "synapse_background_process_start_count",
+ "Number of background processes started",
+ ["name"],
+)
+
+# we set registry=None in all of these to stop them getting registered with
+# the default registry. Instead we collect them all via the CustomCollector,
+# which ensures that we can update them before they are collected.
+#
+_background_process_ru_utime = Counter(
+ "synapse_background_process_ru_utime_seconds",
+ "User CPU time used by background processes, in seconds",
+ ["name"],
+ registry=None,
+)
+
+_background_process_ru_stime = Counter(
+ "synapse_background_process_ru_stime_seconds",
+ "System CPU time used by background processes, in seconds",
+ ["name"],
+ registry=None,
+)
+
+_background_process_db_txn_count = Counter(
+ "synapse_background_process_db_txn_count",
+ "Number of database transactions done by background processes",
+ ["name"],
+ registry=None,
+)
+
+_background_process_db_txn_duration = Counter(
+ "synapse_background_process_db_txn_duration_seconds",
+ ("Seconds spent by background processes waiting for database "
+ "transactions, excluding scheduling time"),
+ ["name"],
+ registry=None,
+)
+
+_background_process_db_sched_duration = Counter(
+ "synapse_background_process_db_sched_duration_seconds",
+ "Seconds spent by background processes waiting for database connections",
+ ["name"],
+ registry=None,
+)
+
+# map from description to a counter, so that we can name our logcontexts
+# incrementally. (It actually duplicates _background_process_start_count, but
+# it's much simpler to do so than to try to combine them.)
+_background_process_counts = dict() # type: dict[str, int]
+
+# map from description to the currently running background processes.
+#
+# it's kept as a dict of sets rather than a big set so that we can keep track
+# of process descriptions that no longer have any active processes.
+_background_processes = dict() # type: dict[str, set[_BackgroundProcess]]
+
+
+class _Collector(object):
+ """A custom metrics collector for the background process metrics.
+
+ Ensures that all of the metrics are up-to-date with any in-flight processes
+ before they are returned.
+ """
+ def collect(self):
+ background_process_in_flight_count = GaugeMetricFamily(
+ "synapse_background_process_in_flight_count",
+ "Number of background processes in flight",
+ labels=["name"],
+ )
+
+ for desc, processes in six.iteritems(_background_processes):
+ background_process_in_flight_count.add_metric(
+ (desc,), len(processes),
+ )
+ for process in processes:
+ process.update_metrics()
+
+ yield background_process_in_flight_count
+
+ # now we need to run collect() over each of the static Counters, and
+ # yield each metric they return.
+ for m in (
+ _background_process_ru_utime,
+ _background_process_ru_stime,
+ _background_process_db_txn_count,
+ _background_process_db_txn_duration,
+ _background_process_db_sched_duration,
+ ):
+ for r in m.collect():
+ yield r
+
+
+REGISTRY.register(_Collector())
+
+
+class _BackgroundProcess(object):
+ def __init__(self, desc, ctx):
+ self.desc = desc
+ self._context = ctx
+ self._reported_stats = None
+
+ def update_metrics(self):
+ """Updates the metrics with values from this process."""
+ new_stats = self._context.get_resource_usage()
+ if self._reported_stats is None:
+ diff = new_stats
+ else:
+ diff = new_stats - self._reported_stats
+ self._reported_stats = new_stats
+
+ _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
+ _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
+ _background_process_db_txn_count.labels(self.desc).inc(
+ diff.db_txn_count,
+ )
+ _background_process_db_txn_duration.labels(self.desc).inc(
+ diff.db_txn_duration_sec,
+ )
+ _background_process_db_sched_duration.labels(self.desc).inc(
+ diff.db_sched_duration_sec,
+ )
+
+
+def run_as_background_process(desc, func, *args, **kwargs):
+ """Run the given function in its own logcontext, with resource metrics
+
+ This should be used to wrap processes which are fired off to run in the
+ background, instead of being associated with a particular request.
+
+ It returns a Deferred which completes when the function completes, but it doesn't
+ follow the synapse logcontext rules, which makes it appropriate for passing to
+ clock.looping_call and friends (or for firing-and-forgetting in the middle of a
+ normal synapse inlineCallbacks function).
+
+ Args:
+ desc (str): a description for this background process type
+ func: a function, which may return a Deferred
+ args: positional args for func
+ kwargs: keyword args for func
+
+ Returns: Deferred which returns the result of func, but note that it does not
+ follow the synapse logcontext rules.
+ """
+ @defer.inlineCallbacks
+ def run():
+ count = _background_process_counts.get(desc, 0)
+ _background_process_counts[desc] = count + 1
+ _background_process_start_count.labels(desc).inc()
+
+ with LoggingContext(desc) as context:
+ context.request = "%s-%i" % (desc, count)
+ proc = _BackgroundProcess(desc, context)
+ _background_processes.setdefault(desc, set()).add(proc)
+ try:
+ yield func(*args, **kwargs)
+ finally:
+ proc.update_metrics()
+ _background_processes[desc].remove(proc)
+
+ with PreserveLoggingContext():
+ return run()
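Typical wiring for run_as_background_process, matching how the media repository below uses it with looping_call (the pruning function here is a hypothetical stand-in):

    from twisted.internet import defer

    from synapse.metrics.background_process_metrics import (
        run_as_background_process,
    )

    @defer.inlineCallbacks
    def _prune_old_entries(store):
        # Hypothetical periodic job; any inlineCallbacks function works.
        yield store.delete_old_entries()

    def start_pruner(hs):
        def _start():
            # Returns a Deferred that deliberately does not follow the
            # logcontext rules, which is what looping_call expects.
            return run_as_background_process(
                "prune_old_entries", _prune_old_entries, hs.get_datastore(),
            )

        hs.get_clock().looping_call(_start, 60 * 1000)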
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 51cbd66f..e650c3e4 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -274,7 +274,7 @@ class Notifier(object):
logger.exception("Error notifying application services of event")
def on_new_event(self, stream_key, new_token, users=[], rooms=[]):
- """ Used to inform listeners that something has happend event wise.
+ """ Used to inform listeners that something has happened event wise.
Will wake up all listeners for the given users and rooms.
"""
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index bb181d94..1d14d363 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -112,7 +112,8 @@ class BulkPushRuleEvaluator(object):
@defer.inlineCallbacks
def _get_power_levels_and_sender_level(self, event, context):
- pl_event_id = context.prev_state_ids.get(POWER_KEY)
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+ pl_event_id = prev_state_ids.get(POWER_KEY)
if pl_event_id:
# fastpath: if there's a power level event, that's all we need, and
# not having a power level event is an extreme edge case
@@ -120,7 +121,7 @@ class BulkPushRuleEvaluator(object):
auth_events = {POWER_KEY: pl_event}
else:
auth_events_ids = yield self.auth.compute_auth_events(
- event, context.prev_state_ids, for_verification=False,
+ event, prev_state_ids, for_verification=False,
)
auth_events = yield self.store.get_events(auth_events_ids)
auth_events = {
@@ -304,7 +305,7 @@ class RulesForRoom(object):
push_rules_delta_state_cache_metric.inc_hits()
else:
- current_state_ids = context.current_state_ids
+ current_state_ids = yield context.get_current_state_ids(self.store)
push_rules_delta_state_cache_metric.inc_misses()
push_rules_state_size_counter.inc(len(current_state_ids))
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index 6bfc8a5b..7a3cfb15 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -18,7 +18,7 @@ import re
from twisted.internet import defer
-from synapse.api.errors import MatrixCodeMessageException, SynapseError
+from synapse.api.errors import HttpResponseException
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.types import Requester, UserID
from synapse.util.distributor import user_joined_room, user_left_room
@@ -56,11 +56,11 @@ def remote_join(client, host, port, requester, remote_room_hosts,
try:
result = yield client.post_json_get_json(uri, payload)
- except MatrixCodeMessageException as e:
+ except HttpResponseException as e:
# We convert to SynapseError as we know that it was a SynapseError
# on the master process that we should send to the client. (And
# importantly, not stack traces everywhere)
- raise SynapseError(e.code, e.msg, e.errcode)
+ raise e.to_synapse_error()
defer.returnValue(result)
@@ -92,11 +92,11 @@ def remote_reject_invite(client, host, port, requester, remote_room_hosts,
try:
result = yield client.post_json_get_json(uri, payload)
- except MatrixCodeMessageException as e:
+ except HttpResponseException as e:
# We convert to SynapseError as we know that it was a SynapseError
# on the master process that we should send to the client. (And
# importantly, not stack traces everywhere)
- raise SynapseError(e.code, e.msg, e.errcode)
+ raise e.to_synapse_error()
defer.returnValue(result)
@@ -131,11 +131,11 @@ def get_or_register_3pid_guest(client, host, port, requester,
try:
result = yield client.post_json_get_json(uri, payload)
- except MatrixCodeMessageException as e:
+ except HttpResponseException as e:
# We convert to SynapseError as we know that it was a SynapseError
# on the master process that we should send to the client. (And
# importantly, not stack traces everywhere)
- raise SynapseError(e.code, e.msg, e.errcode)
+ raise e.to_synapse_error()
defer.returnValue(result)
@@ -165,11 +165,11 @@ def notify_user_membership_change(client, host, port, user_id, room_id, change):
try:
result = yield client.post_json_get_json(uri, payload)
- except MatrixCodeMessageException as e:
+ except HttpResponseException as e:
# We convert to SynapseError as we know that it was a SynapseError
# on the master process that we should send to the client. (And
# importantly, not stack traces everywhere)
- raise SynapseError(e.code, e.msg, e.errcode)
+ raise e.to_synapse_error()
defer.returnValue(result)
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index 2eede547..d3509dc2 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -18,11 +18,7 @@ import re
from twisted.internet import defer
-from synapse.api.errors import (
- CodeMessageException,
- MatrixCodeMessageException,
- SynapseError,
-)
+from synapse.api.errors import CodeMessageException, HttpResponseException
from synapse.events import FrozenEvent
from synapse.events.snapshot import EventContext
from synapse.http.servlet import RestServlet, parse_json_object_from_request
@@ -34,12 +30,13 @@ logger = logging.getLogger(__name__)
@defer.inlineCallbacks
-def send_event_to_master(clock, client, host, port, requester, event, context,
+def send_event_to_master(clock, store, client, host, port, requester, event, context,
ratelimit, extra_users):
"""Send event to be handled on the master
Args:
clock (synapse.util.Clock)
+ store (DataStore)
client (SimpleHttpClient)
host (str): host of master
port (int): port on master listening for HTTP replication
@@ -53,11 +50,13 @@ def send_event_to_master(clock, client, host, port, requester, event, context,
host, port, event.event_id,
)
+ serialized_context = yield context.serialize(event, store)
+
payload = {
"event": event.get_pdu_json(),
"internal_metadata": event.internal_metadata.get_dict(),
"rejected_reason": event.rejected_reason,
- "context": context.serialize(event),
+ "context": serialized_context,
"requester": requester.serialize(),
"ratelimit": ratelimit,
"extra_users": [u.to_string() for u in extra_users],
@@ -80,11 +79,11 @@ def send_event_to_master(clock, client, host, port, requester, event, context,
# If we timed out we probably don't need to worry about backing
# off too much, but lets just wait a little anyway.
yield clock.sleep(1)
- except MatrixCodeMessageException as e:
+ except HttpResponseException as e:
# We convert to SynapseError as we know that it was a SynapseError
# on the master process that we should send to the client. (And
# importantly, not stack traces everywhere)
- raise SynapseError(e.code, e.msg, e.errcode)
+ raise e.to_synapse_error()
defer.returnValue(result)
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index e592ab57..970e9431 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -192,7 +192,7 @@ class ReplicationClientHandler(object):
"""Returns a deferred that is resolved when we receive a SYNC command
with given data.
- Used by tests.
+ [Not currently] used by tests.
"""
return self.awaiting_syncs.setdefault(data, defer.Deferred())
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 611fb66e..fd59f159 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -25,6 +25,7 @@ from twisted.internet import defer
from twisted.internet.protocol import Factory
from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.metrics import Measure, measure_func
from .protocol import ServerReplicationStreamProtocol
@@ -117,7 +118,6 @@ class ReplicationStreamer(object):
for conn in self.connections:
conn.send_error("server shutting down")
- @defer.inlineCallbacks
def on_notifier_poke(self):
"""Checks if there is actually any new data and sends it to the
connections if there are.
@@ -132,14 +132,16 @@ class ReplicationStreamer(object):
stream.discard_updates_and_advance()
return
- # If we're in the process of checking for new updates, mark that fact
- # and return
+ self.pending_updates = True
+
if self.is_looping:
- logger.debug("Noitifier poke loop already running")
- self.pending_updates = True
+ logger.debug("Notifier poke loop already running")
return
- self.pending_updates = True
+ run_as_background_process("replication_notifier", self._run_notifier_loop)
+
+ @defer.inlineCallbacks
+ def _run_notifier_loop(self):
self.is_looping = True
try:
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 75c2a4ec..3418f06f 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,13 +14,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from six import PY3
+
from synapse.http.server import JsonResource
from synapse.rest.client import versions
-from synapse.rest.client.v1 import admin, directory, events, initial_sync
-from synapse.rest.client.v1 import login as v1_login
-from synapse.rest.client.v1 import logout, presence, profile, push_rule, pusher
-from synapse.rest.client.v1 import register as v1_register
-from synapse.rest.client.v1 import room, voip
+from synapse.rest.client.v1 import (
+ admin,
+ directory,
+ events,
+ initial_sync,
+ login as v1_login,
+ logout,
+ presence,
+ profile,
+ push_rule,
+ pusher,
+ room,
+ voip,
+)
from synapse.rest.client.v2_alpha import (
account,
account_data,
@@ -42,6 +54,11 @@ from synapse.rest.client.v2_alpha import (
user_directory,
)
+if not PY3:
+ from synapse.rest.client.v1_only import (
+ register as v1_register,
+ )
+
class ClientRestResource(JsonResource):
"""A resource for version 1 of the matrix client API."""
@@ -54,14 +71,22 @@ class ClientRestResource(JsonResource):
def register_servlets(client_resource, hs):
versions.register_servlets(client_resource)
- # "v1"
- room.register_servlets(hs, client_resource)
+ if not PY3:
+ # "v1" (Python 2 only)
+ v1_register.register_servlets(hs, client_resource)
+
+ # Deprecated in r0
+ initial_sync.register_servlets(hs, client_resource)
+ room.register_deprecated_servlets(hs, client_resource)
+
+ # Partially deprecated in r0
events.register_servlets(hs, client_resource)
- v1_register.register_servlets(hs, client_resource)
+
+ # "v1" + "r0"
+ room.register_servlets(hs, client_resource)
v1_login.register_servlets(hs, client_resource)
profile.register_servlets(hs, client_resource)
presence.register_servlets(hs, client_resource)
- initial_sync.register_servlets(hs, client_resource)
directory.register_servlets(hs, client_resource)
voip.register_servlets(hs, client_resource)
admin.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index 2dc50e58..80d625ee 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -14,8 +14,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import hashlib
+import hmac
import logging
+from six import text_type
from six.moves import http_client
from twisted.internet import defer
@@ -63,6 +66,132 @@ class UsersRestServlet(ClientV1RestServlet):
defer.returnValue((200, ret))
+class UserRegisterServlet(ClientV1RestServlet):
+ """
+ Attributes:
+ NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted
+ nonces (dict[str, int]): The nonces that we will accept. A dict of
+ nonce to the time it was generated, in int seconds.
+ """
+ PATTERNS = client_path_patterns("/admin/register")
+ NONCE_TIMEOUT = 60
+
+ def __init__(self, hs):
+ super(UserRegisterServlet, self).__init__(hs)
+ self.handlers = hs.get_handlers()
+ self.reactor = hs.get_reactor()
+ self.nonces = {}
+ self.hs = hs
+
+ def _clear_old_nonces(self):
+ """
+ Clear out old nonces that are older than NONCE_TIMEOUT.
+ """
+ now = int(self.reactor.seconds())
+
+ for k, v in list(self.nonces.items()):
+ if now - v > self.NONCE_TIMEOUT:
+ del self.nonces[k]
+
+ def on_GET(self, request):
+ """
+ Generate a new nonce.
+ """
+ self._clear_old_nonces()
+
+ nonce = self.hs.get_secrets().token_hex(64)
+ self.nonces[nonce] = int(self.reactor.seconds())
+ return (200, {"nonce": nonce.encode('ascii')})
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ self._clear_old_nonces()
+
+ if not self.hs.config.registration_shared_secret:
+ raise SynapseError(400, "Shared secret registration is not enabled")
+
+ body = parse_json_object_from_request(request)
+
+ if "nonce" not in body:
+ raise SynapseError(
+ 400, "nonce must be specified", errcode=Codes.BAD_JSON,
+ )
+
+ nonce = body["nonce"]
+
+ if nonce not in self.nonces:
+ raise SynapseError(
+ 400, "unrecognised nonce",
+ )
+
+ # Delete the nonce, so it can't be reused, even if it's invalid
+ del self.nonces[nonce]
+
+ if "username" not in body:
+ raise SynapseError(
+ 400, "username must be specified", errcode=Codes.BAD_JSON,
+ )
+ else:
+ if (
+ not isinstance(body['username'], text_type)
+ or len(body['username']) > 512
+ ):
+ raise SynapseError(400, "Invalid username")
+
+ username = body["username"].encode("utf-8")
+ if b"\x00" in username:
+ raise SynapseError(400, "Invalid username")
+
+ if "password" not in body:
+ raise SynapseError(
+ 400, "password must be specified", errcode=Codes.BAD_JSON,
+ )
+ else:
+ if (
+ not isinstance(body['password'], text_type)
+ or len(body['password']) > 512
+ ):
+ raise SynapseError(400, "Invalid password")
+
+ password = body["password"].encode("utf-8")
+ if b"\x00" in password:
+ raise SynapseError(400, "Invalid password")
+
+ admin = body.get("admin", None)
+ got_mac = body["mac"]
+
+ want_mac = hmac.new(
+ key=self.hs.config.registration_shared_secret.encode(),
+ digestmod=hashlib.sha1,
+ )
+ want_mac.update(nonce)
+ want_mac.update(b"\x00")
+ want_mac.update(username)
+ want_mac.update(b"\x00")
+ want_mac.update(password)
+ want_mac.update(b"\x00")
+ want_mac.update(b"admin" if admin else b"notadmin")
+ want_mac = want_mac.hexdigest()
+
+ if not hmac.compare_digest(want_mac, got_mac.encode('ascii')):
+ raise SynapseError(403, "HMAC incorrect")
+
+ # Reuse the parts of RegisterRestServlet to reduce code duplication
+ from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+
+ register = RegisterRestServlet(self.hs)
+
+ (user_id, _) = yield register.registration_handler.register(
+ localpart=body['username'].lower(),
+ password=body["password"],
+ admin=bool(admin),
+ generate_token=False,
+ )
+
+ result = yield register._create_registration_details(user_id, body)
+ defer.returnValue((200, result))
+
+
class WhoisRestServlet(ClientV1RestServlet):
PATTERNS = client_path_patterns("/admin/whois/(?P<user_id>[^/]*)")
@@ -123,7 +252,7 @@ class PurgeHistoryRestServlet(ClientV1RestServlet):
hs (synapse.server.HomeServer)
"""
super(PurgeHistoryRestServlet, self).__init__(hs)
- self.handlers = hs.get_handlers()
+ self.pagination_handler = hs.get_pagination_handler()
self.store = hs.get_datastore()
@defer.inlineCallbacks
@@ -198,7 +327,7 @@ class PurgeHistoryRestServlet(ClientV1RestServlet):
errcode=Codes.BAD_JSON,
)
- purge_id = yield self.handlers.message_handler.start_purge_history(
+ purge_id = yield self.pagination_handler.start_purge_history(
room_id, token,
delete_local_events=delete_local_events,
)
@@ -220,7 +349,7 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
hs (synapse.server.HomeServer)
"""
super(PurgeHistoryStatusRestServlet, self).__init__(hs)
- self.handlers = hs.get_handlers()
+ self.pagination_handler = hs.get_pagination_handler()
@defer.inlineCallbacks
def on_GET(self, request, purge_id):
@@ -230,7 +359,7 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
if not is_admin:
raise AuthError(403, "You are not a server admin")
- purge_status = self.handlers.message_handler.get_purge_status(purge_id)
+ purge_status = self.pagination_handler.get_purge_status(purge_id)
if purge_status is None:
raise NotFoundError("purge id '%s' not found" % purge_id)
@@ -614,3 +743,4 @@ def register_servlets(hs, http_server):
ShutdownRoomRestServlet(hs).register(http_server)
QuarantineMediaInRoom(hs).register(http_server)
ListMediaInRoom(hs).register(http_server)
+ UserRegisterServlet(hs).register(http_server)
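A client driving the new shared-secret endpoint computes the same NUL-separated SHA-1 HMAC that on_POST verifies above. A sketch of the client-side MAC (the function name is illustrative):

    import hashlib
    import hmac

    def registration_mac(shared_secret, nonce, username, password, admin=False):
        mac = hmac.new(
            key=shared_secret.encode('utf8'),
            digestmod=hashlib.sha1,
        )
        # nonce \x00 username \x00 password \x00 admin-flag, all as bytes.
        mac.update(nonce.encode('utf8'))
        mac.update(b"\x00")
        mac.update(username.encode('utf8'))
        mac.update(b"\x00")
        mac.update(password.encode('utf8'))
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        return mac.hexdigest()

The flow is: GET the /admin/register endpoint for a nonce, then POST the username, password, admin flag and this MAC back within NONCE_TIMEOUT seconds, since each nonce is single-use and expires.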
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index 69dcd618..97733f30 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -18,7 +18,7 @@ import logging
from twisted.internet import defer
-from synapse.api.errors import AuthError, Codes, SynapseError
+from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.servlet import parse_json_object_from_request
from synapse.types import RoomAlias
@@ -159,7 +159,7 @@ class ClientDirectoryListServer(ClientV1RestServlet):
def on_GET(self, request, room_id):
room = yield self.store.get_room(room_id)
if room is None:
- raise SynapseError(400, "Unknown room")
+ raise NotFoundError("Unknown room")
defer.returnValue((200, {
"visibility": "public" if room["is_public"] else "private"
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index b70c9c28..0f3a2e8b 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -88,7 +88,7 @@ class EventRestServlet(ClientV1RestServlet):
@defer.inlineCallbacks
def on_GET(self, request, event_id):
requester = yield self.auth.get_user_by_req(request)
- event = yield self.event_handler.get_event(requester.user, event_id)
+ event = yield self.event_handler.get_event(requester.user, None, event_id)
time_now = self.clock.time_msec()
if event:
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 3d624478..fa5989e7 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -90,6 +90,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
self.handlers = hs.get_handlers()
self.event_creation_hander = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
+ self.message_handler = hs.get_message_handler()
def register(self, http_server):
# /room/$roomid/state/$eventtype
@@ -124,7 +125,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
format = parse_string(request, "format", default="content",
allowed_values=["content", "event"])
- msg_handler = self.handlers.message_handler
+ msg_handler = self.message_handler
data = yield msg_handler.get_room_data(
user_id=requester.user.to_string(),
room_id=room_id,
@@ -377,14 +378,13 @@ class RoomMemberListRestServlet(ClientV1RestServlet):
def __init__(self, hs):
super(RoomMemberListRestServlet, self).__init__(hs)
- self.handlers = hs.get_handlers()
+ self.message_handler = hs.get_message_handler()
@defer.inlineCallbacks
def on_GET(self, request, room_id):
# TODO support Pagination stream API (limit/tokens)
requester = yield self.auth.get_user_by_req(request)
- handler = self.handlers.message_handler
- events = yield handler.get_state_events(
+ events = yield self.message_handler.get_state_events(
room_id=room_id,
user_id=requester.user.to_string(),
)
@@ -406,7 +406,7 @@ class JoinedRoomMemberListRestServlet(ClientV1RestServlet):
def __init__(self, hs):
super(JoinedRoomMemberListRestServlet, self).__init__(hs)
- self.message_handler = hs.get_handlers().message_handler
+ self.message_handler = hs.get_message_handler()
@defer.inlineCallbacks
def on_GET(self, request, room_id):
@@ -427,7 +427,7 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
def __init__(self, hs):
super(RoomMessageListRestServlet, self).__init__(hs)
- self.handlers = hs.get_handlers()
+ self.pagination_handler = hs.get_pagination_handler()
@defer.inlineCallbacks
def on_GET(self, request, room_id):
@@ -442,8 +442,7 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
event_filter = Filter(json.loads(filter_json))
else:
event_filter = None
- handler = self.handlers.message_handler
- msgs = yield handler.get_messages(
+ msgs = yield self.pagination_handler.get_messages(
room_id=room_id,
requester=requester,
pagin_config=pagination_config,
@@ -460,14 +459,13 @@ class RoomStateRestServlet(ClientV1RestServlet):
def __init__(self, hs):
super(RoomStateRestServlet, self).__init__(hs)
- self.handlers = hs.get_handlers()
+ self.message_handler = hs.get_message_handler()
@defer.inlineCallbacks
def on_GET(self, request, room_id):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
- handler = self.handlers.message_handler
# Get all the current state for this room
- events = yield handler.get_state_events(
+ events = yield self.message_handler.get_state_events(
room_id=room_id,
user_id=requester.user.to_string(),
is_guest=requester.is_guest,
@@ -508,7 +506,7 @@ class RoomEventServlet(ClientV1RestServlet):
@defer.inlineCallbacks
def on_GET(self, request, room_id, event_id):
requester = yield self.auth.get_user_by_req(request)
- event = yield self.event_handler.get_event(requester.user, event_id)
+ event = yield self.event_handler.get_event(requester.user, room_id, event_id)
time_now = self.clock.time_msec()
if event:
@@ -525,7 +523,7 @@ class RoomEventContextServlet(ClientV1RestServlet):
def __init__(self, hs):
super(RoomEventContextServlet, self).__init__(hs)
self.clock = hs.get_clock()
- self.handlers = hs.get_handlers()
+ self.room_context_handler = hs.get_room_context_handler()
@defer.inlineCallbacks
def on_GET(self, request, room_id, event_id):
@@ -533,11 +531,20 @@ class RoomEventContextServlet(ClientV1RestServlet):
limit = parse_integer(request, "limit", default=10)
- results = yield self.handlers.room_context_handler.get_event_context(
+ # picking the API shape for symmetry with /messages
+ filter_bytes = parse_string(request, "filter")
+ if filter_bytes:
+ filter_json = urlparse.unquote(filter_bytes).decode("UTF-8")
+ event_filter = Filter(json.loads(filter_json))
+ else:
+ event_filter = None
+
+ results = yield self.room_context_handler.get_event_context(
requester.user,
room_id,
event_id,
limit,
+ event_filter,
)
if not results:
@@ -832,10 +839,13 @@ def register_servlets(hs, http_server):
RoomSendEventRestServlet(hs).register(http_server)
PublicRoomListRestServlet(hs).register(http_server)
RoomStateRestServlet(hs).register(http_server)
- RoomInitialSyncRestServlet(hs).register(http_server)
RoomRedactEventRestServlet(hs).register(http_server)
RoomTypingRestServlet(hs).register(http_server)
SearchRestServlet(hs).register(http_server)
JoinedRoomsRestServlet(hs).register(http_server)
RoomEventServlet(hs).register(http_server)
RoomEventContextServlet(hs).register(http_server)
+
+
+def register_deprecated_servlets(hs, http_server):
+ RoomInitialSyncRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1_only/__init__.py b/synapse/rest/client/v1_only/__init__.py
new file mode 100644
index 00000000..936f902a
--- /dev/null
+++ b/synapse/rest/client/v1_only/__init__.py
@@ -0,0 +1,3 @@
+"""
+REST APIs that are only used in v1 (the legacy API).
+"""
diff --git a/synapse/rest/client/v1_only/base.py b/synapse/rest/client/v1_only/base.py
new file mode 100644
index 00000000..9d4db743
--- /dev/null
+++ b/synapse/rest/client/v1_only/base.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains base REST classes for constructing client v1 servlets.
+"""
+
+import re
+
+from synapse.api.urls import CLIENT_PREFIX
+
+
+def v1_only_client_path_patterns(path_regex, include_in_unstable=True):
+ """Creates a regex compiled client path with the correct client path
+ prefix.
+
+ Args:
+ path_regex (str): The regex string to match. This should NOT have a ^
+ as this will be prefixed.
+ Returns:
+ list of SRE_Pattern
+ """
+ patterns = [re.compile("^" + CLIENT_PREFIX + path_regex)]
+ if include_in_unstable:
+ unstable_prefix = CLIENT_PREFIX.replace("/api/v1", "/unstable")
+ patterns.append(re.compile("^" + unstable_prefix + path_regex))
+ return patterns
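
A usage sketch of the helper above, assuming CLIENT_PREFIX is "/_matrix/client/api/v1" (its value in synapse.api.urls):

    # Assumes the v1_only_client_path_patterns defined above.
    patterns = v1_only_client_path_patterns("/register$")
    assert patterns[0].match("/_matrix/client/api/v1/register")
    assert patterns[1].match("/_matrix/client/unstable/register")
    # With include_in_unstable=False (as the v1 /register servlet now
    # passes), only the /api/v1 pattern is built, so the endpoint never
    # appears under the unstable prefix.
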
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1_only/register.py
index 25a143af..3439c3c6 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1_only/register.py
@@ -24,9 +24,10 @@ import synapse.util.stringutils as stringutils
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, SynapseError
from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request
+from synapse.rest.client.v1.base import ClientV1RestServlet
from synapse.types import create_requester
-from .base import ClientV1RestServlet, client_path_patterns
+from .base import v1_only_client_path_patterns
logger = logging.getLogger(__name__)
@@ -49,7 +50,7 @@ class RegisterRestServlet(ClientV1RestServlet):
handler doesn't have a concept of multi-stages or sessions.
"""
- PATTERNS = client_path_patterns("/register$", releases=(), include_in_unstable=False)
+ PATTERNS = v1_only_client_path_patterns("/register$", include_in_unstable=False)
def __init__(self, hs):
"""
@@ -379,7 +380,7 @@ class CreateUserRestServlet(ClientV1RestServlet):
"""Handles user creation via a server-to-server interface
"""
- PATTERNS = client_path_patterns("/createUser$", releases=())
+ PATTERNS = v1_only_client_path_patterns("/createUser$")
def __init__(self, hs):
super(CreateUserRestServlet, self).__init__(hs)
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index d6cf915d..2f64155d 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -193,15 +193,15 @@ class RegisterRestServlet(RestServlet):
def on_POST(self, request):
body = parse_json_object_from_request(request)
- kind = "user"
- if "kind" in request.args:
- kind = request.args["kind"][0]
+ kind = b"user"
+ if b"kind" in request.args:
+ kind = request.args[b"kind"][0]
- if kind == "guest":
+ if kind == b"guest":
ret = yield self._do_guest_registration(body)
defer.returnValue(ret)
return
- elif kind != "user":
+ elif kind != b"user":
raise UnrecognizedRequestError(
"Do not understand membership kind: %s" % (kind,)
)
@@ -389,8 +389,8 @@ class RegisterRestServlet(RestServlet):
assert_params_in_dict(params, ["password"])
desired_username = params.get("username", None)
- new_password = params.get("password", None)
guest_access_token = params.get("guest_access_token", None)
+ new_password = params.get("password", None)
if desired_username is not None:
desired_username = desired_username.lower()
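
The switch from "kind" to b"kind" above reflects that, on Python 3, Twisted parses query arguments into bytes keys and values. A toy illustration (no Twisted required; the dict stands in for request.args):

    # Toy stand-in for request.args as Twisted builds it on Python 3:
    args = {b"kind": [b"guest"]}

    kind = b"user"
    if b"kind" in args:
        kind = args[b"kind"][0]

    assert kind == b"guest"
    assert kind != "guest"   # a str literal would silently never match
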
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 30242c52..8fb413d8 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -35,6 +35,7 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.async import Linearizer
from synapse.util.logcontext import make_deferred_yieldable
from synapse.util.retryutils import NotRetryingDestination
@@ -100,10 +101,15 @@ class MediaRepository(object):
)
self.clock.looping_call(
- self._update_recently_accessed,
+ self._start_update_recently_accessed,
UPDATE_RECENTLY_ACCESSED_TS,
)
+ def _start_update_recently_accessed(self):
+ return run_as_background_process(
+ "update_recently_accessed_media", self._update_recently_accessed,
+ )
+
@defer.inlineCallbacks
def _update_recently_accessed(self):
remote_media = self.recently_accessed_remotes
@@ -373,7 +379,7 @@ class MediaRepository(object):
logger.warn("HTTP error fetching remote media %s/%s: %s",
server_name, media_id, e.response)
if e.code == twisted.web.http.NOT_FOUND:
- raise SynapseError.from_http_response_exception(e)
+ raise e.to_synapse_error()
raise SynapseError(502, "Failed to fetch remote media")
except SynapseError:
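
The _start_update_recently_accessed wrapper above is one instance of a pattern repeated throughout this patch (preview_url_resource, background_updates, client_ips, devices, event_federation, event_push_actions, events_worker): looping_call and similar schedulers invoke a plain synchronous entry point, which hands the real deferred-returning work to run_as_background_process so it runs in its own logcontext and is counted by the new background-process metrics. A schematic sketch with illustrative names:

    from twisted.internet import defer

    from synapse.metrics.background_process_metrics import (
        run_as_background_process,
    )

    class ExampleCleaner(object):
        """Illustrative only: not a class from this patch."""

        def __init__(self, hs):
            self.clock = hs.get_clock()
            # looping_call invokes a plain synchronous entry point...
            self.clock.looping_call(self._start_cleanup, 60 * 1000)

        def _start_cleanup(self):
            # ...which hands the deferred-returning work to
            # run_as_background_process for logcontext and metrics.
            return run_as_background_process("example_cleanup", self._cleanup)

        @defer.inlineCallbacks
        def _cleanup(self):
            yield defer.succeed(None)  # actual periodic work goes here
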
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index b25993fc..a6189224 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -177,7 +177,7 @@ class MediaStorage(object):
if res:
with res:
consumer = BackgroundFileConsumer(
- open(local_path, "w"), self.hs.get_reactor())
+ open(local_path, "wb"), self.hs.get_reactor())
yield res.write_to_consumer(consumer)
yield consumer.wait()
defer.returnValue(local_path)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index b70b15c4..27aa0def 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -41,6 +41,7 @@ from synapse.http.server import (
wrap_json_request_handler,
)
from synapse.http.servlet import parse_integer, parse_string
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.async import ObservableDeferred
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
@@ -81,7 +82,7 @@ class PreviewUrlResource(Resource):
self._cache.start()
self._cleaner_loop = self.clock.looping_call(
- self._expire_url_cache_data, 10 * 1000
+ self._start_expire_url_cache_data, 10 * 1000,
)
def render_OPTIONS(self, request):
@@ -371,6 +372,11 @@ class PreviewUrlResource(Resource):
"etag": headers["ETag"][0] if "ETag" in headers else None,
})
+ def _start_expire_url_cache_data(self):
+ return run_as_background_process(
+ "expire_url_cache_data", self._expire_url_cache_data,
+ )
+
@defer.inlineCallbacks
def _expire_url_cache_data(self):
"""Clean up expired url cache content, media and thumbnails.
diff --git a/synapse/secrets.py b/synapse/secrets.py
new file mode 100644
index 00000000..f05e9ea5
--- /dev/null
+++ b/synapse/secrets.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Injectable secrets module for Synapse.
+
+See https://docs.python.org/3/library/secrets.html#module-secrets for the API
+used in Python 3.6, and the API emulated in Python 2.7.
+"""
+
+import sys
+
+# secrets is available since python 3.6
+if sys.version_info[0:2] >= (3, 6):
+ import secrets
+
+ def Secrets():
+ return secrets
+
+else:
+ import os
+ import binascii
+
+ class Secrets(object):
+ def token_bytes(self, nbytes=32):
+ return os.urandom(nbytes)
+
+ def token_hex(self, nbytes=32):
+ return binascii.hexlify(self.token_bytes(nbytes))
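
A usage sketch for the module above (call sites obtain the object via the homeserver; see the 'secrets' dependency added to synapse/server.py below):

    # Assumes the Secrets defined above.
    secrets = Secrets()
    token = secrets.token_hex(16)   # 16 random bytes, hex-encoded
    # One observable difference between the two branches: the stdlib
    # secrets module returns a str here, while the 2.7 shim returns
    # the bytes produced by binascii.hexlify.
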
diff --git a/synapse/server.py b/synapse/server.py
index 92bea96c..140be9eb 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -52,12 +52,13 @@ from synapse.handlers.e2e_keys import E2eKeysHandler
from synapse.handlers.events import EventHandler, EventStreamHandler
from synapse.handlers.groups_local import GroupsLocalHandler
from synapse.handlers.initial_sync import InitialSyncHandler
-from synapse.handlers.message import EventCreationHandler
+from synapse.handlers.message import EventCreationHandler, MessageHandler
+from synapse.handlers.pagination import PaginationHandler
from synapse.handlers.presence import PresenceHandler
from synapse.handlers.profile import ProfileHandler
from synapse.handlers.read_marker import ReadMarkerHandler
from synapse.handlers.receipts import ReceiptsHandler
-from synapse.handlers.room import RoomCreationHandler
+from synapse.handlers.room import RoomContextHandler, RoomCreationHandler
from synapse.handlers.room_list import RoomListHandler
from synapse.handlers.room_member import RoomMemberMasterHandler
from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
@@ -74,6 +75,7 @@ from synapse.rest.media.v1.media_repository import (
MediaRepository,
MediaRepositoryResource,
)
+from synapse.secrets import Secrets
from synapse.server_notices.server_notices_manager import ServerNoticesManager
from synapse.server_notices.server_notices_sender import ServerNoticesSender
from synapse.server_notices.worker_server_notices_sender import WorkerServerNoticesSender
@@ -158,11 +160,15 @@ class HomeServer(object):
'groups_server_handler',
'groups_attestation_signing',
'groups_attestation_renewer',
+ 'secrets',
'spam_checker',
'room_member_handler',
'federation_registry',
'server_notices_manager',
'server_notices_sender',
+ 'message_handler',
+ 'pagination_handler',
+ 'room_context_handler',
]
def __init__(self, hostname, reactor=None, **kwargs):
@@ -405,6 +411,9 @@ class HomeServer(object):
def build_groups_attestation_renewer(self):
return GroupAttestionRenewer(self)
+ def build_secrets(self):
+ return Secrets()
+
def build_spam_checker(self):
return SpamChecker(self)
@@ -426,6 +435,15 @@ class HomeServer(object):
return WorkerServerNoticesSender(self)
return ServerNoticesSender(self)
+ def build_message_handler(self):
+ return MessageHandler(self)
+
+ def build_pagination_handler(self):
+ return PaginationHandler(self)
+
+ def build_room_context_handler(self):
+ return RoomContextHandler(self)
+
def remove_pusher(self, app_id, push_key, user_id):
return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
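
Each name added to the dependency list above gains a cached get_<name>() accessor backed by the matching build_<name>() method. A hand-written sketch of that observable behaviour (assumption: the real accessors are generated from the list rather than written out like this):

    from synapse.secrets import Secrets

    class MiniHomeServer(object):
        """Hand-written sketch, not the real implementation."""

        def __init__(self):
            self._secrets = None

        def build_secrets(self):
            return Secrets()

        def get_secrets(self):
            if self._secrets is None:       # built on first use...
                self._secrets = self.build_secrets()
            return self._secrets            # ...then cached

    hs = MiniHomeServer()
    assert hs.get_secrets() is hs.get_secrets()
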
diff --git a/synapse/state.py b/synapse/state.py
index 15a593d4..e1092b97 100644
--- a/synapse/state.py
+++ b/synapse/state.py
@@ -18,7 +18,7 @@ import hashlib
import logging
from collections import namedtuple
-from six import iteritems, itervalues
+from six import iteritems, iterkeys, itervalues
from frozendict import frozendict
@@ -203,25 +203,27 @@ class StateHandler(object):
# If this is an outlier, then we know it shouldn't have any current
# state. Certainly store.get_current_state won't return any, and
# persisting the event won't store the state group.
- context = EventContext()
if old_state:
- context.prev_state_ids = {
+ prev_state_ids = {
(s.type, s.state_key): s.event_id for s in old_state
}
if event.is_state():
- context.current_state_ids = dict(context.prev_state_ids)
+ current_state_ids = dict(prev_state_ids)
key = (event.type, event.state_key)
- context.current_state_ids[key] = event.event_id
+ current_state_ids[key] = event.event_id
else:
- context.current_state_ids = context.prev_state_ids
+ current_state_ids = prev_state_ids
else:
- context.current_state_ids = {}
- context.prev_state_ids = {}
- context.prev_state_events = []
+ current_state_ids = {}
+ prev_state_ids = {}
# We don't store state for outliers, so we don't generate a state
- # froup for it.
- context.state_group = None
+ # group for it.
+ context = EventContext.with_state(
+ state_group=None,
+ current_state_ids=current_state_ids,
+ prev_state_ids=prev_state_ids,
+ )
defer.returnValue(context)
@@ -230,31 +232,35 @@ class StateHandler(object):
# Let's just correctly fill out the context and create a
# new state group for it.
- context = EventContext()
- context.prev_state_ids = {
+ prev_state_ids = {
(s.type, s.state_key): s.event_id for s in old_state
}
if event.is_state():
key = (event.type, event.state_key)
- if key in context.prev_state_ids:
- replaces = context.prev_state_ids[key]
+ if key in prev_state_ids:
+ replaces = prev_state_ids[key]
if replaces != event.event_id: # Paranoia check
event.unsigned["replaces_state"] = replaces
- context.current_state_ids = dict(context.prev_state_ids)
- context.current_state_ids[key] = event.event_id
+ current_state_ids = dict(prev_state_ids)
+ current_state_ids[key] = event.event_id
else:
- context.current_state_ids = context.prev_state_ids
+ current_state_ids = prev_state_ids
- context.state_group = yield self.store.store_state_group(
+ state_group = yield self.store.store_state_group(
event.event_id,
event.room_id,
prev_group=None,
delta_ids=None,
- current_state_ids=context.current_state_ids,
+ current_state_ids=current_state_ids,
+ )
+
+ context = EventContext.with_state(
+ state_group=state_group,
+ current_state_ids=current_state_ids,
+ prev_state_ids=prev_state_ids,
)
- context.prev_state_events = []
defer.returnValue(context)
logger.debug("calling resolve_state_groups from compute_event_context")
@@ -262,47 +268,47 @@ class StateHandler(object):
event.room_id, [e for e, _ in event.prev_events],
)
- curr_state = entry.state
+ prev_state_ids = entry.state
+ prev_group = None
+ delta_ids = None
- context = EventContext()
- context.prev_state_ids = curr_state
if event.is_state():
# If this is a state event then we need to create a new state
# group for the state after this event.
key = (event.type, event.state_key)
- if key in context.prev_state_ids:
- replaces = context.prev_state_ids[key]
+ if key in prev_state_ids:
+ replaces = prev_state_ids[key]
event.unsigned["replaces_state"] = replaces
- context.current_state_ids = dict(context.prev_state_ids)
- context.current_state_ids[key] = event.event_id
+ current_state_ids = dict(prev_state_ids)
+ current_state_ids[key] = event.event_id
if entry.state_group:
# If the state at the event has a state group assigned then
# we can use that as the prev group
- context.prev_group = entry.state_group
- context.delta_ids = {
+ prev_group = entry.state_group
+ delta_ids = {
key: event.event_id
}
elif entry.prev_group:
# If the state at the event only has a prev group, then we can
# use that as a prev group too.
- context.prev_group = entry.prev_group
- context.delta_ids = dict(entry.delta_ids)
- context.delta_ids[key] = event.event_id
+ prev_group = entry.prev_group
+ delta_ids = dict(entry.delta_ids)
+ delta_ids[key] = event.event_id
- context.state_group = yield self.store.store_state_group(
+ state_group = yield self.store.store_state_group(
event.event_id,
event.room_id,
- prev_group=context.prev_group,
- delta_ids=context.delta_ids,
- current_state_ids=context.current_state_ids,
+ prev_group=prev_group,
+ delta_ids=delta_ids,
+ current_state_ids=current_state_ids,
)
else:
- context.current_state_ids = context.prev_state_ids
- context.prev_group = entry.prev_group
- context.delta_ids = entry.delta_ids
+ current_state_ids = prev_state_ids
+ prev_group = entry.prev_group
+ delta_ids = entry.delta_ids
if entry.state_group is None:
entry.state_group = yield self.store.store_state_group(
@@ -310,13 +316,20 @@ class StateHandler(object):
event.room_id,
prev_group=entry.prev_group,
delta_ids=entry.delta_ids,
- current_state_ids=context.current_state_ids,
+ current_state_ids=current_state_ids,
)
entry.state_id = entry.state_group
- context.state_group = entry.state_group
+ state_group = entry.state_group
+
+ context = EventContext.with_state(
+ state_group=state_group,
+ current_state_ids=current_state_ids,
+ prev_state_ids=prev_state_ids,
+ prev_group=prev_group,
+ delta_ids=delta_ids,
+ )
- context.prev_state_events = []
defer.returnValue(context)
@defer.inlineCallbacks
@@ -458,69 +471,39 @@ class StateResolutionHandler(object):
"Resolving state for %s with %d groups", room_id, len(state_groups_ids)
)
- # build a map from state key to the event_ids which set that state.
- # dict[(str, str), set[str])
- state = {}
+ # start by assuming we won't have any conflicted state, and build up the new
+ # state map by iterating through the state groups. If we discover a conflict,
+ # we give up and instead use `resolve_events_with_factory`.
+ #
+ # XXX: is this actually worthwhile, or should we just let
+ # resolve_events_with_factory do it?
+ new_state = {}
+ conflicted_state = False
for st in itervalues(state_groups_ids):
for key, e_id in iteritems(st):
- state.setdefault(key, set()).add(e_id)
-
- # build a map from state key to the event_ids which set that state,
- # including only those where there are state keys in conflict.
- conflicted_state = {
- k: list(v)
- for k, v in iteritems(state)
- if len(v) > 1
- }
+ if key in new_state:
+ conflicted_state = True
+ break
+ new_state[key] = e_id
+ if conflicted_state:
+ break
if conflicted_state:
logger.info("Resolving conflicted state for %r", room_id)
with Measure(self.clock, "state._resolve_events"):
new_state = yield resolve_events_with_factory(
- list(state_groups_ids.values()),
+ list(itervalues(state_groups_ids)),
event_map=event_map,
state_map_factory=state_map_factory,
)
- else:
- new_state = {
- key: e_ids.pop() for key, e_ids in iteritems(state)
- }
- with Measure(self.clock, "state.create_group_ids"):
- # if the new state matches any of the input state groups, we can
- # use that state group again. Otherwise we will generate a state_id
- # which will be used as a cache key for future resolutions, but
- # not get persisted.
- state_group = None
- new_state_event_ids = frozenset(itervalues(new_state))
- for sg, events in iteritems(state_groups_ids):
- if new_state_event_ids == frozenset(e_id for e_id in events):
- state_group = sg
- break
+ # if the new state matches any of the input state groups, we can
+ # use that state group again. Otherwise we will generate a state_id
+ # which will be used as a cache key for future resolutions, but
+ # not get persisted.
- # TODO: We want to create a state group for this set of events, to
- # increase cache hits, but we need to make sure that it doesn't
- # end up as a prev_group without being added to the database
-
- prev_group = None
- delta_ids = None
- for old_group, old_ids in iteritems(state_groups_ids):
- if not set(new_state) - set(old_ids):
- n_delta_ids = {
- k: v
- for k, v in iteritems(new_state)
- if old_ids.get(k) != v
- }
- if not delta_ids or len(n_delta_ids) < len(delta_ids):
- prev_group = old_group
- delta_ids = n_delta_ids
-
- cache = _StateCacheEntry(
- state=new_state,
- state_group=state_group,
- prev_group=prev_group,
- delta_ids=delta_ids,
- )
+ with Measure(self.clock, "state.create_group_ids"):
+ cache = _make_state_cache_entry(new_state, state_groups_ids)
if self._state_cache is not None:
self._state_cache[group_names] = cache
@@ -528,9 +511,73 @@ class StateResolutionHandler(object):
defer.returnValue(cache)
+def _make_state_cache_entry(
+ new_state,
+ state_groups_ids,
+):
+ """Given a resolved state, and a set of input state groups, pick one to base
+ a new state group on (if any), and return an appropriately-constructed
+ _StateCacheEntry.
+
+ Args:
+ new_state (dict[(str, str), str]): resolved state map (mapping from
+ (type, state_key) to event_id)
+
+ state_groups_ids (dict[int, dict[(str, str), str]]):
+ map from state group id to the state in that state group
+ (where 'state' is a map from state key to event id)
+
+ Returns:
+ _StateCacheEntry
+ """
+ # if the new state matches any of the input state groups, we can
+ # use that state group again. Otherwise we will generate a state_id
+ # which will be used as a cache key for future resolutions, but
+ # not get persisted.
+
+ # first look for exact matches
+ new_state_event_ids = set(itervalues(new_state))
+ for sg, state in iteritems(state_groups_ids):
+ if len(new_state_event_ids) != len(state):
+ continue
+
+ old_state_event_ids = set(itervalues(state))
+ if new_state_event_ids == old_state_event_ids:
+ # got an exact match.
+ return _StateCacheEntry(
+ state=new_state,
+ state_group=sg,
+ )
+
+ # TODO: We want to create a state group for this set of events, to
+ # increase cache hits, but we need to make sure that it doesn't
+ # end up as a prev_group without being added to the database
+
+ # failing that, look for the closest match.
+ prev_group = None
+ delta_ids = None
+
+ for old_group, old_state in iteritems(state_groups_ids):
+ n_delta_ids = {
+ k: v
+ for k, v in iteritems(new_state)
+ if old_state.get(k) != v
+ }
+ if not delta_ids or len(n_delta_ids) < len(delta_ids):
+ prev_group = old_group
+ delta_ids = n_delta_ids
+
+ return _StateCacheEntry(
+ state=new_state,
+ state_group=None,
+ prev_group=prev_group,
+ delta_ids=delta_ids,
+ )
+
+
def _ordered_events(events):
def key_func(e):
- return -int(e.depth), hashlib.sha1(e.event_id.encode()).hexdigest()
+ return -int(e.depth), hashlib.sha1(e.event_id.encode('ascii')).hexdigest()
return sorted(events, key=key_func)
@@ -569,7 +616,7 @@ def _seperate(state_sets):
with them in different state sets.
Args:
- state_sets(list[dict[(str, str), str]]):
+ state_sets(iterable[dict[(str, str), str]]):
List of dicts of (type, state_key) -> event_id, which are the
different state groups to resolve.
@@ -583,10 +630,11 @@ def _seperate(state_sets):
conflicted_state is a dict mapping (type, state_key) to a set of
event ids for conflicted state keys.
"""
- unconflicted_state = dict(state_sets[0])
+ state_set_iterator = iter(state_sets)
+ unconflicted_state = dict(next(state_set_iterator))
conflicted_state = {}
- for state_set in state_sets[1:]:
+ for state_set in state_set_iterator:
for key, value in iteritems(state_set):
# Check if there is an unconflicted entry for the state key.
unconflicted_value = unconflicted_state.get(key)
@@ -647,7 +695,7 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory):
for event_id in event_ids
)
if event_map is not None:
- needed_events -= set(event_map.iterkeys())
+ needed_events -= set(iterkeys(event_map))
logger.info("Asking for %d conflicted events", len(needed_events))
@@ -668,7 +716,7 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory):
new_needed_events = set(itervalues(auth_events))
new_needed_events -= needed_events
if event_map is not None:
- new_needed_events -= set(event_map.iterkeys())
+ new_needed_events -= set(iterkeys(event_map))
logger.info("Asking for %d auth events", len(new_needed_events))
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index ba88a549..134e4a80 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -66,6 +66,7 @@ class DataStore(RoomMemberStore, RoomStore,
PresenceStore, TransactionStore,
DirectoryStore, KeyStore, StateStore, SignatureStore,
ApplicationServiceStore,
+ EventsStore,
EventFederationStore,
MediaRepositoryStore,
RejectionsStore,
@@ -73,7 +74,6 @@ class DataStore(RoomMemberStore, RoomStore,
PusherStore,
PushRuleStore,
ApplicationServiceTransactionStore,
- EventsStore,
ReceiptsStore,
EndToEndKeyStore,
SearchStore,
@@ -94,6 +94,7 @@ class DataStore(RoomMemberStore, RoomStore,
self._clock = hs.get_clock()
self.database_engine = hs.database_engine
+ self.db_conn = db_conn
self._stream_id_gen = StreamIdGenerator(
db_conn, "events", "stream_ordering",
extra_tables=[("local_invites", "stream_id")]
@@ -266,6 +267,31 @@ class DataStore(RoomMemberStore, RoomStore,
return self.runInteraction("count_users", _count_users)
+ def count_monthly_users(self):
+ """Counts the number of users who used this homeserver in the last 30 days
+
+ This method should be refactored together with count_daily_users; the
+ only reason it has not been yet is the pending definition of MAU
+
+ Returns:
+ Deferred[int]
+ """
+ def _count_monthly_users(txn):
+ thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
+ sql = """
+ SELECT COALESCE(count(*), 0) FROM (
+ SELECT user_id FROM user_ips
+ WHERE last_seen > ?
+ GROUP BY user_id
+ ) u
+ """
+
+ txn.execute(sql, (thirty_days_ago,))
+ count, = txn.fetchone()
+ return count
+
+ return self.runInteraction("count_monthly_users", _count_monthly_users)
+
def count_r30_users(self):
"""
Counts the number of 30 day retained users, defined as:-
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index a6a0e6ec..44f37b4c 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -311,6 +311,12 @@ class SQLBaseStore(object):
after_callbacks = []
exception_callbacks = []
+ if LoggingContext.current_context() == LoggingContext.sentinel:
+ logger.warn(
+ "Starting db txn '%s' from sentinel context",
+ desc,
+ )
+
try:
result = yield self.runWithConnection(
self._new_transaction,
@@ -343,10 +349,9 @@ class SQLBaseStore(object):
"""
parent_context = LoggingContext.current_context()
if parent_context == LoggingContext.sentinel:
- # warning disabled for 0.33.0 release; proper fixes will land imminently.
- # logger.warn(
- # "Running db txn from sentinel context: metrics will be lost",
- # )
+ logger.warn(
+ "Starting db connection from sentinel context: metrics will be lost",
+ )
parent_context = None
start_time = time.time()
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index 9f12b360..31248d5e 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -22,7 +22,7 @@ from twisted.internet import defer
from synapse.appservice import AppServiceTransaction
from synapse.config.appservice import load_appservices
-from synapse.storage.events import EventsWorkerStore
+from synapse.storage.events_worker import EventsWorkerStore
from ._base import SQLBaseStore
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index dc9eca7d..5fe1ca2d 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -19,6 +19,8 @@ from canonicaljson import json
from twisted.internet import defer
+from synapse.metrics.background_process_metrics import run_as_background_process
+
from . import engines
from ._base import SQLBaseStore
@@ -87,10 +89,14 @@ class BackgroundUpdateStore(SQLBaseStore):
self._background_update_handlers = {}
self._all_done = False
- @defer.inlineCallbacks
def start_doing_background_updates(self):
- logger.info("Starting background schema updates")
+ run_as_background_process(
+ "background_updates", self._run_background_updates,
+ )
+ @defer.inlineCallbacks
+ def _run_background_updates(self):
+ logger.info("Starting background schema updates")
while True:
yield self.hs.get_clock().sleep(
self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.)
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index b78eda34..b8cefd43 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -19,6 +19,7 @@ from six import iteritems
from twisted.internet import defer
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.caches import CACHE_SIZE_FACTOR
from . import background_updates
@@ -93,10 +94,16 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
self._batch_row_update[key] = (user_agent, device_id, now)
def _update_client_ips_batch(self):
- to_update = self._batch_row_update
- self._batch_row_update = {}
- return self.runInteraction(
- "_update_client_ips_batch", self._update_client_ips_batch_txn, to_update
+ def update():
+ to_update = self._batch_row_update
+ self._batch_row_update = {}
+ return self.runInteraction(
+ "_update_client_ips_batch", self._update_client_ips_batch_txn,
+ to_update,
+ )
+
+ return run_as_background_process(
+ "update_client_ips", update,
)
def _update_client_ips_batch_txn(self, txn, to_update):
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index ec68e39f..c0943ecf 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -21,6 +21,7 @@ from canonicaljson import json
from twisted.internet import defer
from synapse.api.errors import StoreError
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
from ._base import Cache, SQLBaseStore
@@ -248,17 +249,31 @@ class DeviceStore(SQLBaseStore):
def _update_remote_device_list_cache_entry_txn(self, txn, user_id, device_id,
content, stream_id):
- self._simple_upsert_txn(
- txn,
- table="device_lists_remote_cache",
- keyvalues={
- "user_id": user_id,
- "device_id": device_id,
- },
- values={
- "content": json.dumps(content),
- }
- )
+ if content.get("deleted"):
+ self._simple_delete_txn(
+ txn,
+ table="device_lists_remote_cache",
+ keyvalues={
+ "user_id": user_id,
+ "device_id": device_id,
+ },
+ )
+
+ txn.call_after(
+ self.device_id_exists_cache.invalidate, (user_id, device_id,)
+ )
+ else:
+ self._simple_upsert_txn(
+ txn,
+ table="device_lists_remote_cache",
+ keyvalues={
+ "user_id": user_id,
+ "device_id": device_id,
+ },
+ values={
+ "content": json.dumps(content),
+ }
+ )
txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id,))
txn.call_after(self._get_cached_devices_for_user.invalidate, (user_id,))
@@ -366,7 +381,7 @@ class DeviceStore(SQLBaseStore):
now_stream_id = max(stream_id for stream_id in itervalues(query_map))
devices = self._get_e2e_device_keys_txn(
- txn, query_map.keys(), include_all_devices=True
+ txn, query_map.keys(), include_all_devices=True, include_deleted_devices=True
)
prev_sent_id_sql = """
@@ -393,12 +408,15 @@ class DeviceStore(SQLBaseStore):
prev_id = stream_id
- key_json = device.get("key_json", None)
- if key_json:
- result["keys"] = json.loads(key_json)
- device_display_name = device.get("device_display_name", None)
- if device_display_name:
- result["device_display_name"] = device_display_name
+ if device is not None:
+ key_json = device.get("key_json", None)
+ if key_json:
+ result["keys"] = json.loads(key_json)
+ device_display_name = device.get("device_display_name", None)
+ if device_display_name:
+ result["device_display_name"] = device_display_name
+ else:
+ result["deleted"] = True
results.append(result)
@@ -694,6 +712,9 @@ class DeviceStore(SQLBaseStore):
logger.info("Pruned %d device list outbound pokes", txn.rowcount)
- return self.runInteraction(
- "_prune_old_outbound_device_pokes", _prune_txn
+ return run_as_background_process(
+ "prune_old_outbound_device_pokes",
+ self.runInteraction,
+ "_prune_old_outbound_device_pokes",
+ _prune_txn,
)
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 7ae5c654..523b4360 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -64,12 +64,18 @@ class EndToEndKeyStore(SQLBaseStore):
)
@defer.inlineCallbacks
- def get_e2e_device_keys(self, query_list, include_all_devices=False):
+ def get_e2e_device_keys(
+ self, query_list, include_all_devices=False,
+ include_deleted_devices=False,
+ ):
"""Fetch a list of device keys.
Args:
query_list(list): List of pairs of user_ids and device_ids.
include_all_devices (bool): whether to include entries for devices
that don't have device keys
+ include_deleted_devices (bool): whether to include null entries for
+ devices which no longer exist (but were in the query_list).
+ This option only takes effect if include_all_devices is true.
Returns:
Dict mapping from user-id to dict mapping from device_id to
dict containing "key_json", "device_display_name".
@@ -79,7 +85,7 @@ class EndToEndKeyStore(SQLBaseStore):
results = yield self.runInteraction(
"get_e2e_device_keys", self._get_e2e_device_keys_txn,
- query_list, include_all_devices,
+ query_list, include_all_devices, include_deleted_devices,
)
for user_id, device_keys in iteritems(results):
@@ -88,10 +94,19 @@ class EndToEndKeyStore(SQLBaseStore):
defer.returnValue(results)
- def _get_e2e_device_keys_txn(self, txn, query_list, include_all_devices):
+ def _get_e2e_device_keys_txn(
+ self, txn, query_list, include_all_devices=False,
+ include_deleted_devices=False,
+ ):
query_clauses = []
query_params = []
+ if include_all_devices is False:
+ include_deleted_devices = False
+
+ if include_deleted_devices:
+ deleted_devices = set(query_list)
+
for (user_id, device_id) in query_list:
query_clause = "user_id = ?"
query_params.append(user_id)
@@ -119,8 +134,14 @@ class EndToEndKeyStore(SQLBaseStore):
result = {}
for row in rows:
+ if include_deleted_devices:
+ deleted_devices.remove((row["user_id"], row["device_id"]))
result.setdefault(row["user_id"], {})[row["device_id"]] = row
+ if include_deleted_devices:
+ for user_id, device_id in deleted_devices:
+ result.setdefault(user_id, {})[device_id] = None
+
return result
@defer.inlineCallbacks
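
With include_deleted_devices, _get_e2e_device_keys_txn seeds a set with the whole query list, crosses off each pair the query actually returns, and maps the leftovers to None. A toy illustration of that bookkeeping (example rows are hypothetical):

    query_list = [("@alice:hs", "DEV1"), ("@alice:hs", "GONE")]
    rows = [{"user_id": "@alice:hs", "device_id": "DEV1", "key_json": "{}"}]

    deleted_devices = set(query_list)       # assume everything is gone...
    result = {}
    for row in rows:
        # ...and cross off each device the query actually returned
        deleted_devices.remove((row["user_id"], row["device_id"]))
        result.setdefault(row["user_id"], {})[row["device_id"]] = row

    for user_id, device_id in deleted_devices:
        result.setdefault(user_id, {})[device_id] = None

    assert result["@alice:hs"]["GONE"] is None
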
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 8d366d1b..24345b20 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -23,8 +23,9 @@ from unpaddedbase64 import encode_base64
from twisted.internet import defer
from synapse.api.errors import StoreError
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage._base import SQLBaseStore
-from synapse.storage.events import EventsWorkerStore
+from synapse.storage.events_worker import EventsWorkerStore
from synapse.storage.signatures import SignatureWorkerStore
from synapse.util.caches.descriptors import cached
@@ -113,9 +114,9 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
sql = (
"SELECT b.event_id, MAX(e.depth) FROM events as e"
" INNER JOIN event_edges as g"
- " ON g.event_id = e.event_id AND g.room_id = e.room_id"
+ " ON g.event_id = e.event_id"
" INNER JOIN event_backward_extremities as b"
- " ON g.prev_event_id = b.event_id AND g.room_id = b.room_id"
+ " ON g.prev_event_id = b.event_id"
" WHERE b.room_id = ? AND g.is_state is ?"
" GROUP BY b.event_id"
)
@@ -329,8 +330,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
"SELECT depth, prev_event_id FROM event_edges"
" INNER JOIN events"
" ON prev_event_id = events.event_id"
- " AND event_edges.room_id = events.room_id"
- " WHERE event_edges.room_id = ? AND event_edges.event_id = ?"
+ " WHERE event_edges.event_id = ?"
" AND event_edges.is_state = ?"
" LIMIT ?"
)
@@ -343,6 +343,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
table="events",
keyvalues={
"event_id": event_id,
+ "room_id": room_id,
},
retcol="depth",
allow_none=True,
@@ -364,7 +365,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
txn.execute(
query,
- (room_id, event_id, False, limit - len(event_results))
+ (event_id, False, limit - len(event_results))
)
for row in txn:
@@ -401,7 +402,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
query = (
"SELECT prev_event_id FROM event_edges "
- "WHERE room_id = ? AND event_id = ? AND is_state = ? "
+ "WHERE event_id = ? AND is_state = ? "
"LIMIT ?"
)
@@ -410,7 +411,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore,
for event_id in front:
txn.execute(
query,
- (room_id, event_id, False, limit - len(event_results))
+ (event_id, False, limit - len(event_results))
)
for e_id, in txn:
@@ -446,7 +447,7 @@ class EventFederationStore(EventFederationWorkerStore):
)
hs.get_clock().looping_call(
- self._delete_old_forward_extrem_cache, 60 * 60 * 1000
+ self._delete_old_forward_extrem_cache, 60 * 60 * 1000,
)
def _update_min_depth_for_room_txn(self, txn, room_id, depth):
@@ -548,9 +549,11 @@ class EventFederationStore(EventFederationWorkerStore):
sql,
(self.stream_ordering_month_ago, self.stream_ordering_month_ago,)
)
- return self.runInteraction(
+ return run_as_background_process(
+ "delete_old_forward_extrem_cache",
+ self.runInteraction,
"_delete_old_forward_extrem_cache",
- _delete_old_forward_extrem_cache_txn
+ _delete_old_forward_extrem_cache_txn,
)
def clean_room_for_join(self, room_id):
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 29b511ae..68403206 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -22,6 +22,7 @@ from canonicaljson import json
from twisted.internet import defer
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage._base import LoggingTransaction, SQLBaseStore
from synapse.util.caches.descriptors import cachedInlineCallbacks
@@ -458,11 +459,12 @@ class EventPushActionsWorkerStore(SQLBaseStore):
"Error removing push actions after event persistence failure",
)
- @defer.inlineCallbacks
def _find_stream_orderings_for_times(self):
- yield self.runInteraction(
+ return run_as_background_process(
+ "event_push_action_stream_orderings",
+ self.runInteraction,
"_find_stream_orderings_for_times",
- self._find_stream_orderings_for_times_txn
+ self._find_stream_orderings_for_times_txn,
)
def _find_stream_orderings_for_times_txn(self, txn):
@@ -604,7 +606,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
self._doing_notif_rotation = False
self._rotate_notif_loop = self._clock.looping_call(
- self._rotate_notifs, 30 * 60 * 1000
+ self._start_rotate_notifs, 30 * 60 * 1000,
)
def _set_push_actions_for_event_and_users_txn(self, txn, events_and_contexts,
@@ -787,6 +789,9 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
WHERE room_id = ? AND user_id = ? AND stream_ordering <= ?
""", (room_id, user_id, stream_ordering))
+ def _start_rotate_notifs(self):
+ return run_as_background_process("rotate_notifs", self._rotate_notifs)
+
@defer.inlineCallbacks
def _rotate_notifs(self):
if self._doing_notif_rotation or self.stream_ordering_day_ago is None:
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 2aaab0d0..e8e5a0fe 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -19,7 +19,7 @@ import logging
from collections import OrderedDict, deque, namedtuple
from functools import wraps
-from six import iteritems, itervalues
+from six import iteritems
from six.moves import range
from canonicaljson import json
@@ -33,6 +33,9 @@ from synapse.api.errors import SynapseError
# these are only included to make the type annotations work
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.background_updates import BackgroundUpdateStore
+from synapse.storage.event_federation import EventFederationStore
from synapse.storage.events_worker import EventsWorkerStore
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util.async import ObservableDeferred
@@ -64,7 +67,13 @@ state_delta_reuse_delta_counter = Counter(
def encode_json(json_object):
- return frozendict_json_encoder.encode(json_object)
+ """
+ Encode a Python object as JSON and return it in a Unicode string.
+ """
+ out = frozendict_json_encoder.encode(json_object)
+ if isinstance(out, bytes):
+ out = out.decode('utf8')
+ return out
class _EventPeristenceQueue(object):
@@ -141,25 +150,22 @@ class _EventPeristenceQueue(object):
try:
queue = self._get_drainining_queue(room_id)
for item in queue:
- # handle_queue_loop runs in the sentinel logcontext, so
- # there is no need to preserve_fn when running the
- # callbacks on the deferred.
try:
ret = yield per_item_callback(item)
- item.deferred.callback(ret)
except Exception:
- item.deferred.errback()
+ with PreserveLoggingContext():
+ item.deferred.errback()
+ else:
+ with PreserveLoggingContext():
+ item.deferred.callback(ret)
finally:
queue = self._event_persist_queues.pop(room_id, None)
if queue:
self._event_persist_queues[room_id] = queue
self._currently_persisting_rooms.discard(room_id)
- # set handle_queue_loop off on the background. We don't want to
- # attribute work done in it to the current request, so we drop the
- # logcontext altogether.
- with PreserveLoggingContext():
- handle_queue_loop()
+ # set handle_queue_loop off in the background
+ run_as_background_process("persist_events", handle_queue_loop)
def _get_drainining_queue(self, room_id):
queue = self._event_persist_queues.setdefault(room_id, deque())
@@ -195,7 +201,9 @@ def _retry_on_integrity_error(func):
return f
-class EventsStore(EventsWorkerStore):
+# inherits from EventFederationStore so that we can call _update_backward_extremities
+# and _handle_mult_prev_events (though arguably those could both be moved in here)
+class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore):
EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
@@ -233,12 +241,18 @@ class EventsStore(EventsWorkerStore):
self._state_resolution_handler = hs.get_state_resolution_handler()
+ @defer.inlineCallbacks
def persist_events(self, events_and_contexts, backfilled=False):
"""
Write events to the database
Args:
events_and_contexts: list of tuples of (event, context)
- backfilled: ?
+ backfilled (bool): Whether the events are being retrieved via
+ federation backfill or not. Used to determine if they're "new"
+ events which might update the current state etc.
+
+ Returns:
+ Deferred[int]: the stream ordering of the latest persisted event
"""
partitioned = {}
for event, ctx in events_and_contexts:
@@ -255,10 +269,14 @@ class EventsStore(EventsWorkerStore):
for room_id in partitioned:
self._maybe_start_persisting(room_id)
- return make_deferred_yieldable(
+ yield make_deferred_yieldable(
defer.gatherResults(deferreds, consumeErrors=True)
)
+ max_persisted_id = yield self._stream_id_gen.get_current_token()
+
+ defer.returnValue(max_persisted_id)
+
@defer.inlineCallbacks
@log_function
def persist_event(self, event, context, backfilled=False):
@@ -345,11 +363,14 @@ class EventsStore(EventsWorkerStore):
new_forward_extremeties = {}
# map room_id->(type,state_key)->event_id tracking the full
- # state in each room after adding these events
+ # state in each room after adding these events.
+ # This is simply used to prefill the get_current_state_ids
+ # cache
current_state_for_room = {}
- # map room_id->(to_delete, to_insert) where each entry is
- # a map (type,key)->event_id giving the state delta in each
+ # map room_id->(to_delete, to_insert) where to_delete is a list
+ # of type/state keys to remove from current state, and to_insert
+ # is a map (type,key)->event_id giving the state delta in each
# room
state_delta_for_room = {}
@@ -419,19 +440,40 @@ class EventsStore(EventsWorkerStore):
logger.info(
"Calculating state delta for room %s", room_id,
)
- current_state = yield self._get_new_state_after_events(
- room_id,
- ev_ctx_rm,
- latest_event_ids,
- new_latest_event_ids,
- )
+ with Measure(
+ self._clock,
+ "persist_events.get_new_state_after_events",
+ ):
+ res = yield self._get_new_state_after_events(
+ room_id,
+ ev_ctx_rm,
+ latest_event_ids,
+ new_latest_event_ids,
+ )
+ current_state, delta_ids = res
+
+ # If either are not None then there has been a change,
+ # and we need to work out the delta (or use that
+ # given)
+ if delta_ids is not None:
+ # If there is a delta we know that we've
+ # only added or replaced state, never
+ # removed keys entirely.
+ state_delta_for_room[room_id] = ([], delta_ids)
+ elif current_state is not None:
+ with Measure(
+ self._clock,
+ "persist_events.calculate_state_delta",
+ ):
+ delta = yield self._calculate_state_delta(
+ room_id, current_state,
+ )
+ state_delta_for_room[room_id] = delta
+
+ # If we have the current_state then let's prefill
+ # the cache with it.
if current_state is not None:
current_state_for_room[room_id] = current_state
- delta = yield self._calculate_state_delta(
- room_id, current_state,
- )
- if delta is not None:
- state_delta_for_room[room_id] = delta
yield self.runInteraction(
"persist_events",
@@ -498,7 +540,6 @@ class EventsStore(EventsWorkerStore):
iterable=list(new_latest_event_ids),
retcols=["prev_event_id"],
keyvalues={
- "room_id": room_id,
"is_state": False,
},
desc="_calculate_new_extremeties",
@@ -530,9 +571,15 @@ class EventsStore(EventsWorkerStore):
the new forward extremities for the room.
Returns:
- Deferred[dict[(str,str), str]|None]:
- None if there are no changes to the room state, or
- a dict of (type, state_key) -> event_id].
+ Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
+ Returns a tuple of two state maps, the first being the full new current
+ state and the second being the delta to the existing current state.
+ If both are None then there has been no change.
+
+ If there has been a change then we only return the delta if it has
+ already been calculated. Conversely, if we do know the delta then
+ the new current state is only returned if we've already calculated
+ it.
"""
if not new_latest_event_ids:
@@ -540,18 +587,32 @@ class EventsStore(EventsWorkerStore):
# map from state_group to ((type, key) -> event_id) state map
state_groups_map = {}
+
+ # Map from (prev state group, new state group) -> delta state dict
+ state_group_deltas = {}
+
for ev, ctx in events_context:
if ctx.state_group is None:
- # I don't think this can happen, but let's double-check
- raise Exception(
- "Context for new extremity event %s has no state "
- "group" % (ev.event_id, ),
- )
+ # This should only happen for outlier events.
+ if not ev.internal_metadata.is_outlier():
+ raise Exception(
+ "Context for new event %s has no state "
+ "group" % (ev.event_id, ),
+ )
+ continue
if ctx.state_group in state_groups_map:
continue
- state_groups_map[ctx.state_group] = ctx.current_state_ids
+ # We're only interested in pulling out state that has already
+ # been cached in the context. We'll pull stuff out of the DB later
+ # if necessary.
+ current_state_ids = ctx.get_cached_current_state_ids()
+ if current_state_ids is not None:
+ state_groups_map[ctx.state_group] = current_state_ids
+
+ if ctx.prev_group:
+ state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
# We need to map the event_ids to their state groups. First, let's
# check if the event is one we're persisting, in which case we can
@@ -566,7 +627,7 @@ class EventsStore(EventsWorkerStore):
for event_id in new_latest_event_ids:
# First search in the list of new events we're adding.
for ev, ctx in events_context:
- if event_id == ev.event_id:
+ if event_id == ev.event_id and ctx.state_group is not None:
event_id_to_state_group[event_id] = ctx.state_group
break
else:
@@ -594,7 +655,26 @@ class EventsStore(EventsWorkerStore):
# If the old and new groups are the same then we don't need to do
# anything.
if old_state_groups == new_state_groups:
- return
+ defer.returnValue((None, None))
+
+ if len(new_state_groups) == 1 and len(old_state_groups) == 1:
+ # If we're going from one state group to another, let's check if
+ # we have a delta for that transition. If we do then we can just
+ # return that.
+
+ new_state_group = next(iter(new_state_groups))
+ old_state_group = next(iter(old_state_groups))
+
+ delta_ids = state_group_deltas.get(
+ (old_state_group, new_state_group,), None
+ )
+ if delta_ids is not None:
+ # We have a delta from the existing to new current state,
+ # so let's just return that. If we happen to already have
+ # the current state in memory then lets also return that,
+ # but it doesn't matter if we don't.
+ new_state = state_groups_map.get(new_state_group)
+ defer.returnValue((new_state, delta_ids))
# Now that we have calculated new_state_groups we need to get
# their state IDs so we can resolve to a single state set.
@@ -606,7 +686,7 @@ class EventsStore(EventsWorkerStore):
if len(new_state_groups) == 1:
# If there is only one state group, then we know what the current
# state is.
- defer.returnValue(state_groups_map[new_state_groups.pop()])
+ defer.returnValue((state_groups_map[new_state_groups.pop()], None))
# Ok, we need to defer to the state handler to resolve our state sets.
@@ -625,7 +705,7 @@ class EventsStore(EventsWorkerStore):
room_id, state_groups, events_map, get_events
)
- defer.returnValue(res.state)
+ defer.returnValue((res.state, None))
@defer.inlineCallbacks
def _calculate_state_delta(self, room_id, current_state):
@@ -634,28 +714,20 @@ class EventsStore(EventsWorkerStore):
Assumes that we are only persisting events for one room at a time.
Returns:
- 2-tuple (to_delete, to_insert) where both are state dicts,
- i.e. (type, state_key) -> event_id. `to_delete` are the entries to
- first be deleted from current_state_events, `to_insert` are entries
- to insert.
+ tuple[list, dict] (to_delete, to_insert): where to_delete is the
+ list of type/state_keys to remove from current_state_events and
+ `to_insert` is the map of updates to apply to current_state_events.
"""
existing_state = yield self.get_current_state_ids(room_id)
- existing_events = set(itervalues(existing_state))
- new_events = set(ev_id for ev_id in itervalues(current_state))
- changed_events = existing_events ^ new_events
-
- if not changed_events:
- return
+ to_delete = [
+ key for key in existing_state
+ if key not in current_state
+ ]
- to_delete = {
- key: ev_id for key, ev_id in iteritems(existing_state)
- if ev_id in changed_events
- }
- events_to_insert = (new_events - existing_events)
to_insert = {
key: ev_id for key, ev_id in iteritems(current_state)
- if ev_id in events_to_insert
+ if ev_id != existing_state.get(key)
}
defer.returnValue((to_delete, to_insert))
@@ -678,10 +750,10 @@ class EventsStore(EventsWorkerStore):
delete_existing (bool): True to purge existing table rows for the
events from the database. This is useful when retrying due to
IntegrityError.
- state_delta_for_room (dict[str, (list[str], list[str])]):
+ state_delta_for_room (dict[str, (list, dict)]):
The current-state delta for each room. For each room, a tuple
- (to_delete, to_insert), being a list of event ids to be removed
- from the current state, and a list of event ids to be added to
+ (to_delete, to_insert), being a list of type/state keys to be
+ removed from the current state, and a state set to be added to
the current state.
new_forward_extremeties (dict[str, list[str]]):
The new forward extremities for each room. For each room, a
@@ -759,9 +831,46 @@ class EventsStore(EventsWorkerStore):
def _update_current_state_txn(self, txn, state_delta_by_room, max_stream_order):
for room_id, current_state_tuple in iteritems(state_delta_by_room):
to_delete, to_insert = current_state_tuple
+
+ # First we add entries to the current_state_delta_stream. We
+ # do this before updating the current_state_events table so
+ # that we can use it to calculate the `prev_event_id`. (This
+ # allows us to not have to pull out the existing state
+ # unnecessarily).
+ sql = """
+ INSERT INTO current_state_delta_stream
+ (stream_id, room_id, type, state_key, event_id, prev_event_id)
+ SELECT ?, ?, ?, ?, ?, (
+ SELECT event_id FROM current_state_events
+ WHERE room_id = ? AND type = ? AND state_key = ?
+ )
+ """
+ txn.executemany(sql, (
+ (
+ max_stream_order, room_id, etype, state_key, None,
+ room_id, etype, state_key,
+ )
+ for etype, state_key in to_delete
+ # We sanity check that we're deleting rather than updating
+ if (etype, state_key) not in to_insert
+ ))
+ txn.executemany(sql, (
+ (
+ max_stream_order, room_id, etype, state_key, ev_id,
+ room_id, etype, state_key,
+ )
+ for (etype, state_key), ev_id in iteritems(to_insert)
+ ))
+
+ # Now we actually update the current_state_events table
+
txn.executemany(
- "DELETE FROM current_state_events WHERE event_id = ?",
- [(ev_id,) for ev_id in itervalues(to_delete)],
+ "DELETE FROM current_state_events"
+ " WHERE room_id = ? AND type = ? AND state_key = ?",
+ (
+ (room_id, etype, state_key)
+ for etype, state_key in itertools.chain(to_delete, to_insert)
+ ),
)
self._simple_insert_many_txn(
@@ -778,25 +887,6 @@ class EventsStore(EventsWorkerStore):
],
)
- state_deltas = {key: None for key in to_delete}
- state_deltas.update(to_insert)
-
- self._simple_insert_many_txn(
- txn,
- table="current_state_delta_stream",
- values=[
- {
- "stream_id": max_stream_order,
- "room_id": room_id,
- "type": key[0],
- "state_key": key[1],
- "event_id": ev_id,
- "prev_event_id": to_delete.get(key, None),
- }
- for key, ev_id in iteritems(state_deltas)
- ]
- )
-
txn.call_after(
self._curr_state_delta_stream_cache.entity_has_changed,
room_id, max_stream_order,
@@ -810,7 +900,8 @@ class EventsStore(EventsWorkerStore):
# and which we have added, then we invalidate the caches for all
# those users.
members_changed = set(
- state_key for ev_type, state_key in state_deltas
+ state_key
+ for ev_type, state_key in itertools.chain(to_delete, to_insert)
if ev_type == EventTypes.Member
)
@@ -983,7 +1074,7 @@ class EventsStore(EventsWorkerStore):
metadata_json = encode_json(
event.internal_metadata.get_dict()
- ).decode("UTF-8")
+ )
sql = (
"UPDATE event_json SET internal_metadata = ?"
@@ -1066,7 +1157,7 @@ class EventsStore(EventsWorkerStore):
):
txn.executemany(
"DELETE FROM %s WHERE room_id = ? AND event_id = ?" % (table,),
- [(ev.event_id,) for ev, _ in events_and_contexts]
+ [(ev.room_id, ev.event_id) for ev, _ in events_and_contexts]
)
def _store_event_txn(self, txn, events_and_contexts):
@@ -1097,8 +1188,8 @@ class EventsStore(EventsWorkerStore):
"room_id": event.room_id,
"internal_metadata": encode_json(
event.internal_metadata.get_dict()
- ).decode("UTF-8"),
- "json": encode_json(event_dict(event)).decode("UTF-8"),
+ ),
+ "json": encode_json(event_dict(event)),
}
for event, _ in events_and_contexts
],
@@ -1117,7 +1208,6 @@ class EventsStore(EventsWorkerStore):
"type": event.type,
"processed": True,
"outlier": event.internal_metadata.is_outlier(),
- "content": encode_json(event.content).decode("UTF-8"),
"origin_server_ts": int(event.origin_server_ts),
"received_ts": self._clock.time_msec(),
"sender": event.sender,
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 67433606..9b4cfeb8 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -19,12 +19,13 @@ from canonicaljson import json
from twisted.internet import defer
-from synapse.api.errors import SynapseError
+from synapse.api.errors import NotFoundError
# these are only included to make the type annotations work
from synapse.events import EventBase # noqa: F401
from synapse.events import FrozenEvent
from synapse.events.snapshot import EventContext # noqa: F401
from synapse.events.utils import prune_event
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.logcontext import (
LoggingContext,
PreserveLoggingContext,
@@ -76,7 +77,7 @@ class EventsWorkerStore(SQLBaseStore):
@defer.inlineCallbacks
def get_event(self, event_id, check_redacted=True,
get_prev_content=False, allow_rejected=False,
- allow_none=False):
+ allow_none=False, check_room_id=None):
"""Get an event from the database by event_id.
Args:
@@ -87,7 +88,9 @@ class EventsWorkerStore(SQLBaseStore):
include the previous states content in the unsigned field.
allow_rejected (bool): If True return rejected events.
allow_none (bool): If True, return None if no event found, if
- False throw an exception.
+ False throw a NotFoundError
+ check_room_id (str|None): if not None, check the room of the found event.
+ If there is a mismatch, behave as per allow_none.
Returns:
Deferred : A FrozenEvent.
@@ -99,10 +102,16 @@ class EventsWorkerStore(SQLBaseStore):
allow_rejected=allow_rejected,
)
- if not events and not allow_none:
- raise SynapseError(404, "Could not find event %s" % (event_id,))
+ event = events[0] if events else None
- defer.returnValue(events[0] if events else None)
+ if event is not None and check_room_id is not None:
+ if event.room_id != check_room_id:
+ event = None
+
+ if event is None and not allow_none:
+ raise NotFoundError("Could not find event %s" % (event_id,))
+
+ defer.returnValue(event)
@defer.inlineCallbacks
def get_events(self, event_ids, check_redacted=True,
@@ -322,10 +331,11 @@ class EventsWorkerStore(SQLBaseStore):
should_start = False
if should_start:
- with PreserveLoggingContext():
- self.runWithConnection(
- self._do_fetch
- )
+ run_as_background_process(
+ "fetch_events",
+ self.runWithConnection,
+ self._do_fetch,
+ )
logger.debug("Loading %d events", len(events))
with PreserveLoggingContext():
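
A usage sketch of the new check_room_id argument to get_event (hypothetical caller; the updated RoomEventServlet at the top of this patch relies on the same semantics via the event handler):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_event_in_room(store, room_id, event_id):
        # An event from another room now behaves exactly like a missing
        # one: None here, or NotFoundError if allow_none were False.
        event = yield store.get_event(
            event_id, allow_none=True, check_room_id=room_id,
        )
        defer.returnValue(event)
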
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index be655d28..6a502896 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -21,7 +21,6 @@ from canonicaljson import json
from twisted.internet import defer
-from synapse.api.constants import EventTypes
from synapse.push.baserules import list_with_base_rules
from synapse.storage.appservice import ApplicationServiceWorkerStore
from synapse.storage.pusher import PusherWorkerStore
@@ -186,6 +185,7 @@ class PushRulesWorkerStore(ApplicationServiceWorkerStore,
defer.returnValue(results)
+ @defer.inlineCallbacks
def bulk_get_push_rules_for_room(self, event, context):
state_group = context.state_group
if not state_group:
@@ -195,9 +195,11 @@ class PushRulesWorkerStore(ApplicationServiceWorkerStore,
# To do this we set the state_group to a new object as object() != object()
state_group = object()
- return self._bulk_get_push_rules_for_room(
- event.room_id, state_group, context.current_state_ids, event=event
+ current_state_ids = yield context.get_current_state_ids(self)
+ result = yield self._bulk_get_push_rules_for_room(
+ event.room_id, state_group, current_state_ids, event=event
)
+ defer.returnValue(result)
@cachedInlineCallbacks(num_args=2, cache_context=True)
def _bulk_get_push_rules_for_room(self, room_id, state_group, current_state_ids,
@@ -247,18 +249,6 @@ class PushRulesWorkerStore(ApplicationServiceWorkerStore,
if uid in local_users_in_room:
user_ids.add(uid)
- forgotten = yield self.who_forgot_in_room(
- event.room_id, on_invalidate=cache_context.invalidate,
- )
-
- for row in forgotten:
- user_id = row["user_id"]
- event_id = row["event_id"]
-
- mem_id = current_state_ids.get((EventTypes.Member, user_id), None)
- if event_id == mem_id:
- user_ids.discard(user_id)
-
rules_by_user = yield self.bulk_get_push_rules(
user_ids, on_invalidate=cache_context.invalidate,
)
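
The bulk_get_push_rules_for_room hunk shows the mechanical part of the EventContext rework: context.current_state_ids is no longer a plain attribute but must be fetched through the deferred-returning context.get_current_state_ids(store), so each caller becomes an inlineCallbacks generator. A before/after sketch of that conversion; _compute is hypothetical:

    from twisted.internet import defer


    class ExampleStore(object):
        def get_thing_old(self, context):
            # before: synchronous attribute access, deferred returned directly
            return self._compute(context.current_state_ids)

        @defer.inlineCallbacks
        def get_thing_new(self, context):
            # after: yield the accessor, then hand back the result
            current_state_ids = yield context.get_current_state_ids(self)
            result = yield self._compute(current_state_ids)
            defer.returnValue(result)
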
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index cc273a57..8443bd4c 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -233,7 +233,7 @@ class PusherStore(PusherWorkerStore):
)
if newly_inserted:
- self.runInteraction(
+ yield self.runInteraction(
"add_pusher",
self._invalidate_cache_and_stream,
self.get_if_user_has_pusher, (user_id,)
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 02a802be..10dce21c 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -24,7 +24,7 @@ from canonicaljson import json
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
-from synapse.storage.events import EventsWorkerStore
+from synapse.storage.events_worker import EventsWorkerStore
from synapse.types import get_domain_from_id
from synapse.util.async import Linearizer
from synapse.util.caches import intern_string
@@ -232,6 +232,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
defer.returnValue(user_who_share_room)
+ @defer.inlineCallbacks
def get_joined_users_from_context(self, event, context):
state_group = context.state_group
if not state_group:
@@ -241,11 +242,13 @@ class RoomMemberWorkerStore(EventsWorkerStore):
# To do this we set the state_group to a new object as object() != object()
state_group = object()
- return self._get_joined_users_from_context(
- event.room_id, state_group, context.current_state_ids,
+ current_state_ids = yield context.get_current_state_ids(self)
+ result = yield self._get_joined_users_from_context(
+ event.room_id, state_group, current_state_ids,
event=event,
context=context,
)
+ defer.returnValue(result)
def get_joined_users_from_state(self, room_id, state_entry):
state_group = state_entry.state_group
@@ -458,17 +461,29 @@ class RoomMemberWorkerStore(EventsWorkerStore):
def _get_joined_hosts_cache(self, room_id):
return _JoinedHostsCache(self, room_id)
- @cached()
- def who_forgot_in_room(self, room_id):
- return self._simple_select_list(
- table="room_memberships",
- retcols=("user_id", "event_id"),
- keyvalues={
- "room_id": room_id,
- "forgotten": 1,
- },
- desc="who_forgot"
- )
+ @cachedInlineCallbacks(num_args=2)
+ def did_forget(self, user_id, room_id):
+ """Returns whether user_id has elected to discard history for room_id.
+
+ Returns False if they have since re-joined."""
+ def f(txn):
+ sql = (
+ "SELECT"
+ " COUNT(*)"
+ " FROM"
+ " room_memberships"
+ " WHERE"
+ " user_id = ?"
+ " AND"
+ " room_id = ?"
+ " AND"
+ " forgotten = 0"
+ )
+ txn.execute(sql, (user_id, room_id))
+ rows = txn.fetchall()
+ return rows[0][0]
+ count = yield self.runInteraction("did_forget_membership", f)
+ defer.returnValue(count == 0)
class RoomMemberStore(RoomMemberWorkerStore):
@@ -577,36 +592,11 @@ class RoomMemberStore(RoomMemberWorkerStore):
)
txn.execute(sql, (user_id, room_id))
- txn.call_after(self.did_forget.invalidate, (user_id, room_id))
self._invalidate_cache_and_stream(
- txn, self.who_forgot_in_room, (room_id,)
+ txn, self.did_forget, (user_id, room_id,),
)
return self.runInteraction("forget_membership", f)
- @cachedInlineCallbacks(num_args=2)
- def did_forget(self, user_id, room_id):
- """Returns whether user_id has elected to discard history for room_id.
-
- Returns False if they have since re-joined."""
- def f(txn):
- sql = (
- "SELECT"
- " COUNT(*)"
- " FROM"
- " room_memberships"
- " WHERE"
- " user_id = ?"
- " AND"
- " room_id = ?"
- " AND"
- " forgotten = 0"
- )
- txn.execute(sql, (user_id, room_id))
- rows = txn.fetchall()
- return rows[0][0]
- count = yield self.runInteraction("did_forget_membership", f)
- defer.returnValue(count == 0)
-
@defer.inlineCallbacks
def _background_add_membership_profile(self, progress, batch_size):
target_min_stream_id = progress.get(
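
The relocated did_forget query is easy to misread: it counts membership rows that are *not* marked forgotten, so only a count of zero (every membership row forgotten) reports the room as forgotten. A self-contained sqlite sketch of just that logic:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE room_memberships"
        " (user_id TEXT, room_id TEXT, forgotten INTEGER DEFAULT 0)"
    )
    # an old membership the user forgot, plus a later re-join
    conn.execute("INSERT INTO room_memberships VALUES ('@u:hs', '!r:hs', 1)")
    conn.execute("INSERT INTO room_memberships VALUES ('@u:hs', '!r:hs', 0)")

    (count,) = conn.execute(
        "SELECT COUNT(*) FROM room_memberships"
        " WHERE user_id = ? AND room_id = ? AND forgotten = 0",
        ("@u:hs", "!r:hs"),
    ).fetchone()

    print(count == 0)  # False: the re-join row means did_forget is False
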
diff --git a/synapse/storage/schema/delta/50/make_event_content_nullable.py b/synapse/storage/schema/delta/50/make_event_content_nullable.py
new file mode 100644
index 00000000..6dd467b6
--- /dev/null
+++ b/synapse/storage/schema/delta/50/make_event_content_nullable.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+We want to stop populating 'event.content', so we need to make it nullable.
+
+If this has to be rolled back, then the following should populate the missing data:
+
+Postgres:
+
+ UPDATE events SET content=(ej.json::json)->'content' FROM event_json ej
+ WHERE ej.event_id = events.event_id AND
+ stream_ordering < (
+ SELECT stream_ordering FROM events WHERE content IS NOT NULL
+ ORDER BY stream_ordering LIMIT 1
+ );
+
+ UPDATE events SET content=(ej.json::json)->'content' FROM event_json ej
+ WHERE ej.event_id = events.event_id AND
+ stream_ordering > (
+ SELECT stream_ordering FROM events WHERE content IS NOT NULL
+ ORDER BY stream_ordering DESC LIMIT 1
+ );
+
+SQLite:
+
+ UPDATE events SET content=(
+ SELECT json_extract(json,'$.content') FROM event_json ej
+ WHERE ej.event_id = events.event_id
+ )
+ WHERE
+ stream_ordering < (
+ SELECT stream_ordering FROM events WHERE content IS NOT NULL
+ ORDER BY stream_ordering LIMIT 1
+ )
+ OR stream_ordering > (
+ SELECT stream_ordering FROM events WHERE content IS NOT NULL
+ ORDER BY stream_ordering DESC LIMIT 1
+ );
+
+"""
+
+import logging
+
+from synapse.storage.engines import PostgresEngine
+
+logger = logging.getLogger(__name__)
+
+
+def run_create(cur, database_engine, *args, **kwargs):
+ pass
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+ if isinstance(database_engine, PostgresEngine):
+ cur.execute("""
+ ALTER TABLE events ALTER COLUMN content DROP NOT NULL;
+ """)
+ return
+
+ # sqlite is an arse about this. ref: https://www.sqlite.org/lang_altertable.html
+
+ cur.execute("SELECT sql FROM sqlite_master WHERE tbl_name='events' AND type='table'")
+ (oldsql,) = cur.fetchone()
+
+ sql = oldsql.replace("content TEXT NOT NULL", "content TEXT")
+ if sql == oldsql:
+ raise Exception("Couldn't find null constraint to drop in %s" % oldsql)
+
+ logger.info("Replacing definition of 'events' with: %s", sql)
+
+ cur.execute("PRAGMA schema_version")
+ (oldver,) = cur.fetchone()
+ cur.execute("PRAGMA writable_schema=ON")
+ cur.execute(
+ "UPDATE sqlite_master SET sql=? WHERE tbl_name='events' AND type='table'",
+ (sql, ),
+ )
+ cur.execute("PRAGMA schema_version=%i" % (oldver + 1,))
+ cur.execute("PRAGMA writable_schema=OFF")
diff --git a/synapse/storage/schema/full_schemas/16/event_edges.sql b/synapse/storage/schema/full_schemas/16/event_edges.sql
index 52eec883..6b5a5a88 100644
--- a/synapse/storage/schema/full_schemas/16/event_edges.sql
+++ b/synapse/storage/schema/full_schemas/16/event_edges.sql
@@ -37,7 +37,8 @@ CREATE TABLE IF NOT EXISTS event_edges(
event_id TEXT NOT NULL,
prev_event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
- is_state BOOL NOT NULL,
+ is_state BOOL NOT NULL, -- true if this is a prev_state edge rather than a regular
+ -- event dag edge.
UNIQUE (event_id, prev_event_id, room_id, is_state)
);
diff --git a/synapse/storage/schema/full_schemas/16/im.sql b/synapse/storage/schema/full_schemas/16/im.sql
index ba534680..5f5cb8d0 100644
--- a/synapse/storage/schema/full_schemas/16/im.sql
+++ b/synapse/storage/schema/full_schemas/16/im.sql
@@ -19,7 +19,12 @@ CREATE TABLE IF NOT EXISTS events(
event_id TEXT NOT NULL,
type TEXT NOT NULL,
room_id TEXT NOT NULL,
- content TEXT NOT NULL,
+
+ -- 'content' used to be created NOT NULL, but as of delta 50 we drop that constraint.
+ -- the hack we use to drop the constraint doesn't work for an in-memory sqlite
+ -- database, which breaks the sytests. Hence, we now create it nullable from the start.
+ content TEXT,
+
unrecognized_keys TEXT,
processed BOOL NOT NULL,
outlier BOOL NOT NULL,
diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py
index 470212aa..5623391f 100644
--- a/synapse/storage/signatures.py
+++ b/synapse/storage/signatures.py
@@ -74,7 +74,7 @@ class SignatureWorkerStore(SQLBaseStore):
txn (cursor):
event_id (str): Id for the Event.
Returns:
- A dict of algorithm -> hash.
+ A dict[unicode, bytes] of algorithm -> hash.
"""
query = (
"SELECT algorithm, hash"
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 89a05c46..b27b3ae1 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -186,7 +186,17 @@ class StateGroupWorkerStore(SQLBaseStore):
@defer.inlineCallbacks
def _get_state_groups_from_groups(self, groups, types):
- """Returns dictionary state_group -> (dict of (type, state_key) -> event id)
+ """Returns the state groups for a given set of groups, filtering on
+ types of state events.
+
+ Args:
+ groups(list[int]): list of state group IDs to query
+ types (Iterable[(str, str|None)]|None): list of 2-tuples of the form
+ (`type`, `state_key`), where a `state_key` of `None` matches all
+ state_keys for the `type`. If None, all types are returned.
+
+ Returns:
+ dictionary state_group -> (dict of (type, state_key) -> event id)
"""
results = {}
@@ -200,8 +210,11 @@ class StateGroupWorkerStore(SQLBaseStore):
defer.returnValue(results)
- def _get_state_groups_from_groups_txn(self, txn, groups, types=None):
+ def _get_state_groups_from_groups_txn(
+ self, txn, groups, types=None,
+ ):
results = {group: {} for group in groups}
+
if types is not None:
types = list(set(types)) # deduplicate types list
@@ -239,7 +252,7 @@ class StateGroupWorkerStore(SQLBaseStore):
# Turns out that postgres doesn't like doing a list of OR's and
# is about 1000x slower, so we just issue a query for each specific
# type seperately.
- if types:
+ if types is not None:
clause_to_args = [
(
"AND type = ? AND state_key = ?",
@@ -278,6 +291,7 @@ class StateGroupWorkerStore(SQLBaseStore):
else:
where_clauses.append("(type = ? AND state_key = ?)")
where_args.extend([typ[0], typ[1]])
+
where_clause = "AND (%s)" % (" OR ".join(where_clauses))
else:
where_clause = ""
@@ -332,16 +346,20 @@ class StateGroupWorkerStore(SQLBaseStore):
return results
@defer.inlineCallbacks
- def get_state_for_events(self, event_ids, types):
+ def get_state_for_events(self, event_ids, types, filtered_types=None):
"""Given a list of event_ids and type tuples, return a list of state
dicts for each event. The state dicts will only have the type/state_keys
that are in the `types` list.
Args:
- event_ids (list)
- types (list): List of (type, state_key) tuples which are used to
- filter the state fetched. `state_key` may be None, which matches
- any `state_key`
+ event_ids (list[str])
+ types (list[(str, str|None)]|None): List of (type, state_key) tuples
+ which are used to filter the state fetched. If `state_key` is None,
+ all events of the given type are returned.
+ May be None, which matches any key.
+ filtered_types(list[str]|None): Only apply filtering via `types` to this
+ list of event types. Other types of events are returned unfiltered.
+ If None, `types` filtering is applied to all events.
Returns:
deferred: A list of dicts corresponding to the event_ids given.
@@ -352,7 +370,7 @@ class StateGroupWorkerStore(SQLBaseStore):
)
groups = set(itervalues(event_to_groups))
- group_to_state = yield self._get_state_for_groups(groups, types)
+ group_to_state = yield self._get_state_for_groups(groups, types, filtered_types)
state_event_map = yield self.get_events(
[ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)],
@@ -371,15 +389,19 @@ class StateGroupWorkerStore(SQLBaseStore):
defer.returnValue({event: event_to_state[event] for event in event_ids})
@defer.inlineCallbacks
- def get_state_ids_for_events(self, event_ids, types=None):
+ def get_state_ids_for_events(self, event_ids, types=None, filtered_types=None):
"""
Get the state dicts corresponding to a list of events
Args:
event_ids(list(str)): events whose state should be returned
- types(list[(str, str)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. May be None, which
- matches any key
+ types(list[(str, str|None)]|None): List of (type, state_key) tuples
+ which are used to filter the state fetched. If `state_key` is None,
+ all events of the given type are returned.
+ May be None, which matches any key.
+ filtered_types(list[str]|None): Only apply filtering via `types` to this
+ list of event types. Other types of events are returned unfiltered.
+ If None, `types` filtering is applied to all events.
Returns:
A deferred dict from event_id -> (type, state_key) -> state_event
@@ -389,7 +411,7 @@ class StateGroupWorkerStore(SQLBaseStore):
)
groups = set(itervalues(event_to_groups))
- group_to_state = yield self._get_state_for_groups(groups, types)
+ group_to_state = yield self._get_state_for_groups(groups, types, filtered_types)
event_to_state = {
event_id: group_to_state[group]
@@ -399,37 +421,45 @@ class StateGroupWorkerStore(SQLBaseStore):
defer.returnValue({event: event_to_state[event] for event in event_ids})
@defer.inlineCallbacks
- def get_state_for_event(self, event_id, types=None):
+ def get_state_for_event(self, event_id, types=None, filtered_types=None):
"""
Get the state dict corresponding to a particular event
Args:
event_id(str): event whose state should be returned
- types(list[(str, str)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. May be None, which
- matches any key
+ types(list[(str, str|None)]|None): List of (type, state_key) tuples
+ which are used to filter the state fetched. If `state_key` is None,
+ all events of the given type are returned.
+ May be None, which matches any key.
+ filtered_types(list[str]|None): Only apply filtering via `types` to this
+ list of event types. Other types of events are returned unfiltered.
+ If None, `types` filtering is applied to all events.
Returns:
A deferred dict from (type, state_key) -> state_event
"""
- state_map = yield self.get_state_for_events([event_id], types)
+ state_map = yield self.get_state_for_events([event_id], types, filtered_types)
defer.returnValue(state_map[event_id])
@defer.inlineCallbacks
- def get_state_ids_for_event(self, event_id, types=None):
+ def get_state_ids_for_event(self, event_id, types=None, filtered_types=None):
"""
Get the state dict corresponding to a particular event
Args:
event_id(str): event whose state should be returned
- types(list[(str, str)]|None): List of (type, state_key) tuples
- which are used to filter the state fetched. May be None, which
- matches any key
+ types(list[(str, str|None)]|None): List of (type, state_key) tuples
+ which are used to filter the state fetched. If `state_key` is None,
+ all events of the given type are returned.
+ May be None, which matches any key.
+ filtered_types(list[str]|None): Only apply filtering via `types` to this
+ list of event types. Other types of events are returned unfiltered.
+ If None, `types` filtering is applied to all events.
Returns:
A deferred dict from (type, state_key) -> state_event
"""
- state_map = yield self.get_state_ids_for_events([event_id], types)
+ state_map = yield self.get_state_ids_for_events([event_id], types, filtered_types)
defer.returnValue(state_map[event_id])
@cached(max_entries=50000)
@@ -460,56 +490,73 @@ class StateGroupWorkerStore(SQLBaseStore):
defer.returnValue({row["event_id"]: row["state_group"] for row in rows})
- def _get_some_state_from_cache(self, group, types):
+ def _get_some_state_from_cache(self, group, types, filtered_types=None):
"""Checks if group is in cache. See `_get_state_for_groups`
- Returns 3-tuple (`state_dict`, `missing_types`, `got_all`).
- `missing_types` is the list of types that aren't in the cache for that
- group. `got_all` is a bool indicating if we successfully retrieved all
+ Args:
+ group(int): The state group to lookup
+ types(list[(str, str|None)]): List of 2-tuples of the form
+ (`type`, `state_key`), where a `state_key` of `None` matches all
+ state_keys for the `type`.
+ filtered_types(list[str]|None): Only apply filtering via `types` to this
+ list of event types. Other types of events are returned unfiltered.
+ If None, `types` filtering is applied to all events.
+
+ Returns 2-tuple (`state_dict`, `got_all`).
+ `got_all` is a bool indicating if we successfully retrieved all
requested state from the cache, if False we need to query the DB for the
missing state.
-
- Args:
- group: The state group to lookup
- types (list): List of 2-tuples of the form (`type`, `state_key`),
- where a `state_key` of `None` matches all state_keys for the
- `type`.
"""
is_all, known_absent, state_dict_ids = self._state_group_cache.get(group)
type_to_key = {}
- missing_types = set()
+
+ # tracks whether any of our requested types are missing from the cache
+ missing_types = False
for typ, state_key in types:
key = (typ, state_key)
- if state_key is None:
+
+ if (
+ state_key is None or
+ (filtered_types is not None and typ not in filtered_types)
+ ):
type_to_key[typ] = None
- missing_types.add(key)
+ # we mark the type as missing from the cache because
+ # when the cache was populated it might have been done with a
+ # restricted set of state_keys, so the wildcard will not work
+ # and the cache may be incomplete.
+ missing_types = True
else:
if type_to_key.get(typ, object()) is not None:
type_to_key.setdefault(typ, set()).add(state_key)
if key not in state_dict_ids and key not in known_absent:
- missing_types.add(key)
+ missing_types = True
sentinel = object()
def include(typ, state_key):
valid_state_keys = type_to_key.get(typ, sentinel)
if valid_state_keys is sentinel:
- return False
+ return filtered_types is not None and typ not in filtered_types
if valid_state_keys is None:
return True
if state_key in valid_state_keys:
return True
return False
- got_all = is_all or not missing_types
+ got_all = is_all
+ if not got_all:
+ # the cache is incomplete. We may still have got all the results we need, if
+ # we don't have any wildcards in the match list.
+ if not missing_types and filtered_types is None:
+ got_all = True
return {
k: v for k, v in iteritems(state_dict_ids)
if include(k[0], k[1])
- }, missing_types, got_all
+ }, got_all
def _get_all_state_from_cache(self, group):
"""Checks if group is in cache. See `_get_state_for_groups`
@@ -526,7 +573,7 @@ class StateGroupWorkerStore(SQLBaseStore):
return state_dict_ids, is_all
@defer.inlineCallbacks
- def _get_state_for_groups(self, groups, types=None):
+ def _get_state_for_groups(self, groups, types=None, filtered_types=None):
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key
@@ -540,6 +587,9 @@ class StateGroupWorkerStore(SQLBaseStore):
Otherwise, each entry should be a `(type, state_key)` tuple to
include in the response. A `state_key` of None is a wildcard
meaning that we require all state with that type.
+ filtered_types(list[str]|None): Only apply filtering via `types` to this
+ list of event types. Other types of events are returned unfiltered.
+ If None, `types` filtering is applied to all events.
Returns:
Deferred[dict[int, dict[(type, state_key), EventBase]]]
@@ -551,8 +601,8 @@ class StateGroupWorkerStore(SQLBaseStore):
missing_groups = []
if types is not None:
for group in set(groups):
- state_dict_ids, _, got_all = self._get_some_state_from_cache(
- group, types,
+ state_dict_ids, got_all = self._get_some_state_from_cache(
+ group, types, filtered_types
)
results[group] = state_dict_ids
@@ -579,13 +629,13 @@ class StateGroupWorkerStore(SQLBaseStore):
# cache. Hence, if we are doing a wildcard lookup, populate the
# cache fully so that we can do an efficient lookup next time.
- if types and any(k is None for (t, k) in types):
+ if filtered_types or (types and any(k is None for (t, k) in types)):
types_to_fetch = None
else:
types_to_fetch = types
group_to_state_dict = yield self._get_state_groups_from_groups(
- missing_groups, types_to_fetch,
+ missing_groups, types_to_fetch
)
for group, group_state_dict in iteritems(group_to_state_dict):
@@ -595,7 +645,10 @@ class StateGroupWorkerStore(SQLBaseStore):
if types:
for k, v in iteritems(group_state_dict):
(typ, _) = k
- if k in types or (typ, None) in types:
+ if (
+ (k in types or (typ, None) in types) or
+ (filtered_types and typ not in filtered_types)
+ ):
state_dict[k] = v
else:
state_dict.update(group_state_dict)
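
To make the new filtered_types argument concrete, here is a sketch of the kind of call it enables: fetch all of an event's state, except that m.room.member events are filtered down to a single user (the shape of query needed for lazy-loading room members). It assumes a store implementing StateGroupWorkerStore is in scope:

    from twisted.internet import defer

    from synapse.api.constants import EventTypes


    @defer.inlineCallbacks
    def state_with_one_member(store, event_id, user_id):
        # only EventTypes.Member is subject to the `types` filter; every
        # other state type comes back unfiltered
        state_ids = yield store.get_state_ids_for_event(
            event_id,
            types=[(EventTypes.Member, user_id)],
            filtered_types=[EventTypes.Member],
        )
        defer.returnValue(state_ids)
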
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 66856342..b9f2b74a 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -43,7 +43,7 @@ from twisted.internet import defer
from synapse.storage._base import SQLBaseStore
from synapse.storage.engines import PostgresEngine
-from synapse.storage.events import EventsWorkerStore
+from synapse.storage.events_worker import EventsWorkerStore
from synapse.types import RoomStreamToken
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
@@ -527,7 +527,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
)
@defer.inlineCallbacks
- def get_events_around(self, room_id, event_id, before_limit, after_limit):
+ def get_events_around(
+ self, room_id, event_id, before_limit, after_limit, event_filter=None,
+ ):
"""Retrieve events and pagination tokens around a given event in a
room.
@@ -536,6 +538,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
event_id (str)
before_limit (int)
after_limit (int)
+ event_filter (Filter|None)
Returns:
dict
@@ -543,7 +546,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
results = yield self.runInteraction(
"get_events_around", self._get_events_around_txn,
- room_id, event_id, before_limit, after_limit
+ room_id, event_id, before_limit, after_limit, event_filter,
)
events_before = yield self._get_events(
@@ -563,7 +566,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
"end": results["after"]["token"],
})
- def _get_events_around_txn(self, txn, room_id, event_id, before_limit, after_limit):
+ def _get_events_around_txn(
+ self, txn, room_id, event_id, before_limit, after_limit, event_filter,
+ ):
"""Retrieves event_ids and pagination tokens around a given event in a
room.
@@ -572,6 +577,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
event_id (str)
before_limit (int)
after_limit (int)
+ event_filter (Filter|None)
Returns:
dict
@@ -601,11 +607,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
rows, start_token = self._paginate_room_events_txn(
txn, room_id, before_token, direction='b', limit=before_limit,
+ event_filter=event_filter,
)
events_before = [r.event_id for r in rows]
rows, end_token = self._paginate_room_events_txn(
txn, room_id, after_token, direction='f', limit=after_limit,
+ event_filter=event_filter,
)
events_after = [r.event_id for r in rows]
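
The new event_filter parameter threads a client-supplied Filter down to the pagination SQL, so context expansions can be trimmed server-side. A hedged usage sketch, assuming Filter from synapse.api.filtering accepts a filter-JSON dict; the helper name is hypothetical:

    from synapse.api.filtering import Filter


    def context_messages_only(store, room_id, event_id):
        # hypothetical helper: expand the context with message events only
        message_filter = Filter({"types": ["m.room.message"]})
        return store.get_events_around(
            room_id, event_id, before_limit=10, after_limit=10,
            event_filter=message_filter,
        )
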
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index c3bc94f5..428e7fa3 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -22,6 +22,7 @@ from canonicaljson import encode_canonical_json, json
from twisted.internet import defer
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.caches.descriptors import cached
from ._base import SQLBaseStore
@@ -57,7 +58,7 @@ class TransactionStore(SQLBaseStore):
def __init__(self, db_conn, hs):
super(TransactionStore, self).__init__(db_conn, hs)
- self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)
+ self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000)
def get_received_txn_response(self, transaction_id, origin):
"""For an incoming transaction from a given origin, check if we have
@@ -271,6 +272,11 @@ class TransactionStore(SQLBaseStore):
txn.execute(query, (self._clock.time_msec(),))
return self.cursor_to_dict(txn)
+ def _start_cleanup_transactions(self):
+ return run_as_background_process(
+ "cleanup_transactions", self._cleanup_transactions,
+ )
+
def _cleanup_transactions(self):
now = self._clock.time_msec()
month_ago = now - 30 * 24 * 60 * 60 * 1000
diff --git a/synapse/types.py b/synapse/types.py
index 08f058f7..41afb27a 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -137,7 +137,7 @@ class DomainSpecificString(
@classmethod
def from_string(cls, s):
"""Parse the string given by 's' into a structure object."""
- if len(s) < 1 or s[0] != cls.SIGIL:
+ if len(s) < 1 or s[0:1] != cls.SIGIL:
raise SynapseError(400, "Expected %s string to start with '%s'" % (
cls.__name__, cls.SIGIL,
))
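
The one-character types.py change is a Python 3 safety fix: indexing a bytestring yields an int on Python 3, while slicing always yields a (byte)string, so s[0:1] keeps the sigil comparison meaningful whether s arrives as str or bytes. To illustrate:

    s = b"@user:example.com"

    print(s[0])            # Python 3: 64, an int -- never equal to a sigil string
    print(s[0:1])          # b"@" on both Python 2 and Python 3
    print(s[0:1] == b"@")  # True
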
diff --git a/synapse/util/async.py b/synapse/util/async.py
index 5d0fb391..a7094e2f 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,7 +13,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import collections
import logging
from contextlib import contextmanager
@@ -156,54 +157,72 @@ def concurrently_execute(func, args, limit):
class Linearizer(object):
- """Linearizes access to resources based on a key. Useful to ensure only one
- thing is happening at a time on a given resource.
+ """Limits concurrent access to resources based on a key. Useful to ensure
+ only a few things happen at a time on a given resource.
Example:
- with (yield linearizer.queue("test_key")):
+ with (yield limiter.queue("test_key")):
# do some work.
"""
- def __init__(self, name=None, clock=None):
+ def __init__(self, name=None, max_count=1, clock=None):
+ """
+ Args:
+ max_count(int): The maximum number of concurrent accesses
+ """
if name is None:
self.name = id(self)
else:
self.name = name
- self.key_to_defer = {}
if not clock:
from twisted.internet import reactor
clock = Clock(reactor)
self._clock = clock
+ self.max_count = max_count
+
+ # key_to_defer is a map from the key to a 2 element list where
+ # the first element is the number of things executing, and
+ # the second element is an OrderedDict, where the keys are deferreds for the
+ # things blocked from executing.
+ self.key_to_defer = {}
@defer.inlineCallbacks
def queue(self, key):
- # If there is already a deferred in the queue, we pull it out so that
- # we can wait on it later.
- # Then we replace it with a deferred that we resolve *after* the
- # context manager has exited.
- # We only return the context manager after the previous deferred has
- # resolved.
- # This all has the net effect of creating a chain of deferreds that
- # wait for the previous deferred before starting their work.
- current_defer = self.key_to_defer.get(key)
+ entry = self.key_to_defer.setdefault(key, [0, collections.OrderedDict()])
- new_defer = defer.Deferred()
- self.key_to_defer[key] = new_defer
+ # If the number of things executing has reached the maximum,
+ # add a deferred to the list of blocked items.
+ # When one of the things currently executing finishes, it will call back
+ # this item so that it can continue executing.
+ if entry[0] >= self.max_count:
+ new_defer = defer.Deferred()
+ entry[1][new_defer] = 1
- if current_defer:
logger.info(
- "Waiting to acquire linearizer lock %r for key %r", self.name, key
+ "Waiting to acquire linearizer lock %r for key %r", self.name, key,
)
try:
- with PreserveLoggingContext():
- yield current_defer
- except Exception:
- logger.exception("Unexpected exception in Linearizer")
-
- logger.info("Acquired linearizer lock %r for key %r", self.name,
- key)
+ yield make_deferred_yieldable(new_defer)
+ except Exception as e:
+ if isinstance(e, CancelledError):
+ logger.info(
+ "Cancelling wait for linearizer lock %r for key %r",
+ self.name, key,
+ )
+ else:
+ logger.warn(
+ "Unexpected exception waiting for linearizer lock %r for key %r",
+ self.name, key,
+ )
+
+ # we just have to take ourselves back out of the queue.
+ del entry[1][new_defer]
+ raise
+
+ logger.info("Acquired linearizer lock %r for key %r", self.name, key)
+ entry[0] += 1
# if the code holding the lock completes synchronously, then it
# will recursively run the next claimant on the list. That can
@@ -213,15 +232,15 @@ class Linearizer(object):
# In order to break the cycle, we add a cheeky sleep(0) here to
# ensure that we fall back to the reactor between each iteration.
#
- # (There's no particular need for it to happen before we return
- # the context manager, but it needs to happen while we hold the
- # lock, and the context manager's exit code must be synchronous,
- # so actually this is the only sensible place.
+ # (This needs to happen while we hold the lock, and the context manager's exit
+ # code must be synchronous, so this is the only sensible place.)
yield self._clock.sleep(0)
else:
- logger.info("Acquired uncontended linearizer lock %r for key %r",
- self.name, key)
+ logger.info(
+ "Acquired uncontended linearizer lock %r for key %r", self.name, key,
+ )
+ entry[0] += 1
@contextmanager
def _ctx_manager():
@@ -229,73 +248,15 @@ class Linearizer(object):
yield
finally:
logger.info("Releasing linearizer lock %r for key %r", self.name, key)
- with PreserveLoggingContext():
- new_defer.callback(None)
- current_d = self.key_to_defer.get(key)
- if current_d is new_defer:
- self.key_to_defer.pop(key, None)
-
- defer.returnValue(_ctx_manager())
-
-
-class Limiter(object):
- """Limits concurrent access to resources based on a key. Useful to ensure
- only a few thing happen at a time on a given resource.
-
- Example:
-
- with (yield limiter.queue("test_key")):
- # do some work.
-
- """
- def __init__(self, max_count):
- """
- Args:
- max_count(int): The maximum number of concurrent access
- """
- self.max_count = max_count
-
- # key_to_defer is a map from the key to a 2 element list where
- # the first element is the number of things executing
- # the second element is a list of deferreds for the things blocked from
- # executing.
- self.key_to_defer = {}
-
- @defer.inlineCallbacks
- def queue(self, key):
- entry = self.key_to_defer.setdefault(key, [0, []])
-
- # If the number of things executing is greater than the maximum
- # then add a deferred to the list of blocked items
- # When on of the things currently executing finishes it will callback
- # this item so that it can continue executing.
- if entry[0] >= self.max_count:
- new_defer = defer.Deferred()
- entry[1].append(new_defer)
-
- logger.info("Waiting to acquire limiter lock for key %r", key)
- with PreserveLoggingContext():
- yield new_defer
- logger.info("Acquired limiter lock for key %r", key)
- else:
- logger.info("Acquired uncontended limiter lock for key %r", key)
-
- entry[0] += 1
-
- @contextmanager
- def _ctx_manager():
- try:
- yield
- finally:
- logger.info("Releasing limiter lock for key %r", key)
# We've finished executing so check if there are any things
# blocked waiting to execute and start one of them
entry[0] -= 1
if entry[1]:
- next_def = entry[1].pop(0)
+ (next_def, _) = entry[1].popitem(last=False)
+ # we need to run the next thing in the sentinel context.
with PreserveLoggingContext():
next_def.callback(None)
elif entry[0] == 0:
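
With Limiter folded into Linearizer, a max_count above 1 turns the per-key mutex into a counting semaphore. A usage sketch in the style of the docstring above; do_fetch stands in for real deferred-returning work:

    from twisted.internet import defer

    from synapse.util.async import Linearizer

    limiter = Linearizer(name="remote_fetch", max_count=3)


    def do_fetch(key):
        # hypothetical deferred-returning work
        return defer.succeed(key)


    @defer.inlineCallbacks
    def fetch(key):
        # at most three fetches run at once for any given key; a fourth
        # queues on the OrderedDict until one of them releases
        with (yield limiter.queue(key)):
            result = yield do_fetch(key)
        defer.returnValue(result)
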
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index f8a07df6..861c2480 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -473,105 +473,101 @@ class CacheListDescriptor(_CacheDescriptorBase):
@functools.wraps(self.orig)
def wrapped(*args, **kwargs):
- # If we're passed a cache_context then we'll want to call its invalidate()
- # whenever we are invalidated
+ # If we're passed a cache_context then we'll want to call its
+ # invalidate() whenever we are invalidated
invalidate_callback = kwargs.pop("on_invalidate", None)
arg_dict = inspect.getcallargs(self.orig, obj, *args, **kwargs)
keyargs = [arg_dict[arg_nm] for arg_nm in self.arg_names]
list_args = arg_dict[self.list_name]
- # cached is a dict arg -> deferred, where deferred results in a
- # 2-tuple (`arg`, `result`)
results = {}
- cached_defers = {}
- missing = []
+
+ def update_results_dict(res, arg):
+ results[arg] = res
+
+ # list of deferreds to wait for
+ cached_defers = []
+
+ missing = set()
# If the cache takes a single arg then that is used as the key,
# otherwise a tuple is used.
if num_args == 1:
- def cache_get(arg):
- return cache.get(arg, callback=invalidate_callback)
+ def arg_to_cache_key(arg):
+ return arg
else:
- key = list(keyargs)
+ keylist = list(keyargs)
- def cache_get(arg):
- key[self.list_pos] = arg
- return cache.get(tuple(key), callback=invalidate_callback)
+ def arg_to_cache_key(arg):
+ keylist[self.list_pos] = arg
+ return tuple(keylist)
for arg in list_args:
try:
- res = cache_get(arg)
-
+ res = cache.get(arg_to_cache_key(arg),
+ callback=invalidate_callback)
if not isinstance(res, ObservableDeferred):
results[arg] = res
elif not res.has_succeeded():
res = res.observe()
- res.addCallback(lambda r, arg: (arg, r), arg)
- cached_defers[arg] = res
+ res.addCallback(update_results_dict, arg)
+ cached_defers.append(res)
else:
results[arg] = res.get_result()
except KeyError:
- missing.append(arg)
+ missing.add(arg)
if missing:
+ # we need an observable deferred for each entry in the list,
+ # which we put in the cache. Each deferred resolves with the
+ # relevant result for that key.
+ deferreds_map = {}
+ for arg in missing:
+ deferred = defer.Deferred()
+ deferreds_map[arg] = deferred
+ key = arg_to_cache_key(arg)
+ observable = ObservableDeferred(deferred)
+ cache.set(key, observable, callback=invalidate_callback)
+
+ def complete_all(res):
+ # the wrapped function has completed. It returns
+ # a dict. We can now resolve the observable deferreds in
+ # the cache and update our own result map.
+ for e in missing:
+ val = res.get(e, None)
+ deferreds_map[e].callback(val)
+ results[e] = val
+
+ def errback(f):
+ # the wrapped function has failed. Invalidate any cache
+ # entries we're supposed to be populating, and fail
+ # their deferreds.
+ for e in missing:
+ key = arg_to_cache_key(e)
+ cache.invalidate(key)
+ deferreds_map[e].errback(f)
+
+ # return the failure, to propagate to our caller.
+ return f
+
args_to_call = dict(arg_dict)
- args_to_call[self.list_name] = missing
+ args_to_call[self.list_name] = list(missing)
- ret_d = defer.maybeDeferred(
+ cached_defers.append(defer.maybeDeferred(
logcontext.preserve_fn(self.function_to_call),
**args_to_call
- )
-
- ret_d = ObservableDeferred(ret_d)
-
- # We need to create deferreds for each arg in the list so that
- # we can insert the new deferred into the cache.
- for arg in missing:
- observer = ret_d.observe()
- observer.addCallback(lambda r, arg: r.get(arg, None), arg)
-
- observer = ObservableDeferred(observer)
-
- if num_args == 1:
- cache.set(
- arg, observer,
- callback=invalidate_callback
- )
-
- def invalidate(f, key):
- cache.invalidate(key)
- return f
- observer.addErrback(invalidate, arg)
- else:
- key = list(keyargs)
- key[self.list_pos] = arg
- cache.set(
- tuple(key), observer,
- callback=invalidate_callback
- )
-
- def invalidate(f, key):
- cache.invalidate(key)
- return f
- observer.addErrback(invalidate, tuple(key))
-
- res = observer.observe()
- res.addCallback(lambda r, arg: (arg, r), arg)
-
- cached_defers[arg] = res
+ ).addCallbacks(complete_all, errback))
if cached_defers:
- def update_results_dict(res):
- results.update(res)
- return results
-
- return logcontext.make_deferred_yieldable(defer.gatherResults(
- list(cached_defers.values()),
+ d = defer.gatherResults(
+ cached_defers,
consumeErrors=True,
- ).addCallback(update_results_dict).addErrback(
+ ).addCallbacks(
+ lambda _: results,
unwrapFirstError
- ))
+ )
+ return logcontext.make_deferred_yieldable(d)
else:
return results
@@ -625,7 +621,8 @@ def cachedList(cached_method_name, list_name, num_args=None, inlineCallbacks=Fal
cache.
Args:
- cache (Cache): The underlying cache to use.
+ cached_method_name (str): The name of the single-item lookup method.
+ This is only used to find the cache to use.
list_name (str): The name of the argument that is the list to use to
do batch lookups in the cache.
num_args (int): Number of arguments to use as the key in the cache
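
The reworked CacheListDescriptor is easier to follow with its contract spelled out: the wrapped function is called only for keys missing from the single-item cache, must return a dict, and each per-key deferred placed in the cache is resolved (or errbacked) from that dict. A sketch of a conforming store method, with hypothetical database helpers:

    from twisted.internet import defer

    from synapse.util.caches.descriptors import cached, cachedList


    class UserStore(object):
        @cached()
        def get_user(self, user_id):
            # single-item lookup which owns the cache; hypothetical helper
            return self._db_lookup_one(user_id)

        @cachedList(cached_method_name="get_user", list_name="user_ids",
                    inlineCallbacks=True)
        def get_users(self, user_ids):
            # invoked only for the ids absent from get_user's cache; must
            # return a dict of user_id -> result (absent ids become None)
            rows = yield self._db_lookup_many(user_ids)  # hypothetical
            defer.returnValue(rows)
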
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index 4abca91f..ce85b2ae 100644
--- a/synapse/util/caches/expiringcache.py
+++ b/synapse/util/caches/expiringcache.py
@@ -16,6 +16,7 @@
import logging
from collections import OrderedDict
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.caches import register_cache
logger = logging.getLogger(__name__)
@@ -63,7 +64,10 @@ class ExpiringCache(object):
return
def f():
- self._prune_cache()
+ return run_as_background_process(
+ "prune_cache_%s" % self._cache_name,
+ self._prune_cache,
+ )
self._clock.looping_call(f, self._expiry_ms / 2)
diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index 734331ca..194da876 100644
--- a/synapse/util/distributor.py
+++ b/synapse/util/distributor.py
@@ -17,20 +17,18 @@ import logging
from twisted.internet import defer
-from synapse.util import unwrapFirstError
-from synapse.util.logcontext import PreserveLoggingContext
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
logger = logging.getLogger(__name__)
def user_left_room(distributor, user, room_id):
- with PreserveLoggingContext():
- distributor.fire("user_left_room", user=user, room_id=room_id)
+ distributor.fire("user_left_room", user=user, room_id=room_id)
def user_joined_room(distributor, user, room_id):
- with PreserveLoggingContext():
- distributor.fire("user_joined_room", user=user, room_id=room_id)
+ distributor.fire("user_joined_room", user=user, room_id=room_id)
class Distributor(object):
@@ -44,9 +42,7 @@ class Distributor(object):
model will do for today.
"""
- def __init__(self, suppress_failures=True):
- self.suppress_failures = suppress_failures
-
+ def __init__(self):
self.signals = {}
self.pre_registration = {}
@@ -56,7 +52,6 @@ class Distributor(object):
self.signals[name] = Signal(
name,
- suppress_failures=self.suppress_failures,
)
if name in self.pre_registration:
@@ -75,10 +70,18 @@ class Distributor(object):
self.pre_registration[name].append(observer)
def fire(self, name, *args, **kwargs):
+ """Dispatches the given signal to the registered observers.
+
+ Runs the observers as a background process. Does not return a deferred.
+ """
if name not in self.signals:
raise KeyError("%r does not have a signal named %s" % (self, name))
- return self.signals[name].fire(*args, **kwargs)
+ run_as_background_process(
+ name,
+ self.signals[name].fire,
+ *args, **kwargs
+ )
class Signal(object):
@@ -91,9 +94,8 @@ class Signal(object):
method into all of the observers.
"""
- def __init__(self, name, suppress_failures):
+ def __init__(self, name):
self.name = name
- self.suppress_failures = suppress_failures
self.observers = []
def observe(self, observer):
@@ -103,7 +105,6 @@ class Signal(object):
Each observer callable may return a Deferred."""
self.observers.append(observer)
- @defer.inlineCallbacks
def fire(self, *args, **kwargs):
"""Invokes every callable in the observer list, passing in the args and
kwargs. Exceptions thrown by observers are logged but ignored. It is
@@ -121,22 +122,17 @@ class Signal(object):
failure.type,
failure.value,
failure.getTracebackObject()))
- if not self.suppress_failures:
- return failure
return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)
- with PreserveLoggingContext():
- deferreds = [
- do(observer)
- for observer in self.observers
- ]
-
- res = yield defer.gatherResults(
- deferreds, consumeErrors=True
- ).addErrback(unwrapFirstError)
+ deferreds = [
+ run_in_background(do, o)
+ for o in self.observers
+ ]
- defer.returnValue(res)
+ return make_deferred_yieldable(defer.gatherResults(
+ deferreds, consumeErrors=True,
+ ))
def __repr__(self):
return "<Signal name=%r>" % (self.name,)
diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py
index 581c6052..014edea9 100644
--- a/synapse/util/frozenutils.py
+++ b/synapse/util/frozenutils.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from six import string_types
+from six import binary_type, text_type
from canonicaljson import json
from frozendict import frozendict
@@ -26,7 +26,7 @@ def freeze(o):
if isinstance(o, frozendict):
return o
- if isinstance(o, string_types):
+ if isinstance(o, (binary_type, text_type)):
return o
try:
@@ -41,7 +41,7 @@ def unfreeze(o):
if isinstance(o, (dict, frozendict)):
return dict({k: unfreeze(v) for k, v in o.items()})
- if isinstance(o, string_types):
+ if isinstance(o, (binary_type, text_type)):
return o
try:
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index f6c7175f..8dcae50b 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -99,6 +99,17 @@ class ContextResourceUsage(object):
self.db_sched_duration_sec = 0
self.evt_db_fetch_count = 0
+ def __repr__(self):
+ return ("<ContextResourceUsage ru_stime='%r', ru_utime='%r', "
+ "db_txn_count='%r', db_txn_duration_sec='%r', "
+ "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>") % (
+ self.ru_stime,
+ self.ru_utime,
+ self.db_txn_count,
+ self.db_txn_duration_sec,
+ self.db_sched_duration_sec,
+ self.evt_db_fetch_count,)
+
def __iadd__(self, other):
"""Add another ContextResourceUsage's stats to this one's.
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 6ba71078..97f12673 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -104,12 +104,19 @@ class Measure(object):
logger.warn("Expected context. (%r)", self.name)
return
- usage = context.get_resource_usage() - self.start_usage
- block_ru_utime.labels(self.name).inc(usage.ru_utime)
- block_ru_stime.labels(self.name).inc(usage.ru_stime)
- block_db_txn_count.labels(self.name).inc(usage.db_txn_count)
- block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
- block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec)
+ current = context.get_resource_usage()
+ usage = current - self.start_usage
+ try:
+ block_ru_utime.labels(self.name).inc(usage.ru_utime)
+ block_ru_stime.labels(self.name).inc(usage.ru_stime)
+ block_db_txn_count.labels(self.name).inc(usage.db_txn_count)
+ block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
+ block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec)
+ except ValueError:
+ logger.warn(
+ "Failed to save metrics! OLD: %r, NEW: %r",
+ self.start_usage, current
+ )
if self.created_context:
self.start_context.__exit__(exc_type, exc_val, exc_tb)
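
The new try/except in Measure.__exit__ guards against prometheus_client's counters, which raise ValueError when incremented by a negative amount, as a usage delta can be if the resource accounting ever runs backwards. The library behaviour it defends against:

    from prometheus_client import Counter

    c = Counter("editor_demo", "demo counter for the negative-increment case")
    c.inc(1.0)
    try:
        c.inc(-0.5)  # counters are monotonic; negative increments raise
    except ValueError as e:
        print("refused:", e)
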
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 9b97ea2b..d4680863 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -12,18 +12,18 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import itertools
+
import logging
import operator
-import six
+from six import iteritems, itervalues
+from six.moves import map
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.events.utils import prune_event
from synapse.types import get_domain_from_id
-from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
logger = logging.getLogger(__name__)
@@ -75,19 +75,6 @@ def filter_events_for_client(store, user_id, events, is_peeking=False,
types=types,
)
- forgotten = yield make_deferred_yieldable(defer.gatherResults([
- defer.maybeDeferred(
- preserve_fn(store.who_forgot_in_room),
- room_id,
- )
- for room_id in frozenset(e.room_id for e in events)
- ], consumeErrors=True))
-
- # Set of membership event_ids that have been forgotten
- event_id_forgotten = frozenset(
- row["event_id"] for rows in forgotten for row in rows
- )
-
ignore_dict_content = yield store.get_global_account_data_by_type_for_user(
"m.ignored_user_list", user_id,
)
@@ -176,10 +163,7 @@ def filter_events_for_client(store, user_id, events, is_peeking=False,
if membership is None:
membership_event = state.get((EventTypes.Member, user_id), None)
if membership_event:
- # XXX why do we do this?
- # https://github.com/matrix-org/synapse/issues/3350
- if membership_event.event_id not in event_id_forgotten:
- membership = membership_event.membership
+ membership = membership_event.membership
# if the user was a member of the room at the time of the event,
# they can see it.
@@ -221,7 +205,7 @@ def filter_events_for_client(store, user_id, events, is_peeking=False,
return event
# check each event: gives an iterable[None|EventBase]
- filtered_events = itertools.imap(allowed, events)
+ filtered_events = map(allowed, events)
# remove the None entries
filtered_events = filter(operator.truth, filtered_events)
@@ -261,7 +245,7 @@ def filter_events_for_server(store, server_name, events):
# membership states for the requesting server to determine
# if the server is either in the room or has been invited
# into the room.
- for ev in state.itervalues():
+ for ev in itervalues(state):
if ev.type != EventTypes.Member:
continue
try:
@@ -295,7 +279,7 @@ def filter_events_for_server(store, server_name, events):
)
visibility_ids = set()
- for sids in event_to_state_ids.itervalues():
+ for sids in itervalues(event_to_state_ids):
hist = sids.get((EventTypes.RoomHistoryVisibility, ""))
if hist:
visibility_ids.add(hist)
@@ -308,7 +292,7 @@ def filter_events_for_server(store, server_name, events):
event_map = yield store.get_events(visibility_ids)
all_open = all(
e.content.get("history_visibility") in (None, "shared", "world_readable")
- for e in event_map.itervalues()
+ for e in itervalues(event_map)
)
if all_open:
@@ -346,7 +330,7 @@ def filter_events_for_server(store, server_name, events):
#
state_key_to_event_id_set = {
e
- for key_to_eid in six.itervalues(event_to_state_ids)
+ for key_to_eid in itervalues(event_to_state_ids)
for e in key_to_eid.items()
}
@@ -369,10 +353,10 @@ def filter_events_for_server(store, server_name, events):
event_to_state = {
e_id: {
key: event_map[inner_e_id]
- for key, inner_e_id in key_to_eid.iteritems()
+ for key, inner_e_id in iteritems(key_to_eid)
if inner_e_id in event_map
}
- for e_id, key_to_eid in event_to_state_ids.iteritems()
+ for e_id, key_to_eid in iteritems(event_to_state_ids)
}
defer.returnValue([
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 5f158ec4..a82d737e 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -46,7 +46,7 @@ class AuthTestCase(unittest.TestCase):
self.auth = Auth(self.hs)
self.test_user = "@foo:bar"
- self.test_token = "_test_token_"
+ self.test_token = b"_test_token_"
# this is overridden for the appservice tests
self.store.get_app_service_by_token = Mock(return_value=None)
@@ -61,7 +61,7 @@ class AuthTestCase(unittest.TestCase):
self.store.get_user_by_access_token = Mock(return_value=user_info)
request = Mock(args={})
- request.args["access_token"] = [self.test_token]
+ request.args[b"access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
requester = yield self.auth.get_user_by_req(request)
self.assertEquals(requester.user.to_string(), self.test_user)
@@ -70,7 +70,7 @@ class AuthTestCase(unittest.TestCase):
self.store.get_user_by_access_token = Mock(return_value=None)
request = Mock(args={})
- request.args["access_token"] = [self.test_token]
+ request.args[b"access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
d = self.auth.get_user_by_req(request)
self.failureResultOf(d, AuthError)
@@ -98,7 +98,7 @@ class AuthTestCase(unittest.TestCase):
request = Mock(args={})
request.getClientIP.return_value = "127.0.0.1"
- request.args["access_token"] = [self.test_token]
+ request.args[b"access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
requester = yield self.auth.get_user_by_req(request)
self.assertEquals(requester.user.to_string(), self.test_user)
@@ -115,7 +115,7 @@ class AuthTestCase(unittest.TestCase):
request = Mock(args={})
request.getClientIP.return_value = "192.168.10.10"
- request.args["access_token"] = [self.test_token]
+ request.args[b"access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
requester = yield self.auth.get_user_by_req(request)
self.assertEquals(requester.user.to_string(), self.test_user)
@@ -131,7 +131,7 @@ class AuthTestCase(unittest.TestCase):
request = Mock(args={})
request.getClientIP.return_value = "131.111.8.42"
- request.args["access_token"] = [self.test_token]
+ request.args[b"access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
d = self.auth.get_user_by_req(request)
self.failureResultOf(d, AuthError)
@@ -141,7 +141,7 @@ class AuthTestCase(unittest.TestCase):
self.store.get_user_by_access_token = Mock(return_value=None)
request = Mock(args={})
- request.args["access_token"] = [self.test_token]
+ request.args[b"access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
d = self.auth.get_user_by_req(request)
self.failureResultOf(d, AuthError)
@@ -158,7 +158,7 @@ class AuthTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_get_user_by_req_appservice_valid_token_valid_user_id(self):
- masquerading_user_id = "@doppelganger:matrix.org"
+ masquerading_user_id = b"@doppelganger:matrix.org"
app_service = Mock(
token="foobar", url="a_url", sender=self.test_user,
ip_range_whitelist=None,
@@ -169,14 +169,17 @@ class AuthTestCase(unittest.TestCase):
request = Mock(args={})
request.getClientIP.return_value = "127.0.0.1"
- request.args["access_token"] = [self.test_token]
- request.args["user_id"] = [masquerading_user_id]
+ request.args[b"access_token"] = [self.test_token]
+ request.args[b"user_id"] = [masquerading_user_id]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
requester = yield self.auth.get_user_by_req(request)
- self.assertEquals(requester.user.to_string(), masquerading_user_id)
+ self.assertEquals(
+ requester.user.to_string(),
+ masquerading_user_id.decode('utf8')
+ )
def test_get_user_by_req_appservice_valid_token_bad_user_id(self):
- masquerading_user_id = "@doppelganger:matrix.org"
+ masquerading_user_id = b"@doppelganger:matrix.org"
app_service = Mock(
token="foobar", url="a_url", sender=self.test_user,
ip_range_whitelist=None,
@@ -187,8 +190,8 @@ class AuthTestCase(unittest.TestCase):
request = Mock(args={})
request.getClientIP.return_value = "127.0.0.1"
- request.args["access_token"] = [self.test_token]
- request.args["user_id"] = [masquerading_user_id]
+ request.args[b"access_token"] = [self.test_token]
+ request.args[b"user_id"] = [masquerading_user_id]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
d = self.auth.get_user_by_req(request)
self.failureResultOf(d, AuthError)
@@ -418,7 +421,7 @@ class AuthTestCase(unittest.TestCase):
# check the token works
request = Mock(args={})
- request.args["access_token"] = [token]
+ request.args[b"access_token"] = [token.encode('ascii')]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
self.assertEqual(UserID.from_string(USER_ID), requester.user)
@@ -431,7 +434,7 @@ class AuthTestCase(unittest.TestCase):
# the token should *not* work now
request = Mock(args={})
- request.args["access_token"] = [guest_tok]
+ request.args[b"access_token"] = [guest_tok.encode('ascii')]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
with self.assertRaises(AuthError) as cm:
diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
index 2e5e8e4d..55eab9e9 100644
--- a/tests/handlers/test_auth.py
+++ b/tests/handlers/test_auth.py
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from mock import Mock
import pymacaroons
@@ -19,6 +20,7 @@ from twisted.internet import defer
import synapse
import synapse.api.errors
+from synapse.api.errors import AuthError
from synapse.handlers.auth import AuthHandler
from tests import unittest
@@ -37,6 +39,10 @@ class AuthTestCase(unittest.TestCase):
self.hs.handlers = AuthHandlers(self.hs)
self.auth_handler = self.hs.handlers.auth_handler
self.macaroon_generator = self.hs.get_macaroon_generator()
+ # MAU tests
+ self.hs.config.max_mau_value = 50
+ self.small_number_of_users = 1
+ self.large_number_of_users = 100
def test_token_is_a_macaroon(self):
token = self.macaroon_generator.generate_access_token("some_user")
@@ -71,38 +77,37 @@ class AuthTestCase(unittest.TestCase):
v.satisfy_general(verify_nonce)
v.verify(macaroon, self.hs.config.macaroon_secret_key)
+ @defer.inlineCallbacks
def test_short_term_login_token_gives_user_id(self):
self.hs.clock.now = 1000
token = self.macaroon_generator.generate_short_term_login_token(
"a_user", 5000
)
-
- self.assertEqual(
- "a_user",
- self.auth_handler.validate_short_term_login_token_and_get_user_id(
- token
- )
+ user_id = yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+ token
)
+ self.assertEqual("a_user", user_id)
# when we advance the clock, the token should be rejected
self.hs.clock.now = 6000
with self.assertRaises(synapse.api.errors.AuthError):
- self.auth_handler.validate_short_term_login_token_and_get_user_id(
+ yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
token
)
+ @defer.inlineCallbacks
def test_short_term_login_token_cannot_replace_user_id(self):
token = self.macaroon_generator.generate_short_term_login_token(
"a_user", 5000
)
macaroon = pymacaroons.Macaroon.deserialize(token)
+ user_id = yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+ macaroon.serialize()
+ )
self.assertEqual(
- "a_user",
- self.auth_handler.validate_short_term_login_token_and_get_user_id(
- macaroon.serialize()
- )
+ "a_user", user_id
)
# add another "user_id" caveat, which might allow us to override the
@@ -110,6 +115,57 @@ class AuthTestCase(unittest.TestCase):
macaroon.add_first_party_caveat("user_id = b_user")
with self.assertRaises(synapse.api.errors.AuthError):
- self.auth_handler.validate_short_term_login_token_and_get_user_id(
+ yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
macaroon.serialize()
)
+
+ @defer.inlineCallbacks
+ def test_mau_limits_disabled(self):
+ self.hs.config.limit_usage_by_mau = False
+ # Ensure does not throw exception
+ yield self.auth_handler.get_access_token_for_user_id('user_a')
+
+ yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+ self._get_macaroon().serialize()
+ )
+
+ @defer.inlineCallbacks
+ def test_mau_limits_exceeded(self):
+ self.hs.config.limit_usage_by_mau = True
+ self.hs.get_datastore().count_monthly_users = Mock(
+ return_value=defer.succeed(self.large_number_of_users)
+ )
+
+ with self.assertRaises(AuthError):
+ yield self.auth_handler.get_access_token_for_user_id('user_a')
+
+ self.hs.get_datastore().count_monthly_users = Mock(
+ return_value=defer.succeed(self.large_number_of_users)
+ )
+ with self.assertRaises(AuthError):
+ yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+ self._get_macaroon().serialize()
+ )
+
+ @defer.inlineCallbacks
+ def test_mau_limits_not_exceeded(self):
+ self.hs.config.limit_usage_by_mau = True
+
+ self.hs.get_datastore().count_monthly_users = Mock(
+ return_value=defer.succeed(self.small_number_of_users)
+ )
+ # Ensure this does not raise an exception
+ yield self.auth_handler.get_access_token_for_user_id('user_a')
+
+ self.hs.get_datastore().count_monthly_users = Mock(
+ return_value=defer.succeed(self.small_number_of_users)
+ )
+ yield self.auth_handler.validate_short_term_login_token_and_get_user_id(
+ self._get_macaroon().serialize()
+ )
+
+ def _get_macaroon(self):
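+ # helper: a short-term login token macaroon for "user_a", letting the
+ # MAU tests exercise the validation path without a prior login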
+ token = self.macaroon_generator.generate_short_term_login_token(
+ "user_a", 5000
+ )
+ return pymacaroons.Macaroon.deserialize(token)
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 025fa1be..0937d71c 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -17,6 +17,7 @@ from mock import Mock
from twisted.internet import defer
+from synapse.api.errors import RegistrationError
from synapse.handlers.register import RegistrationHandler
from synapse.types import UserID, create_requester
@@ -77,3 +78,53 @@ class RegistrationTestCase(unittest.TestCase):
requester, local_part, display_name)
self.assertEquals(result_user_id, user_id)
self.assertEquals(result_token, 'secret')
+
+ @defer.inlineCallbacks
+ def test_cannot_register_when_mau_limits_exceeded(self):
+ local_part = "someone"
+ display_name = "someone"
+ requester = create_requester("@as:test")
+ store = self.hs.get_datastore()
+ self.hs.config.limit_usage_by_mau = False
+ self.hs.config.max_mau_value = 50
+ lots_of_users = 100
+ small_number_users = 1
+
+ store.count_monthly_users = Mock(return_value=defer.succeed(lots_of_users))
+
+ # Ensure this does not raise an exception
+ yield self.handler.get_or_create_user(requester, 'a', display_name)
+
+ self.hs.config.limit_usage_by_mau = True
+
+ with self.assertRaises(RegistrationError):
+ yield self.handler.get_or_create_user(requester, 'b', display_name)
+
+ store.count_monthly_users = Mock(return_value=defer.succeed(small_number_users))
+
+ self._macaroon_mock_generator("another_secret")
+
+ # Ensure this does not raise an exception
+ yield self.handler.get_or_create_user("@neil:matrix.org", 'c', "Neil")
+
+ self._macaroon_mock_generator("another another secret")
+ store.count_monthly_users = Mock(return_value=defer.succeed(lots_of_users))
+
+ with self.assertRaises(RegistrationError):
+ yield self.handler.register(localpart=local_part)
+
+ self._macaroon_mock_generator("another another secret")
+ store.count_monthly_users = Mock(return_value=defer.succeed(lots_of_users))
+
+ with self.assertRaises(RegistrationError):
+ yield self.handler.register_saml2(local_part)
+
+ def _macaroon_mock_generator(self, secret):
+ """
+ Reset macaroon generator in the case where the test creates multiple users
+ """
+ macaroon_generator = Mock(
+ generate_access_token=Mock(return_value=secret))
+ self.hs.get_macaroon_generator = Mock(return_value=macaroon_generator)
+ self.hs.handlers = RegistrationHandlers(self.hs)
+ self.handler = self.hs.get_handlers().registration_handler
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index b08856f7..2c263af1 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -44,7 +44,6 @@ def _expect_edu(destination, edu_type, content, origin="test"):
"content": content,
}
],
- "pdu_failures": [],
}
diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py
index 8708c8a1..a103e7be 100644
--- a/tests/replication/slave/storage/_base.py
+++ b/tests/replication/slave/storage/_base.py
@@ -11,23 +11,44 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
import tempfile
from mock import Mock, NonCallableMock
from twisted.internet import defer, reactor
+from twisted.internet.defer import Deferred
from synapse.replication.tcp.client import (
ReplicationClientFactory,
ReplicationClientHandler,
)
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
+from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable
from tests import unittest
from tests.utils import setup_test_homeserver
+class TestReplicationClientHandler(ReplicationClientHandler):
+ """Overrides on_rdata so that we can wait for it to happen"""
+ def __init__(self, store):
+ super(TestReplicationClientHandler, self).__init__(store)
+ self._rdata_awaiters = []
+
+ def await_replication(self):
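+ # returns a deferred which resolves the next time on_rdata fires;
+ # make_deferred_yieldable keeps the logcontext rules intact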
+ d = Deferred()
+ self._rdata_awaiters.append(d)
+ return make_deferred_yieldable(d)
+
+ def on_rdata(self, stream_name, token, rows):
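+ # take a copy of (and clear) the awaiter list before calling back, so
+ # that a callback which awaits replication again joins the next batch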
+ awaiters = self._rdata_awaiters
+ self._rdata_awaiters = []
+ super(TestReplicationClientHandler, self).on_rdata(stream_name, token, rows)
+ with PreserveLoggingContext():
+ for a in awaiters:
+ a.callback(None)
+
+
class BaseSlavedStoreTestCase(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
@@ -52,7 +73,7 @@ class BaseSlavedStoreTestCase(unittest.TestCase):
self.addCleanup(listener.stopListening)
self.streamer = server_factory.streamer
- self.replication_handler = ReplicationClientHandler(self.slaved_store)
+ self.replication_handler = TestReplicationClientHandler(self.slaved_store)
client_factory = ReplicationClientFactory(
self.hs, "client_name", self.replication_handler
)
@@ -60,12 +81,14 @@ class BaseSlavedStoreTestCase(unittest.TestCase):
self.addCleanup(client_factory.stopTrying)
self.addCleanup(client_connector.disconnect)
- @defer.inlineCallbacks
def replicate(self):
- yield self.streamer.on_notifier_poke()
- d = self.replication_handler.await_sync("replication_test")
- self.streamer.send_sync_to_all_connections("replication_test")
- yield d
+ """Tell the master side of replication that something has happened, and then
+ wait for the replication to occur.
+ """
+ # xxx: should we be more specific in what we wait for?
+ d = self.replication_handler.await_replication()
+ self.streamer.on_notifier_poke()
+ return d
@defer.inlineCallbacks
def check(self, method, args, expected_result=None):
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index cea01d93..f5b47f5e 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -222,9 +222,11 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
state_ids = {
key: e.event_id for key, e in state.items()
}
- context = EventContext()
- context.current_state_ids = state_ids
- context.prev_state_ids = state_ids
+ context = EventContext.with_state(
+ state_group=None,
+ current_state_ids=state_ids,
+ prev_state_ids=state_ids
+ )
else:
state_handler = self.hs.get_state_handler()
context = yield state_handler.compute_event_context(event)
diff --git a/tests/rest/client/v1/test_admin.py b/tests/rest/client/v1/test_admin.py
new file mode 100644
index 00000000..8c901456
--- /dev/null
+++ b/tests/rest/client/v1/test_admin.py
@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import hmac
+import json
+
+from mock import Mock
+
+from synapse.http.server import JsonResource
+from synapse.rest.client.v1.admin import register_servlets
+from synapse.util import Clock
+
+from tests import unittest
+from tests.server import (
+ ThreadedMemoryReactorClock,
+ make_request,
+ render,
+ setup_test_homeserver,
+)
+
+
+class UserRegisterTestCase(unittest.TestCase):
+ def setUp(self):
+
+ self.clock = ThreadedMemoryReactorClock()
+ self.hs_clock = Clock(self.clock)
+ self.url = "/_matrix/client/r0/admin/register"
+
+ self.registration_handler = Mock()
+ self.identity_handler = Mock()
+ self.login_handler = Mock()
+ self.device_handler = Mock()
+ self.device_handler.check_device_registered = Mock(return_value="FAKE")
+
+ self.datastore = Mock(return_value=Mock())
+ self.datastore.get_current_state_deltas = Mock(return_value=[])
+
+ self.secrets = Mock()
+
+ self.hs = setup_test_homeserver(
+ http_client=None, clock=self.hs_clock, reactor=self.clock
+ )
+
+ self.hs.config.registration_shared_secret = u"shared"
+
+ self.hs.get_media_repository = Mock()
+ self.hs.get_deactivate_account_handler = Mock()
+
+ self.resource = JsonResource(self.hs)
+ register_servlets(self.hs, self.resource)
+
+ def test_disabled(self):
+ """
+ If there is no shared secret, registration through this method will be
+ prevented.
+ """
+ self.hs.config.registration_shared_secret = None
+
+ request, channel = make_request("POST", self.url, b'{}')
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(
+ 'Shared secret registration is not enabled', channel.json_body["error"]
+ )
+
+ def test_get_nonce(self):
+ """
+ Calling GET on the endpoint will return a randomised nonce, using the
+ homeserver's secrets provider.
+ """
+ secrets = Mock()
+ secrets.token_hex = Mock(return_value="abcd")
+
+ self.hs.get_secrets = Mock(return_value=secrets)
+
+ request, channel = make_request("GET", self.url)
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(channel.json_body, {"nonce": "abcd"})
+
+ def test_expired_nonce(self):
+ """
+ Calling GET on the endpoint will return a randomised nonce, which will
+ only last for SALT_TIMEOUT (60s).
+ """
+ request, channel = make_request("GET", self.url)
+ render(request, self.resource, self.clock)
+ nonce = channel.json_body["nonce"]
+
+ # 59 seconds
+ self.clock.advance(59)
+
+ body = json.dumps({"nonce": nonce})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('username must be specified', channel.json_body["error"])
+
+ # 61 seconds
+ self.clock.advance(2)
+
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('unrecognised nonce', channel.json_body["error"])
+
+ def test_register_incorrect_nonce(self):
+ """
+ Only the provided nonce can be used, as it's checked in the MAC.
+ """
+ request, channel = make_request("GET", self.url)
+ render(request, self.resource, self.clock)
+ nonce = channel.json_body["nonce"]
+
+ want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+ want_mac.update(b"notthenonce\x00bob\x00abc123\x00admin")
+ want_mac = want_mac.hexdigest()
+
+ body = json.dumps(
+ {
+ "nonce": nonce,
+ "username": "bob",
+ "password": "abc123",
+ "admin": True,
+ "mac": want_mac,
+ }
+ )
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual("HMAC incorrect", channel.json_body["error"])
+
+ def test_register_correct_nonce(self):
+ """
+ When the correct nonce is provided, and the right key is provided, the
+ user is registered.
+ """
+ request, channel = make_request("GET", self.url)
+ render(request, self.resource, self.clock)
+ nonce = channel.json_body["nonce"]
+
+ want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
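+ # the MAC covers the nonce, username, password and admin flag,
+ # joined with NUL bytes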
+ want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin")
+ want_mac = want_mac.hexdigest()
+
+ body = json.dumps(
+ {
+ "nonce": nonce,
+ "username": "bob",
+ "password": "abc123",
+ "admin": True,
+ "mac": want_mac,
+ }
+ )
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual("@bob:test", channel.json_body["user_id"])
+
+ def test_nonce_reuse(self):
+ """
+ A nonce can only be used once; reusing it is rejected.
+ """
+ request, channel = make_request("GET", self.url)
+ render(request, self.resource, self.clock)
+ nonce = channel.json_body["nonce"]
+
+ want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+ want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin")
+ want_mac = want_mac.hexdigest()
+
+ body = json.dumps(
+ {
+ "nonce": nonce,
+ "username": "bob",
+ "password": "abc123",
+ "admin": True,
+ "mac": want_mac,
+ }
+ )
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual("@bob:test", channel.json_body["user_id"])
+
+ # Now, try and reuse it
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('unrecognised nonce', channel.json_body["error"])
+
+ def test_missing_parts(self):
+ """
+ Synapse will complain if you don't give nonce, username, password, and
+ mac. Admin is optional. Additional checks are done for length and
+ type.
+ """
+ def nonce():
+ request, channel = make_request("GET", self.url)
+ render(request, self.resource, self.clock)
+ return channel.json_body["nonce"]
+
+ #
+ # Nonce check
+ #
+
+ # Must be present
+ body = json.dumps({})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('nonce must be specified', channel.json_body["error"])
+
+ #
+ # Username checks
+ #
+
+ # Must be present
+ body = json.dumps({"nonce": nonce()})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('username must be specified', channel.json_body["error"])
+
+ # Must be a string
+ body = json.dumps({"nonce": nonce(), "username": 1234})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('Invalid username', channel.json_body["error"])
+
+ # Must not have null bytes
+ body = json.dumps({"nonce": nonce(), "username": u"abcd\u0000"})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('Invalid username', channel.json_body["error"])
+
+ # Must not be too long
+ body = json.dumps({"nonce": nonce(), "username": "a" * 1000})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('Invalid username', channel.json_body["error"])
+
+ #
+ # Password checks
+ #
+
+ # Must be present
+ body = json.dumps({"nonce": nonce(), "username": "a"})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('password must be specified', channel.json_body["error"])
+
+ # Must be a string
+ body = json.dumps({"nonce": nonce(), "username": "a", "password": 1234})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('Invalid password', channel.json_body["error"])
+
+ # Must not have null bytes
+ body = json.dumps({"nonce": nonce(), "username": "a", "password": u"abcd\u0000"})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('Invalid password', channel.json_body["error"])
+
+ # Super long
+ body = json.dumps({"nonce": nonce(), "username": "a", "password": "A" * 1000})
+ request, channel = make_request("POST", self.url, body.encode('utf8'))
+ render(request, self.resource, self.clock)
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual('Invalid password', channel.json_body["error"])
diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py
index a5af36a9..50418153 100644
--- a/tests/rest/client/v1/test_events.py
+++ b/tests/rest/client/v1/test_events.py
@@ -14,100 +14,30 @@
# limitations under the License.
""" Tests REST events for /events paths."""
+
from mock import Mock, NonCallableMock
+from six import PY3
-# twisted imports
from twisted.internet import defer
-import synapse.rest.client.v1.events
-import synapse.rest.client.v1.register
-import synapse.rest.client.v1.room
-
-from tests import unittest
-
from ....utils import MockHttpResource, setup_test_homeserver
from .utils import RestTestCase
PATH_PREFIX = "/_matrix/client/api/v1"
-class EventStreamPaginationApiTestCase(unittest.TestCase):
- """ Tests event streaming query parameters and start/end keys used in the
- Pagination stream API. """
- user_id = "sid1"
-
- def setUp(self):
- # configure stream and inject items
- pass
-
- def tearDown(self):
- pass
-
- def TODO_test_long_poll(self):
- # stream from 'end' key, send (self+other) message, expect message.
-
- # stream from 'END', send (self+other) message, expect message.
-
- # stream from 'end' key, send (self+other) topic, expect topic.
-
- # stream from 'END', send (self+other) topic, expect topic.
-
- # stream from 'end' key, send (self+other) invite, expect invite.
-
- # stream from 'END', send (self+other) invite, expect invite.
-
- pass
-
- def TODO_test_stream_forward(self):
- # stream from START, expect injected items
-
- # stream from 'start' key, expect same content
-
- # stream from 'end' key, expect nothing
-
- # stream from 'END', expect nothing
-
- # The following is needed for cases where content is removed e.g. you
- # left a room, so the token you're streaming from is > the one that
- # would be returned naturally from START>END.
- # stream from very new token (higher than end key), expect same token
- # returned as end key
- pass
-
- def TODO_test_limits(self):
- # stream from a key, expect limit_num items
-
- # stream from START, expect limit_num items
-
- pass
-
- def TODO_test_range(self):
- # stream from key to key, expect X items
-
- # stream from key to END, expect X items
-
- # stream from START to key, expect X items
-
- # stream from START to END, expect all items
- pass
-
- def TODO_test_direction(self):
- # stream from END to START and fwds, expect newest first
-
- # stream from END to START and bwds, expect oldest first
-
- # stream from START to END and fwds, expect oldest first
-
- # stream from START to END and bwds, expect newest first
-
- pass
-
-
class EventStreamPermissionsTestCase(RestTestCase):
""" Tests event streaming (GET /events). """
+ if PY3:
+ skip = "Skip on Py3 until ported to use not V1 only register."
+
@defer.inlineCallbacks
def setUp(self):
+ import synapse.rest.client.v1.events
+ import synapse.rest.client.v1_only.register
+ import synapse.rest.client.v1.room
+
self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
hs = yield setup_test_homeserver(
@@ -125,7 +55,7 @@ class EventStreamPermissionsTestCase(RestTestCase):
hs.get_handlers().federation_handler = Mock()
- synapse.rest.client.v1.register.register_servlets(hs, self.mock_resource)
+ synapse.rest.client.v1_only.register.register_servlets(hs, self.mock_resource)
synapse.rest.client.v1.events.register_servlets(hs, self.mock_resource)
synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
diff --git a/tests/rest/client/v1/test_register.py b/tests/rest/client/v1/test_register.py
index f15fb362..83a23cd8 100644
--- a/tests/rest/client/v1/test_register.py
+++ b/tests/rest/client/v1/test_register.py
@@ -16,11 +16,12 @@
import json
from mock import Mock
+from six import PY3
from twisted.test.proto_helpers import MemoryReactorClock
from synapse.http.server import JsonResource
-from synapse.rest.client.v1.register import register_servlets
+from synapse.rest.client.v1_only.register import register_servlets
from synapse.util import Clock
from tests import unittest
@@ -31,6 +32,8 @@ class CreateUserServletTestCase(unittest.TestCase):
"""
Tests for CreateUserRestServlet.
"""
+ if PY3:
+ skip = "Not ported to Python 3."
def setUp(self):
self.registration_handler = Mock()
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 6b576409..00fc7967 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -20,7 +20,6 @@ import json
from mock import Mock, NonCallableMock
from six.moves.urllib import parse as urlparse
-# twisted imports
from twisted.internet import defer
import synapse.rest.client.v1.room
@@ -86,6 +85,7 @@ class RoomBase(unittest.TestCase):
self.resource = JsonResource(self.hs)
synapse.rest.client.v1.room.register_servlets(self.hs, self.resource)
+ synapse.rest.client.v1.room.register_deprecated_servlets(self.hs, self.resource)
self.helper = RestHelper(self.hs, self.resource, self.user_id)
diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py
index 5ea9cc82..e890f0fe 100644
--- a/tests/rest/client/v2_alpha/test_filter.py
+++ b/tests/rest/client/v2_alpha/test_filter.py
@@ -21,8 +21,12 @@ from synapse.types import UserID
from synapse.util import Clock
from tests import unittest
-from tests.server import ThreadedMemoryReactorClock as MemoryReactorClock
-from tests.server import make_request, setup_test_homeserver, wait_until_result
+from tests.server import (
+ ThreadedMemoryReactorClock as MemoryReactorClock,
+ make_request,
+ setup_test_homeserver,
+ wait_until_result,
+)
PATH_PREFIX = "/_matrix/client/v2_alpha"
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index 704cf97a..03ec3993 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -20,8 +20,12 @@ from synapse.types import UserID
from synapse.util import Clock
from tests import unittest
-from tests.server import ThreadedMemoryReactorClock as MemoryReactorClock
-from tests.server import make_request, setup_test_homeserver, wait_until_result
+from tests.server import (
+ ThreadedMemoryReactorClock as MemoryReactorClock,
+ make_request,
+ setup_test_homeserver,
+ wait_until_result,
+)
PATH_PREFIX = "/_matrix/client/v2_alpha"
diff --git a/tests/storage/test__init__.py b/tests/storage/test__init__.py
new file mode 100644
index 00000000..f19cb126
--- /dev/null
+++ b/tests/storage/test__init__.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+import tests.utils
+
+
+class InitTestCase(tests.unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(InitTestCase, self).__init__(*args, **kwargs)
+ self.store = None # type: synapse.storage.DataStore
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield tests.utils.setup_test_homeserver()
+
+ hs.config.max_mau_value = 50
+ hs.config.limit_usage_by_mau = True
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+
+ @defer.inlineCallbacks
+ def test_count_monthly_users(self):
+ count = yield self.store.count_monthly_users()
+ self.assertEqual(0, count)
+
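+ # two distinct users seen within the month should each count once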
+ yield self._insert_user_ips("@user:server1")
+ yield self._insert_user_ips("@user:server2")
+
+ count = yield self.store.count_monthly_users()
+ self.assertEqual(2, count)
+
+ @defer.inlineCallbacks
+ def _insert_user_ips(self, user):
+ """
+ Helper function to populate user_ips without using the batch insertion infra
+ Args:
+ user (str): fully-qualified user ID, e.g. @user:server.com
+ """
+ yield self.store._simple_upsert(
+ table="user_ips",
+ keyvalues={
+ "user_id": user,
+ "access_token": "access_token",
+ "ip": "ip",
+ "user_agent": "user_agent",
+ "device_id": "device_id",
+ },
+ values={
+ "last_seen": self.clock.time_msec(),
+ }
+ )
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
new file mode 100644
index 00000000..7a76d67b
--- /dev/null
+++ b/tests/storage/test_state.py
@@ -0,0 +1,319 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.types import RoomID, UserID
+
+import tests.unittest
+import tests.utils
+
+logger = logging.getLogger(__name__)
+
+
+class StateStoreTestCase(tests.unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(StateStoreTestCase, self).__init__(*args, **kwargs)
+ self.store = None # type: synapse.storage.DataStore
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield tests.utils.setup_test_homeserver()
+
+ self.store = hs.get_datastore()
+ self.event_builder_factory = hs.get_event_builder_factory()
+ self.event_creation_handler = hs.get_event_creation_handler()
+
+ self.u_alice = UserID.from_string("@alice:test")
+ self.u_bob = UserID.from_string("@bob:test")
+
+ self.room = RoomID.from_string("!abc123:test")
+
+ yield self.store.store_room(
+ self.room.to_string(),
room_creator_user_id="@creator:test",
+ is_public=True
+ )
+
+ @defer.inlineCallbacks
+ def inject_state_event(self, room, sender, typ, state_key, content):
+ builder = self.event_builder_factory.new({
+ "type": typ,
+ "sender": sender.to_string(),
+ "state_key": state_key,
+ "room_id": room.to_string(),
+ "content": content,
+ })
+
+ event, context = yield self.event_creation_handler.create_new_client_event(
+ builder
+ )
+
+ yield self.store.persist_event(event, context)
+
+ defer.returnValue(event)
+
+ def assertStateMapEqual(self, s1, s2):
+ for t in s1:
+ # just compare event IDs for simplicity
+ self.assertEqual(s1[t].event_id, s2[t].event_id)
+ self.assertEqual(len(s1), len(s2))
+
+ @defer.inlineCallbacks
+ def test_get_state_for_event(self):
+
+ # this defaults to a linear DAG as each new injection defaults to whatever
+ # forward extremities are currently in the DB for this room.
+ e1 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Create, '', {},
+ )
+ e2 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Name, '', {
+ "name": "test room"
+ },
+ )
+ e3 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Member, self.u_alice.to_string(), {
+ "membership": Membership.JOIN
+ },
+ )
+ e4 = yield self.inject_state_event(
+ self.room, self.u_bob, EventTypes.Member, self.u_bob.to_string(), {
+ "membership": Membership.JOIN
+ },
+ )
+ e5 = yield self.inject_state_event(
+ self.room, self.u_bob, EventTypes.Member, self.u_bob.to_string(), {
+ "membership": Membership.LEAVE
+ },
+ )
+
+ # check we get the full state as of the final event
+ state = yield self.store.get_state_for_event(
+ e5.event_id, None, filtered_types=None
+ )
+
+ self.assertIsNotNone(e4)
+
+ self.assertStateMapEqual({
+ (e1.type, e1.state_key): e1,
+ (e2.type, e2.state_key): e2,
+ (e3.type, e3.state_key): e3,
+ # e4 is overwritten by e5
+ (e5.type, e5.state_key): e5,
+ }, state)
+
+ # check we can filter to the m.room.name event (with a '' state key)
+ state = yield self.store.get_state_for_event(
+ e5.event_id, [(EventTypes.Name, '')], filtered_types=None
+ )
+
+ self.assertStateMapEqual({
+ (e2.type, e2.state_key): e2,
+ }, state)
+
+ # check we can filter to the m.room.name event (with a wildcard None state key)
+ state = yield self.store.get_state_for_event(
+ e5.event_id, [(EventTypes.Name, None)], filtered_types=None
+ )
+
+ self.assertStateMapEqual({
+ (e2.type, e2.state_key): e2,
+ }, state)
+
+ # check we can grab the m.room.member events (with a wildcard None state key)
+ state = yield self.store.get_state_for_event(
+ e5.event_id, [(EventTypes.Member, None)], filtered_types=None
+ )
+
+ self.assertStateMapEqual({
+ (e3.type, e3.state_key): e3,
+ (e5.type, e5.state_key): e5,
+ }, state)
+
+ # check we can use filter_types to grab a specific room member
+ # without filtering out the other event types
+ state = yield self.store.get_state_for_event(
+ e5.event_id, [(EventTypes.Member, self.u_alice.to_string())],
+ filtered_types=[EventTypes.Member],
+ )
+
+ self.assertStateMapEqual({
+ (e1.type, e1.state_key): e1,
+ (e2.type, e2.state_key): e2,
+ (e3.type, e3.state_key): e3,
+ }, state)
+
+ # check that types=[], filtered_types=[EventTypes.Member]
+ # doesn't return all members
+ state = yield self.store.get_state_for_event(
+ e5.event_id, [], filtered_types=[EventTypes.Member],
+ )
+
+ self.assertStateMapEqual({
+ (e1.type, e1.state_key): e1,
+ (e2.type, e2.state_key): e2,
+ }, state)
+
+ #######################################################
+ # _get_some_state_from_cache tests against a full cache
+ #######################################################
+
+ room_id = self.room.to_string()
+ group_ids = yield self.store.get_state_groups_ids(room_id, [e5.event_id])
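+ # get_state_groups_ids maps each event's state group to its state dict;
+ # with a single event there is exactly one entry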
+ group = list(group_ids.keys())[0]
+
+ # test _get_some_state_from_cache correctly filters out members with types=[]
+ (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ group, [], filtered_types=[EventTypes.Member]
+ )
+
+ self.assertEqual(is_all, True)
+ self.assertDictEqual({
+ (e1.type, e1.state_key): e1.event_id,
+ (e2.type, e2.state_key): e2.event_id,
+ }, state_dict)
+
+ # test _get_some_state_from_cache correctly filters in members with wildcard types
+ (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+ )
+
+ self.assertEqual(is_all, True)
+ self.assertDictEqual({
+ (e1.type, e1.state_key): e1.event_id,
+ (e2.type, e2.state_key): e2.event_id,
+ (e3.type, e3.state_key): e3.event_id,
+ # e4 is overwritten by e5
+ (e5.type, e5.state_key): e5.event_id,
+ }, state_dict)
+
+ # test _get_some_state_from_cache correctly filters in members with specific types
+ (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ group, [(EventTypes.Member, e5.state_key)], filtered_types=[EventTypes.Member]
+ )
+
+ self.assertEqual(is_all, True)
+ self.assertDictEqual({
+ (e1.type, e1.state_key): e1.event_id,
+ (e2.type, e2.state_key): e2.event_id,
+ (e5.type, e5.state_key): e5.event_id,
+ }, state_dict)
+
+ # test _get_some_state_from_cache correctly filters in members with specific types
+ # and no filtered_types
+ (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ group, [(EventTypes.Member, e5.state_key)], filtered_types=None
+ )
+
+ self.assertEqual(is_all, True)
+ self.assertDictEqual({
+ (e5.type, e5.state_key): e5.event_id,
+ }, state_dict)
+
+ #######################################################
+ # deliberately remove e2 (room name) from the _state_group_cache
+
+ (is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get(group)
+
+ self.assertEqual(is_all, True)
+ self.assertEqual(known_absent, set())
+ self.assertDictEqual(state_dict_ids, {
+ (e1.type, e1.state_key): e1.event_id,
+ (e2.type, e2.state_key): e2.event_id,
+ (e3.type, e3.state_key): e3.event_id,
+ # e4 is overwritten by e5
+ (e5.type, e5.state_key): e5.event_id,
+ })
+
+ state_dict_ids.pop((e2.type, e2.state_key))
+ self.store._state_group_cache.invalidate(group)
+ self.store._state_group_cache.update(
+ sequence=self.store._state_group_cache.sequence,
+ key=group,
+ value=state_dict_ids,
+ # list fetched keys so it knows it's partial
+ fetched_keys=(
+ (e1.type, e1.state_key),
+ (e3.type, e3.state_key),
+ (e5.type, e5.state_key),
+ )
+ )
+
+ (is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get(group)
+
+ self.assertEqual(is_all, False)
+ self.assertEqual(known_absent, set([
+ (e1.type, e1.state_key),
+ (e3.type, e3.state_key),
+ (e5.type, e5.state_key),
+ ]))
+ self.assertDictEqual(state_dict_ids, {
+ (e1.type, e1.state_key): e1.event_id,
+ (e3.type, e3.state_key): e3.event_id,
+ (e5.type, e5.state_key): e5.event_id,
+ })
+
+ ############################################
+ # test that things work with a partial cache
+
+ # test _get_some_state_from_cache correctly filters out members with types=[]
+ room_id = self.room.to_string()
+ (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ group, [], filtered_types=[EventTypes.Member]
+ )
+
+ self.assertEqual(is_all, False)
+ self.assertDictEqual({
+ (e1.type, e1.state_key): e1.event_id,
+ }, state_dict)
+
+ # test _get_some_state_from_cache correctly filters in members wildcard types
+ (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member]
+ )
+
+ self.assertEqual(is_all, False)
+ self.assertDictEqual({
+ (e1.type, e1.state_key): e1.event_id,
+ (e3.type, e3.state_key): e3.event_id,
+ # e4 is overwritten by e5
+ (e5.type, e5.state_key): e5.event_id,
+ }, state_dict)
+
+ # test _get_some_state_from_cache correctly filters in members with specific types
+ (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ group, [(EventTypes.Member, e5.state_key)], filtered_types=[EventTypes.Member]
+ )
+
+ self.assertEqual(is_all, False)
+ self.assertDictEqual({
+ (e1.type, e1.state_key): e1.event_id,
+ (e5.type, e5.state_key): e5.event_id,
+ }, state_dict)
+
+ # test _get_some_state_from_cache correctly filters in members with specific types
+ # and no filtered_types
+ (state_dict, is_all) = yield self.store._get_some_state_from_cache(
+ group, [(EventTypes.Member, e5.state_key)], filtered_types=None
+ )
+
+ self.assertEqual(is_all, True)
+ self.assertDictEqual({
+ (e5.type, e5.state_key): e5.event_id,
+ }, state_dict)
diff --git a/tests/test_distributor.py b/tests/test_distributor.py
index 04a88056..71d11cda 100644
--- a/tests/test_distributor.py
+++ b/tests/test_distributor.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,8 +16,6 @@
from mock import Mock, patch
-from twisted.internet import defer
-
from synapse.util.distributor import Distributor
from . import unittest
@@ -27,38 +26,15 @@ class DistributorTestCase(unittest.TestCase):
def setUp(self):
self.dist = Distributor()
- @defer.inlineCallbacks
def test_signal_dispatch(self):
self.dist.declare("alert")
observer = Mock()
self.dist.observe("alert", observer)
- d = self.dist.fire("alert", 1, 2, 3)
- yield d
- self.assertTrue(d.called)
+ self.dist.fire("alert", 1, 2, 3)
observer.assert_called_with(1, 2, 3)
- @defer.inlineCallbacks
- def test_signal_dispatch_deferred(self):
- self.dist.declare("whine")
-
- d_inner = defer.Deferred()
-
- def observer():
- return d_inner
-
- self.dist.observe("whine", observer)
-
- d_outer = self.dist.fire("whine")
-
- self.assertFalse(d_outer.called)
-
- d_inner.callback(None)
- yield d_outer
- self.assertTrue(d_outer.called)
-
- @defer.inlineCallbacks
def test_signal_catch(self):
self.dist.declare("alarm")
@@ -71,9 +47,7 @@ class DistributorTestCase(unittest.TestCase):
with patch(
"synapse.util.distributor.logger", spec=["warning"]
) as mock_logger:
- d = self.dist.fire("alarm", "Go")
- yield d
- self.assertTrue(d.called)
+ self.dist.fire("alarm", "Go")
observers[0].assert_called_once_with("Go")
observers[1].assert_called_once_with("Go")
@@ -83,34 +57,12 @@ class DistributorTestCase(unittest.TestCase):
mock_logger.warning.call_args[0][0], str
)
- @defer.inlineCallbacks
- def test_signal_catch_no_suppress(self):
- # Gut-wrenching
- self.dist.suppress_failures = False
-
- self.dist.declare("whail")
-
- class MyException(Exception):
- pass
-
- @defer.inlineCallbacks
- def observer():
- raise MyException("Oopsie")
-
- self.dist.observe("whail", observer)
-
- d = self.dist.fire("whail")
-
- yield self.assertFailure(d, MyException)
- self.dist.suppress_failures = True
-
- @defer.inlineCallbacks
def test_signal_prereg(self):
observer = Mock()
self.dist.observe("flare", observer)
self.dist.declare("flare")
- yield self.dist.fire("flare", 4, 5)
+ self.dist.fire("flare", 4, 5)
observer.assert_called_with(4, 5)
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 159a1369..f40ff29b 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -137,7 +137,6 @@ class MessageAcceptTests(unittest.TestCase):
)
self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
- @unittest.DEBUG
def test_cant_hide_past_history(self):
"""
If you send a message, you must be able to provide the direct
@@ -178,7 +177,7 @@ class MessageAcceptTests(unittest.TestCase):
for x, y in d.items()
if x == ("m.room.member", "@us:test")
],
- "auth_chain_ids": d.values(),
+ "auth_chain_ids": list(d.values()),
}
)
diff --git a/tests/test_state.py b/tests/test_state.py
index c0f2d115..429a18cb 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -204,7 +204,8 @@ class StateTestCase(unittest.TestCase):
self.store.register_event_context(event, context)
context_store[event.event_id] = context
- self.assertEqual(2, len(context_store["D"].prev_state_ids))
+ prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
+ self.assertEqual(2, len(prev_state_ids))
@defer.inlineCallbacks
def test_branch_basic_conflict(self):
@@ -255,9 +256,11 @@ class StateTestCase(unittest.TestCase):
self.store.register_event_context(event, context)
context_store[event.event_id] = context
+ prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
+
self.assertSetEqual(
{"START", "A", "C"},
- {e_id for e_id in context_store["D"].prev_state_ids.values()}
+ {e_id for e_id in prev_state_ids.values()}
)
@defer.inlineCallbacks
@@ -318,9 +321,11 @@ class StateTestCase(unittest.TestCase):
self.store.register_event_context(event, context)
context_store[event.event_id] = context
+ prev_state_ids = yield context_store["E"].get_prev_state_ids(self.store)
+
self.assertSetEqual(
{"START", "A", "B", "C"},
- {e for e in context_store["E"].prev_state_ids.values()}
+ {e for e in prev_state_ids.values()}
)
@defer.inlineCallbacks
@@ -398,9 +403,11 @@ class StateTestCase(unittest.TestCase):
self.store.register_event_context(event, context)
context_store[event.event_id] = context
+ prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
+
self.assertSetEqual(
{"A1", "A2", "A3", "A5", "B"},
- {e for e in context_store["D"].prev_state_ids.values()}
+ {e for e in prev_state_ids.values()}
)
def _add_depths(self, nodes, edges):
@@ -429,8 +436,10 @@ class StateTestCase(unittest.TestCase):
event, old_state=old_state
)
+ current_state_ids = yield context.get_current_state_ids(self.store)
+
self.assertEqual(
- set(e.event_id for e in old_state), set(context.current_state_ids.values())
+ set(e.event_id for e in old_state), set(current_state_ids.values())
)
self.assertIsNotNone(context.state_group)
@@ -449,8 +458,10 @@ class StateTestCase(unittest.TestCase):
event, old_state=old_state
)
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+
self.assertEqual(
- set(e.event_id for e in old_state), set(context.prev_state_ids.values())
+ set(e.event_id for e in old_state), set(prev_state_ids.values())
)
@defer.inlineCallbacks
@@ -475,9 +486,11 @@ class StateTestCase(unittest.TestCase):
context = yield self.state.compute_event_context(event)
+ current_state_ids = yield context.get_current_state_ids(self.store)
+
self.assertEqual(
set([e.event_id for e in old_state]),
- set(context.current_state_ids.values())
+ set(current_state_ids.values())
)
self.assertEqual(group_name, context.state_group)
@@ -504,9 +517,11 @@ class StateTestCase(unittest.TestCase):
context = yield self.state.compute_event_context(event)
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+
self.assertEqual(
set([e.event_id for e in old_state]),
- set(context.prev_state_ids.values())
+ set(prev_state_ids.values())
)
self.assertIsNotNone(context.state_group)
@@ -545,7 +560,9 @@ class StateTestCase(unittest.TestCase):
event, prev_event_id1, old_state_1, prev_event_id2, old_state_2,
)
- self.assertEqual(len(context.current_state_ids), 6)
+ current_state_ids = yield context.get_current_state_ids(self.store)
+
+ self.assertEqual(len(current_state_ids), 6)
self.assertIsNotNone(context.state_group)
@@ -585,7 +602,9 @@ class StateTestCase(unittest.TestCase):
event, prev_event_id1, old_state_1, prev_event_id2, old_state_2,
)
- self.assertEqual(len(context.current_state_ids), 6)
+ current_state_ids = yield context.get_current_state_ids(self.store)
+
+ self.assertEqual(len(current_state_ids), 6)
self.assertIsNotNone(context.state_group)
@@ -642,8 +661,10 @@ class StateTestCase(unittest.TestCase):
event, prev_event_id1, old_state_1, prev_event_id2, old_state_2,
)
+ current_state_ids = yield context.get_current_state_ids(self.store)
+
self.assertEqual(
- old_state_2[3].event_id, context.current_state_ids[("test1", "1")]
+ old_state_2[3].event_id, current_state_ids[("test1", "1")]
)
# Reverse the depth to make sure we are actually using the depths
@@ -670,8 +691,10 @@ class StateTestCase(unittest.TestCase):
event, prev_event_id1, old_state_1, prev_event_id2, old_state_2,
)
+ current_state_ids = yield context.get_current_state_ids(self.store)
+
self.assertEqual(
- old_state_1[3].event_id, context.current_state_ids[("test1", "1")]
+ old_state_1[3].event_id, current_state_ids[("test1", "1")]
)
def _get_context(self, event, prev_event_id_1, old_state_1, prev_event_id_2,
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 8176a7da..ca8a7c90 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -273,3 +273,104 @@ class DescriptorTestCase(unittest.TestCase):
r = yield obj.fn(2, 3)
self.assertEqual(r, 'chips')
obj.mock.assert_not_called()
+
+
+class CachedListDescriptorTestCase(unittest.TestCase):
+ @defer.inlineCallbacks
+ def test_cache(self):
+ class Cls(object):
+ def __init__(self):
+ self.mock = mock.Mock()
+
+ @descriptors.cached()
+ def fn(self, arg1, arg2):
+ pass
+
+ @descriptors.cachedList("fn", "args1", inlineCallbacks=True)
+ def list_fn(self, args1, arg2):
+ assert (
+ logcontext.LoggingContext.current_context().request == "c1"
+ )
+ # we want this to behave like an asynchronous function
+ yield run_on_reactor()
+ assert (
+ logcontext.LoggingContext.current_context().request == "c1"
+ )
+ defer.returnValue(self.mock(args1, arg2))
+
+ with logcontext.LoggingContext() as c1:
+ c1.request = "c1"
+ obj = Cls()
+ obj.mock.return_value = {10: 'fish', 20: 'chips'}
+ d1 = obj.list_fn([10, 20], 2)
+ self.assertEqual(
+ logcontext.LoggingContext.current_context(),
+ logcontext.LoggingContext.sentinel,
+ )
+ r = yield d1
+ self.assertEqual(
+ logcontext.LoggingContext.current_context(),
+ c1
+ )
+ obj.mock.assert_called_once_with([10, 20], 2)
+ self.assertEqual(r, {10: 'fish', 20: 'chips'})
+ obj.mock.reset_mock()
+
+ # a call with different params should call the mock again
+ obj.mock.return_value = {30: 'peas'}
+ r = yield obj.list_fn([20, 30], 2)
+ obj.mock.assert_called_once_with([30], 2)
+ self.assertEqual(r, {20: 'chips', 30: 'peas'})
+ obj.mock.reset_mock()
+
+ # all the values should now be cached
+ r = yield obj.fn(10, 2)
+ self.assertEqual(r, 'fish')
+ r = yield obj.fn(20, 2)
+ self.assertEqual(r, 'chips')
+ r = yield obj.fn(30, 2)
+ self.assertEqual(r, 'peas')
+ r = yield obj.list_fn([10, 20, 30], 2)
+ obj.mock.assert_not_called()
+ self.assertEqual(r, {10: 'fish', 20: 'chips', 30: 'peas'})
+
+ @defer.inlineCallbacks
+ def test_invalidate(self):
+ """Make sure that invalidation callbacks are called."""
+ class Cls(object):
+ def __init__(self):
+ self.mock = mock.Mock()
+
+ @descriptors.cached()
+ def fn(self, arg1, arg2):
+ pass
+
+ @descriptors.cachedList("fn", "args1", inlineCallbacks=True)
+ def list_fn(self, args1, arg2):
+ # we want this to behave like an asynchronous function
+ yield run_on_reactor()
+ defer.returnValue(self.mock(args1, arg2))
+
+ obj = Cls()
+ invalidate0 = mock.Mock()
+ invalidate1 = mock.Mock()
+
+ # cache miss
+ obj.mock.return_value = {10: 'fish', 20: 'chips'}
+ r1 = yield obj.list_fn([10, 20], 2, on_invalidate=invalidate0)
+ obj.mock.assert_called_once_with([10, 20], 2)
+ self.assertEqual(r1, {10: 'fish', 20: 'chips'})
+ obj.mock.reset_mock()
+
+ # cache hit
+ r2 = yield obj.list_fn([10, 20], 2, on_invalidate=invalidate1)
+ obj.mock.assert_not_called()
+ self.assertEqual(r2, {10: 'fish', 20: 'chips'})
+
+ invalidate0.assert_not_called()
+ invalidate1.assert_not_called()
+
+ # now if we invalidate the keys, both invalidations should get called
+ obj.fn.invalidate((10, 2))
+ invalidate0.assert_called_once()
+ invalidate1.assert_called_once()
diff --git a/tests/util/test_limiter.py b/tests/util/test_limiter.py
deleted file mode 100644
index a5a767b1..00000000
--- a/tests/util/test_limiter.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from twisted.internet import defer
-
-from synapse.util.async import Limiter
-
-from tests import unittest
-
-
-class LimiterTestCase(unittest.TestCase):
-
- @defer.inlineCallbacks
- def test_limiter(self):
- limiter = Limiter(3)
-
- key = object()
-
- d1 = limiter.queue(key)
- cm1 = yield d1
-
- d2 = limiter.queue(key)
- cm2 = yield d2
-
- d3 = limiter.queue(key)
- cm3 = yield d3
-
- d4 = limiter.queue(key)
- self.assertFalse(d4.called)
-
- d5 = limiter.queue(key)
- self.assertFalse(d5.called)
-
- with cm1:
- self.assertFalse(d4.called)
- self.assertFalse(d5.called)
-
- self.assertTrue(d4.called)
- self.assertFalse(d5.called)
-
- with cm3:
- self.assertFalse(d5.called)
-
- self.assertTrue(d5.called)
-
- with cm2:
- pass
-
- with (yield d4):
- pass
-
- with (yield d5):
- pass
-
- d6 = limiter.queue(key)
- with (yield d6):
- pass
diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py
index c95907b3..4729bd5a 100644
--- a/tests/util/test_linearizer.py
+++ b/tests/util/test_linearizer.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,6 +17,7 @@
from six.moves import range
from twisted.internet import defer, reactor
+from twisted.internet.defer import CancelledError
from synapse.util import Clock, logcontext
from synapse.util.async import Linearizer
@@ -65,3 +67,79 @@ class LinearizerTestCase(unittest.TestCase):
func(i)
return func(1000)
+
+ @defer.inlineCallbacks
+ def test_multiple_entries(self):
+ limiter = Linearizer(max_count=3)
+
+ key = object()
+
+ d1 = limiter.queue(key)
+ cm1 = yield d1
+
+ d2 = limiter.queue(key)
+ cm2 = yield d2
+
+ d3 = limiter.queue(key)
+ cm3 = yield d3
+
+ d4 = limiter.queue(key)
+ self.assertFalse(d4.called)
+
+ d5 = limiter.queue(key)
+ self.assertFalse(d5.called)
+
+ with cm1:
+ self.assertFalse(d4.called)
+ self.assertFalse(d5.called)
+
+ cm4 = yield d4
+ self.assertFalse(d5.called)
+
+ with cm3:
+ self.assertFalse(d5.called)
+
+ cm5 = yield d5
+
+ with cm2:
+ pass
+
+ with cm4:
+ pass
+
+ with cm5:
+ pass
+
+ d6 = limiter.queue(key)
+ with (yield d6):
+ pass
+
+ @defer.inlineCallbacks
+ def test_cancellation(self):
+ linearizer = Linearizer()
+
+ key = object()
+
+ d1 = linearizer.queue(key)
+ cm1 = yield d1
+
+ d2 = linearizer.queue(key)
+ self.assertFalse(d2.called)
+
+ d3 = linearizer.queue(key)
+ self.assertFalse(d3.called)
+
+ d2.cancel()
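+ # when cm1 is released, the cancelled d2 should be skipped over and
+ # the lock handed straight to d3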
+
+ with cm1:
+ pass
+
+ self.assertTrue(d2.called)
+ try:
+ yield d2
+ self.fail("Expected d2 to raise CancelledError")
+ except CancelledError:
+ pass
+
+ with (yield d3):
+ pass
diff --git a/tests/utils.py b/tests/utils.py
index e488238b..9bff3ff3 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -71,6 +71,8 @@ def setup_test_homeserver(name="test", datastore=None, config=None, reactor=None
config.user_directory_search_all_users = False
config.user_consent_server_notice_content = None
config.block_events_without_consent_error = None
+ config.media_storage_providers = []
+ config.auto_join_rooms = []
# disable user directory updates, because they get done in the
# background, which upsets the test runner.
@@ -136,6 +138,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, reactor=None
database_engine=db_engine,
room_list_handler=object(),
tls_server_context_factory=Mock(),
+ reactor=reactor,
**kargs
)
@@ -190,7 +193,7 @@ class MockHttpResource(HttpServer):
self.prefix = prefix
def trigger_get(self, path):
- return self.trigger("GET", path, None)
+ return self.trigger(b"GET", path, None)
@patch('twisted.web.http.Request')
@defer.inlineCallbacks
@@ -224,7 +227,7 @@ class MockHttpResource(HttpServer):
headers = {}
if federation_auth:
- headers[b"Authorization"] = ["X-Matrix origin=test,key=,sig="]
+ headers[b"Authorization"] = [b"X-Matrix origin=test,key=,sig="]
mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers)
# return the right path if the event requires it
@@ -238,6 +241,9 @@ class MockHttpResource(HttpServer):
except Exception:
pass
+ if isinstance(path, bytes):
+ path = path.decode('utf8')
+
for (method, pattern, func) in self.callbacks:
if http_method != method:
continue
@@ -246,7 +252,7 @@ class MockHttpResource(HttpServer):
if matcher:
try:
args = [
- urlparse.unquote(u).decode("UTF-8")
+ urlparse.unquote(u)
for u in matcher.groups()
]