author     Andrej Shadura <andrewsh@debian.org>    2021-07-15 15:18:43 +0200
committer  Andrej Shadura <andrewsh@debian.org>    2021-07-15 15:18:43 +0200
commit     24e6fe3b86ca73a4d6e8acc53aed72006c73007a (patch)
tree       91c206c2433c8adbba7231999f9d8c868c988740
parent     05c31404c2a8be9d68f67f451a4e224bc5b4ecb7 (diff)
parent     539951e41b5b5e24989abd7f33c7535bbe0c8c61 (diff)
Merge branch 'debian/unstable' into debian/buster-fasttrack
-rw-r--r--  .github/workflows/docs.yaml | 64
-rw-r--r--  .github/workflows/release-artifacts.yml | 90
-rw-r--r--  CHANGES.md | 136
-rw-r--r--  INSTALL.md | 594
-rw-r--r--  README.rst | 18
-rw-r--r--  UPGRADE.rst | 1340
-rw-r--r--  contrib/systemd/README.md | 3
-rw-r--r--  debian/changelog | 7
-rw-r--r--  debian/docs | 4
-rwxr-xr-x  debian/rules | 6
-rw-r--r--  docker/README.md | 4
-rw-r--r--  docs/.sample_config_header.yaml | 3
-rw-r--r--  docs/MSC1711_certificates_FAQ.md | 2
-rw-r--r--  docs/SUMMARY.md | 2
-rw-r--r--  docs/admin_api/user_admin_api.md | 12
-rw-r--r--  docs/modules.md | 2
-rw-r--r--  docs/postgres.md | 4
-rw-r--r--  docs/presence_router_module.md | 4
-rw-r--r--  docs/sample_config.yaml | 109
-rw-r--r--  docs/sample_log_config.yaml | 2
-rw-r--r--  docs/setup/installation.md | 603
-rw-r--r--  docs/upgrade.md | 1391
-rw-r--r--  docs/upgrading/README.md | 7
-rw-r--r--  mypy.ini | 1
-rwxr-xr-x  scripts-dev/build_debian_packages | 17
-rwxr-xr-x  scripts-dev/complement.sh | 2
-rwxr-xr-x  scripts-dev/release.py | 52
-rwxr-xr-x  scripts/synapse_port_db | 4
-rwxr-xr-x  scripts/synapse_review_recent_signups | 19
-rw-r--r--  synapse/__init__.py | 2
-rw-r--r--  synapse/_scripts/review_recent_signups.py | 175
-rw-r--r--  synapse/api/auth.py | 79
-rw-r--r--  synapse/api/constants.py | 6
-rw-r--r--  synapse/app/_base.py | 11
-rw-r--r--  synapse/config/_base.pyi | 2
-rw-r--r--  synapse/config/cache.py | 70
-rw-r--r--  synapse/config/consent.py | 2
-rw-r--r--  synapse/config/database.py | 3
-rw-r--r--  synapse/config/jwt.py | 2
-rw-r--r--  synapse/config/logger.py | 2
-rw-r--r--  synapse/config/modules.py | 2
-rw-r--r--  synapse/config/oidc.py | 4
-rw-r--r--  synapse/config/password_auth_providers.py | 2
-rw-r--r--  synapse/config/registration.py | 21
-rw-r--r--  synapse/config/repository.py | 2
-rw-r--r--  synapse/config/server.py | 23
-rw-r--r--  synapse/config/spam_checker.py | 2
-rw-r--r--  synapse/config/stats.py | 2
-rw-r--r--  synapse/config/tracer.py | 2
-rw-r--r--  synapse/config/user_directory.py | 2
-rw-r--r--  synapse/event_auth.py | 5
-rw-r--r--  synapse/events/__init__.py | 2
-rw-r--r--  synapse/events/builder.py | 77
-rw-r--r--  synapse/federation/federation_base.py | 12
-rw-r--r--  synapse/federation/federation_server.py | 232
-rw-r--r--  synapse/federation/transport/server.py | 592
-rw-r--r--  synapse/handlers/admin.py | 7
-rw-r--r--  synapse/handlers/auth.py | 132
-rw-r--r--  synapse/handlers/event_auth.py | 62
-rw-r--r--  synapse/handlers/federation.py | 255
-rw-r--r--  synapse/handlers/message.py | 16
-rw-r--r--  synapse/handlers/register.py | 115
-rw-r--r--  synapse/handlers/room.py | 3
-rw-r--r--  synapse/handlers/space_summary.py | 17
-rw-r--r--  synapse/http/server.py | 2
-rw-r--r--  synapse/http/servlet.py | 50
-rw-r--r--  synapse/module_api/__init__.py | 2
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py | 4
-rw-r--r--  synapse/replication/http/login.py | 13
-rw-r--r--  synapse/rest/client/v1/login.py | 171
-rw-r--r--  synapse/rest/client/v2_alpha/register.py | 88
-rw-r--r--  synapse/rest/client/v2_alpha/sync.py | 69
-rw-r--r--  synapse/storage/database.py | 2
-rw-r--r--  synapse/storage/databases/main/event_federation.py | 114
-rw-r--r--  synapse/storage/databases/main/events_bg_updates.py | 186
-rw-r--r--  synapse/storage/databases/main/lock.py | 15
-rw-r--r--  synapse/storage/databases/main/profile.py | 8
-rw-r--r--  synapse/storage/databases/main/registration.py | 207
-rw-r--r--  synapse/storage/engines/_base.py | 6
-rw-r--r--  synapse/storage/engines/postgres.py | 5
-rw-r--r--  synapse/storage/engines/sqlite.py | 5
-rw-r--r--  synapse/storage/schema/__init__.py | 2
-rw-r--r--  synapse/storage/schema/main/delta/59/14refresh_tokens.sql | 34
-rw-r--r--  synapse/storage/schema/main/delta/60/01recreate_stream_ordering.sql.postgres | 45
-rw-r--r--  synapse/storage/schema/main/delta/60/02change_stream_ordering_columns.sql.postgres | 30
-rw-r--r--  synapse/util/caches/lrucache.py | 237
-rw-r--r--  synapse/util/linked_list.py | 150
-rw-r--r--  sytest-blacklist | 1
-rw-r--r--  tests/api/test_auth.py | 1
-rw-r--r--  tests/federation/transport/test_knocking.py | 4
-rw-r--r--  tests/handlers/test_device.py | 2
-rw-r--r--  tests/handlers/test_federation.py | 2
-rw-r--r--  tests/handlers/test_presence.py | 4
-rw-r--r--  tests/handlers/test_register.py | 49
-rw-r--r--  tests/handlers/test_space_summary.py | 48
-rw-r--r--  tests/replication/test_federation_sender_shard.py | 2
-rw-r--r--  tests/rest/admin/test_user.py | 310
-rw-r--r--  tests/rest/client/v1/utils.py | 3
-rw-r--r--  tests/rest/client/v2_alpha/test_auth.py | 220
-rw-r--r--  tests/rest/client/v2_alpha/test_sync.py | 30
-rw-r--r--  tests/server_notices/test_resource_limits_server_notices.py | 8
-rw-r--r--  tests/util/test_lrucache.py | 46
102 files changed, 5664 insertions, 2956 deletions
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 23b8d7f9..808f8253 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -7,6 +7,8 @@ on:
- develop
# For documentation specific to a release
- 'release-v*'
+ # stable docs
+ - master
workflow_dispatch:
@@ -23,42 +25,42 @@ jobs:
mdbook-version: '0.4.9'
- name: Build the documentation
- run: mdbook build
-
- # Deploy to the latest documentation directories
- - name: Deploy latest documentation
- uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
- with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
- keep_files: true
- publish_dir: ./book
- destination_dir: ./develop
+ # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
+ # However, we're using docs/README.md for other purposes and need to pick a new page
+ # as the default. Let's opt for the welcome page instead.
+ run: |
+ mdbook build
+ cp book/welcome_and_overview.html book/index.html
- - name: Get the current Synapse version
+ # Figure out the target directory.
+ #
+ # The target directory depends on the name of the branch
+ #
+ - name: Get the target directory name
id: vars
- # The $GITHUB_REF value for a branch looks like `refs/heads/release-v1.2`. We do some
- # shell magic to remove the "refs/heads/release-v" bit from this, to end up with "1.2",
- # our major/minor version number, and set this to a var called `branch-version`.
- #
- # We then use some python to get Synapse's full version string, which may look
- # like "1.2.3rc4". We set this to a var called `synapse-version`. We use this
- # to determine if this release is still an RC, and if so block deployment.
run: |
- echo ::set-output name=branch-version::${GITHUB_REF#refs/heads/release-v}
- echo ::set-output name=synapse-version::`python3 -c 'import synapse; print(synapse.__version__)'`
+ # first strip the 'refs/heads/' prefix with some shell foo
+ branch="${GITHUB_REF#refs/heads/}"
- # Deploy to the version-specific directory
- - name: Deploy release-specific documentation
- # We only carry out this step if we're running on a release branch,
- # and the current Synapse version does not have "rc" in the name.
- #
- # The result is that only full releases are deployed, but can be
- # updated if the release branch gets retroactive fixes.
- if: ${{ startsWith( github.ref, 'refs/heads/release-v' ) && !contains( steps.vars.outputs.synapse-version, 'rc') }}
- uses: peaceiris/actions-gh-pages@v3
+ case $branch in
+ release-*)
+ # strip 'release-' from the name for release branches.
+ branch="${branch#release-}"
+ ;;
+ master)
+ # deploy to "latest" for the master branch.
+ branch="latest"
+ ;;
+ esac
+
+ # finally, set the 'branch-version' var.
+ echo "::set-output name=branch-version::$branch"
+
+ # Deploy to the target directory.
+ - name: Deploy to gh pages
+ uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
keep_files: true
publish_dir: ./book
- # The resulting documentation will end up in a directory named `vX.Y`.
- destination_dir: ./v${{ steps.vars.outputs.branch-version }}
+ destination_dir: ./${{ steps.vars.outputs.branch-version }}
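For reference, the branch-to-directory mapping implemented above can be exercised standalone. A minimal sketch using hypothetical branch refs (the real workflow receives `$GITHUB_REF` from GitHub Actions):

```sh
#!/bin/sh
# Sketch of the target-directory mapping from the workflow above,
# run against hypothetical refs.
for GITHUB_REF in refs/heads/develop refs/heads/release-v1.38 refs/heads/master; do
    branch="${GITHUB_REF#refs/heads/}"      # strip the 'refs/heads/' prefix
    case $branch in
        release-*) branch="${branch#release-}" ;;   # release-v1.38 -> v1.38
        master)    branch="latest" ;;               # master -> latest
    esac
    echo "$GITHUB_REF -> $branch"
done
# prints:
#   refs/heads/develop -> develop
#   refs/heads/release-v1.38 -> v1.38
#   refs/heads/master -> latest
```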
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
new file mode 100644
index 00000000..f292d703
--- /dev/null
+++ b/.github/workflows/release-artifacts.yml
@@ -0,0 +1,90 @@
+# GitHub actions workflow which builds the release artifacts.
+
+name: Build release artifacts
+
+on:
+ push:
+ # we build on develop and release branches to (hopefully) get early warning
+ # of things breaking
+ branches: ["develop", "release-*"]
+
+ # we also rebuild on tags, so that we can be sure of picking the artifacts
+ # from the right tag.
+ tags: ["v*"]
+
+permissions:
+ contents: write
+
+jobs:
+ # first get the list of distros to build for.
+ get-distros:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ - id: set-distros
+ run: |
+ echo "::set-output name=distros::$(scripts-dev/build_debian_packages --show-dists-json)"
+ # map the step outputs to job outputs
+ outputs:
+ distros: ${{ steps.set-distros.outputs.distros }}
+
+ # now build the packages with a matrix build.
+ build-debs:
+ needs: get-distros
+ name: "Build .deb packages"
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ distro: ${{ fromJson(needs.get-distros.outputs.distros) }}
+
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ path: src
+ - uses: actions/setup-python@v2
+ - run: ./src/scripts-dev/build_debian_packages "${{ matrix.distro }}"
+ - uses: actions/upload-artifact@v2
+ with:
+ name: debs
+ path: debs/*
+
+ build-sdist:
+ name: "Build pypi distribution files"
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ - run: pip install wheel
+ - run: |
+ python setup.py sdist bdist_wheel
+ - uses: actions/upload-artifact@v2
+ with:
+ name: python-dist
+ path: dist/*
+
+ # if it's a tag, create a release and attach the artifacts to it
+ attach-assets:
+ name: "Attach assets to release"
+ if: startsWith(github.ref, 'refs/tags/')
+ needs:
+ - build-debs
+ - build-sdist
+ runs-on: ubuntu-latest
+ steps:
+ - name: Download all workflow run artifacts
+ uses: actions/download-artifact@v2
+ - name: Build a tarball for the debs
+ run: tar -cvJf debs.tar.xz debs
+ - name: Attach to release
+ uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ files: |
+ python-dist/*
+ debs.tar.xz
+ # if it's not already published, keep the release as a draft.
+ draft: true
+ # mark it as a prerelease if the tag contains 'rc'.
+ prerelease: ${{ contains(github.ref, 'rc') }}
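The `get-distros` job relies on `scripts-dev/build_debian_packages --show-dists-json` printing a JSON array of distributions, which `fromJson` then fans out into one `build-debs` job per entry. A hypothetical run (the exact names depend on the script's supported distributions):

```sh
scripts-dev/build_debian_packages --show-dists-json
# hypothetical output, consumed by fromJson() in the matrix:
# ["debian:buster", "debian:bullseye", "ubuntu:focal"]
```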
diff --git a/CHANGES.md b/CHANGES.md
index 7b6e052a..82baaa2d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,85 @@
+Synapse 1.38.0 (2021-07-13)
+===========================
+
+This release includes a database schema update which could result in elevated disk usage. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1380) for more information.
+
+No significant changes since 1.38.0rc3.
+
+
+Synapse 1.38.0rc3 (2021-07-13)
+==============================
+
+Internal Changes
+----------------
+
+- Build the Debian packages in CI. ([\#10247](https://github.com/matrix-org/synapse/issues/10247), [\#10379](https://github.com/matrix-org/synapse/issues/10379))
+
+
+Synapse 1.38.0rc2 (2021-07-09)
+==============================
+
+Bugfixes
+--------
+
+- Fix bug where inbound federation in a room could be delayed due to not correctly dropping a lock. Introduced in v1.37.1. ([\#10336](https://github.com/matrix-org/synapse/issues/10336))
+
+
+Improved Documentation
+----------------------
+
+- Update links to documentation in the sample config. Contributed by @dklimpel. ([\#10287](https://github.com/matrix-org/synapse/issues/10287))
+- Fix broken links in [INSTALL.md](INSTALL.md). Contributed by @dklimpel. ([\#10331](https://github.com/matrix-org/synapse/issues/10331))
+
+
+Synapse 1.38.0rc1 (2021-07-06)
+==============================
+
+Features
+--------
+
+- Implement refresh tokens as specified by [MSC2918](https://github.com/matrix-org/matrix-doc/pull/2918). ([\#9450](https://github.com/matrix-org/synapse/issues/9450))
+- Add support for evicting cache entries based on last access time. ([\#10205](https://github.com/matrix-org/synapse/issues/10205))
+- Omit empty fields from the `/sync` response. Contributed by @deepbluev7. ([\#10214](https://github.com/matrix-org/synapse/issues/10214))
+- Improve validation on federation `send_{join,leave,knock}` endpoints. ([\#10225](https://github.com/matrix-org/synapse/issues/10225), [\#10243](https://github.com/matrix-org/synapse/issues/10243))
+- Add SSO `external_ids` to the Query User Account admin API. ([\#10261](https://github.com/matrix-org/synapse/issues/10261))
+- Mark events received over federation which fail a spam check as "soft-failed". ([\#10263](https://github.com/matrix-org/synapse/issues/10263))
+- Add metrics for new inbound federation staging area. ([\#10284](https://github.com/matrix-org/synapse/issues/10284))
+- Add script to print information about recently registered users. ([\#10290](https://github.com/matrix-org/synapse/issues/10290))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug which meant that invite rejections and knocks were not sent out over federation in a timely manner. ([\#10223](https://github.com/matrix-org/synapse/issues/10223))
+- Fix a bug introduced in v1.26.0 where only users who have set profile information could be deactivated with erasure enabled. ([\#10252](https://github.com/matrix-org/synapse/issues/10252))
+- Fix a long-standing bug where Synapse would return errors after 2<sup>31</sup> events were handled by the server. ([\#10264](https://github.com/matrix-org/synapse/issues/10264), [\#10267](https://github.com/matrix-org/synapse/issues/10267), [\#10282](https://github.com/matrix-org/synapse/issues/10282), [\#10286](https://github.com/matrix-org/synapse/issues/10286), [\#10291](https://github.com/matrix-org/synapse/issues/10291), [\#10314](https://github.com/matrix-org/synapse/issues/10314), [\#10326](https://github.com/matrix-org/synapse/issues/10326))
+- Fix the prometheus `synapse_federation_server_pdu_process_time` metric. Broke in v1.37.1. ([\#10279](https://github.com/matrix-org/synapse/issues/10279))
+- Ensure that inbound events from federation that were being processed when Synapse was restarted get promptly processed on start up. ([\#10303](https://github.com/matrix-org/synapse/issues/10303))
+
+
+Improved Documentation
+----------------------
+
+- Move the upgrade notes to [docs/upgrade.md](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md) and convert them to markdown. ([\#10166](https://github.com/matrix-org/synapse/issues/10166))
+- Choose Welcome & Overview as the default page for synapse documentation website. ([\#10242](https://github.com/matrix-org/synapse/issues/10242))
+- Adjust the URL in the README.rst file to point to irc.libera.chat. ([\#10258](https://github.com/matrix-org/synapse/issues/10258))
+- Fix homeserver config option name in presence router documentation. ([\#10288](https://github.com/matrix-org/synapse/issues/10288))
+- Fix link pointing at the wrong section in the modules documentation page. ([\#10302](https://github.com/matrix-org/synapse/issues/10302))
+
+
+Internal Changes
+----------------
+
+- Drop `Origin` and `Accept` from the value of the `Access-Control-Allow-Headers` response header. ([\#10114](https://github.com/matrix-org/synapse/issues/10114))
+- Add type hints to the federation servlets. ([\#10213](https://github.com/matrix-org/synapse/issues/10213))
+- Improve the reliability of auto-joining remote rooms. ([\#10237](https://github.com/matrix-org/synapse/issues/10237))
+- Update the release script to use the semver terminology and determine the release branch based on the next version. ([\#10239](https://github.com/matrix-org/synapse/issues/10239))
+- Fix type hints for computing auth events. ([\#10253](https://github.com/matrix-org/synapse/issues/10253))
+- Improve the performance of the spaces summary endpoint by only recursing into spaces (and not rooms in general). ([\#10256](https://github.com/matrix-org/synapse/issues/10256))
+- Move event authentication methods from `Auth` to `EventAuthHandler`. ([\#10268](https://github.com/matrix-org/synapse/issues/10268))
+- Re-enable a SyTest after it has been fixed. ([\#10292](https://github.com/matrix-org/synapse/issues/10292))
+
+
Synapse 1.37.1 (2021-06-30)
===========================
@@ -775,7 +857,7 @@ Internal Changes
Synapse 1.29.0 (2021-03-08)
===========================
-Note that synapse now expects an `X-Forwarded-Proto` header when used with a reverse proxy. Please see [UPGRADE.rst](UPGRADE.rst#upgrading-to-v1290) for more details on this change.
+Note that synapse now expects an `X-Forwarded-Proto` header when used with a reverse proxy. Please see the [upgrade notes](docs/upgrade.md#upgrading-to-v1290) for more details on this change.
No significant changes.
@@ -840,7 +922,7 @@ Synapse 1.28.0 (2021-02-25)
Note that this release drops support for ARMv7 in the official Docker images, due to repeated problems building for ARMv7 (and the associated maintenance burden this entails).
-This release also fixes the documentation included in v1.27.0 around the callback URI for SAML2 identity providers. If your server is configured to use single sign-on via a SAML2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
+This release also fixes the documentation included in v1.27.0 around the callback URI for SAML2 identity providers. If your server is configured to use single sign-on via a SAML2 IdP, you may need to make configuration changes. Please review the [upgrade notes](docs/upgrade.md) for more details on these changes.
Internal Changes
@@ -939,9 +1021,9 @@ Synapse 1.27.0 (2021-02-16)
Note that this release includes a change in Synapse to use Redis as a cache ─ as well as a pub/sub mechanism ─ if Redis support is enabled for workers. No action is needed by server administrators, and we do not expect resource usage of the Redis instance to change dramatically.
-This release also changes the callback URI for OpenID Connect (OIDC) and SAML2 identity providers. If your server is configured to use single sign-on via an OIDC/OAuth2 or SAML2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
+This release also changes the callback URI for OpenID Connect (OIDC) and SAML2 identity providers. If your server is configured to use single sign-on via an OIDC/OAuth2 or SAML2 IdP, you may need to make configuration changes. Please review the [upgrade notes](docs/upgrade.md) for more details on these changes.
-This release also changes escaping of variables in the HTML templates for SSO or email notifications. If you have customised these templates, please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
+This release also changes escaping of variables in the HTML templates for SSO or email notifications. If you have customised these templates, please review the [upgrade notes](docs/upgrade.md) for more details on these changes.
Bugfixes
@@ -1045,7 +1127,7 @@ Synapse 1.26.0 (2021-01-27)
===========================
This release brings a new schema version for Synapse and rolling back to a previous
-version is not trivial. Please review [UPGRADE.rst](UPGRADE.rst) for more details
+version is not trivial. Please review the [upgrade notes](docs/upgrade.md) for more details
on these changes and for general upgrade guidance.
No significant changes since 1.26.0rc2.
@@ -1072,7 +1154,7 @@ Synapse 1.26.0rc1 (2021-01-20)
==============================
This release brings a new schema version for Synapse and rolling back to a previous
-version is not trivial. Please review [UPGRADE.rst](UPGRADE.rst) for more details
+version is not trivial. Please review the [upgrade notes](docs/upgrade.md) for more details
on these changes and for general upgrade guidance.
Features
@@ -1175,7 +1257,10 @@ Crucially, this means __we will not produce .deb packages for Debian 9 (Stretch)
The website https://endoflife.date/ has convenient summaries of the support schedules for projects like [Python](https://endoflife.date/python) and [PostgreSQL](https://endoflife.date/postgresql).
-If you are unable to upgrade your environment to a supported version of Python or Postgres, we encourage you to consider using the [Synapse Docker images](./INSTALL.md#docker-images-and-ansible-playbooks) instead.
+If you are unable to upgrade your environment to a supported version of Python or
+Postgres, we encourage you to consider using the
+[Synapse Docker images](https://matrix-org.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks)
+instead.
### Transition Period
@@ -1318,11 +1403,11 @@ To upgrade Synapse along with the cryptography package:
* Administrators using the [`matrix.org` Docker
image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu
packages from
- `matrix.org`](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#matrixorg-packages)
+ `matrix.org`](https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages)
should ensure that they have version 1.24.0 or 1.23.1 installed: these images include
the updated packages.
* Administrators who have [installed Synapse from
- source](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#installing-from-source)
+ source](https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source)
should upgrade the cryptography package within their virtualenv by running:
```sh
<path_to_virtualenv>/bin/pip install 'cryptography>=3.3'
@@ -1364,11 +1449,11 @@ To upgrade Synapse along with the cryptography package:
* Administrators using the [`matrix.org` Docker
image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu
packages from
- `matrix.org`](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#matrixorg-packages)
+ `matrix.org`](https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages)
should ensure that they have version 1.24.0 or 1.23.1 installed: these images include
the updated packages.
* Administrators who have [installed Synapse from
- source](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#installing-from-source)
+ source](https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source)
should upgrade the cryptography package within their virtualenv by running:
```sh
<path_to_virtualenv>/bin/pip install 'cryptography>=3.3'
@@ -1478,7 +1563,7 @@ Internal Changes
Synapse 1.23.0 (2020-11-18)
===========================
-This release changes the way structured logging is configured. See the [upgrade notes](UPGRADE.rst#upgrading-to-v1230) for details.
+This release changes the way structured logging is configured. See the [upgrade notes](docs/upgrade.md#upgrading-to-v1230) for details.
**Note**: We are aware of a trivially exploitable denial of service vulnerability in versions of Synapse prior to 1.20.0. Complete details will be disclosed on Monday, November 23rd. If you have not upgraded recently, please do so.
@@ -2081,7 +2166,10 @@ No significant changes since 1.19.0rc1.
Removal warning
---------------
-As outlined in the [previous release](https://github.com/matrix-org/synapse/releases/tag/v1.18.0), we are no longer publishing Docker images with the `-py3` tag suffix. On top of that, we have also removed the `latest-py3` tag. Please see [the announcement in the upgrade notes for 1.18.0](https://github.com/matrix-org/synapse/blob/develop/UPGRADE.rst#upgrading-to-v1180).
+As outlined in the [previous release](https://github.com/matrix-org/synapse/releases/tag/v1.18.0),
+we are no longer publishing Docker images with the `-py3` tag suffix. On top of that, we have also removed the
+`latest-py3` tag. Please see
+[the announcement in the upgrade notes for 1.18.0](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1180).
Synapse 1.19.0rc1 (2020-08-13)
@@ -2112,7 +2200,7 @@ Bugfixes
Updates to the Docker image
---------------------------
-- We no longer publish Docker images with the `-py3` tag suffix, as [announced in the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/UPGRADE.rst#upgrading-to-v1180). ([\#8056](https://github.com/matrix-org/synapse/issues/8056))
+- We no longer publish Docker images with the `-py3` tag suffix, as [announced in the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1180). ([\#8056](https://github.com/matrix-org/synapse/issues/8056))
Improved Documentation
@@ -2670,7 +2758,7 @@ configurations of Synapse:
to be incomplete or empty if Synapse was upgraded directly from v1.2.1 or
earlier, to versions between v1.4.0 and v1.12.x.
-Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes
+Please review the [upgrade notes](docs/upgrade.md) for more details on these changes
and for general upgrade guidance.
@@ -2771,7 +2859,7 @@ Bugfixes
- Fix bad error handling that would cause Synapse to crash if it's provided with a YAML configuration file that's either empty or doesn't parse into a key-value map. ([\#7341](https://github.com/matrix-org/synapse/issues/7341))
- Fix incorrect metrics reporting for `renew_attestations` background task. ([\#7344](https://github.com/matrix-org/synapse/issues/7344))
- Prevent non-federating rooms from appearing in responses to federated `POST /publicRoom` requests when a filter was included. ([\#7367](https://github.com/matrix-org/synapse/issues/7367))
-- Fix a bug which would cause the room directory to be incorrectly populated if Synapse was upgraded directly from v1.2.1 or earlier to v1.4.0 or later. Note that this fix does not apply retrospectively; see the [upgrade notes](UPGRADE.rst#upgrading-to-v1130) for more information. ([\#7387](https://github.com/matrix-org/synapse/issues/7387))
+- Fix a bug which would cause the room directory to be incorrectly populated if Synapse was upgraded directly from v1.2.1 or earlier to v1.4.0 or later. Note that this fix does not apply retrospectively; see the [upgrade notes](docs/upgrade.md#upgrading-to-v1130) for more information. ([\#7387](https://github.com/matrix-org/synapse/issues/7387))
- Fix bug in `EventContext.deserialize`. ([\#7393](https://github.com/matrix-org/synapse/issues/7393))
@@ -2921,7 +3009,7 @@ Synapse 1.12.0 includes a database update which is run as part of the upgrade,
and which may take some time (several hours in the case of a large
server). Synapse will not respond to HTTP requests while this update is taking
place. For information on seeing if you are affected, and a workaround if you
-are, see the [upgrade notes](UPGRADE.rst#upgrading-to-v1120).
+are, see the [upgrade notes](docs/upgrade.md#upgrading-to-v1120).
Security advisory
-----------------
@@ -2944,11 +3032,11 @@ installation remains secure.
* Administrators using the [`matrix.org` Docker
image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu
packages from
- `matrix.org`](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#matrixorg-packages)
+ `matrix.org`](https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages)
should ensure that they have version 1.12.0 installed: these images include
Twisted 20.3.0.
* Administrators who have [installed Synapse from
- source](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#installing-from-source)
+ source](https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source)
should upgrade Twisted within their virtualenv by running:
```sh
<path_to_virtualenv>/bin/pip install 'Twisted>=20.3.0'
@@ -3474,7 +3562,7 @@ Bugfixes
Synapse 1.7.0 (2019-12-13)
==========================
-This release changes the default settings so that only local authenticated users can query the server's room directory. See the [upgrade notes](UPGRADE.rst#upgrading-to-v170) for details.
+This release changes the default settings so that only local authenticated users can query the server's room directory. See the [upgrade notes](docs/upgrade.md#upgrading-to-v170) for details.
Support for SQLite versions before 3.11 is now deprecated. A future release will refuse to start if used with an SQLite version before 3.11.
@@ -3838,7 +3926,7 @@ Synapse 1.4.0rc1 (2019-09-26)
=============================
Note that this release includes significant changes around 3pid
-verification. Administrators are reminded to review the [upgrade notes](UPGRADE.rst#upgrading-to-v140).
+verification. Administrators are reminded to review the [upgrade notes](docs/upgrade.md#upgrading-to-v140).
Features
--------
@@ -4214,7 +4302,7 @@ Synapse 1.1.0 (2019-07-04)
==========================
As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
-See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.
+See the [upgrade notes](docs/upgrade.md#upgrading-to-v110) for more details.
This release also deprecates the use of environment variables to configure the
docker image. See the [docker README](https://github.com/matrix-org/synapse/blob/release-v1.1.0/docker/README.md#legacy-dynamic-configuration-file-support)
@@ -4244,7 +4332,7 @@ Synapse 1.1.0rc1 (2019-07-02)
=============================
As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
-See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.
+See the [upgrade notes](docs/upgrade.md#upgrading-to-v110) for more details.
Features
--------
@@ -5016,7 +5104,7 @@ run on Python versions 3.5 or 3.6 (as well as 2.7). Support for Python 3.7
remains experimental.
We recommend upgrading to Python 3, but make sure to read the [upgrade
-notes](UPGRADE.rst#upgrading-to-v0340) when doing so.
+notes](docs/upgrade.md#upgrading-to-v0340) when doing so.
Features
--------
diff --git a/INSTALL.md b/INSTALL.md
index b0697052..f199b233 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -1,593 +1,7 @@
# Installation Instructions
-There are 3 steps to follow under **Installation Instructions**.
+This document has moved to the
+[Synapse documentation website](https://matrix-org.github.io/synapse/latest/setup/installation.html).
+Please update your links.
-- [Installation Instructions](#installation-instructions)
- - [Choosing your server name](#choosing-your-server-name)
- - [Installing Synapse](#installing-synapse)
- - [Installing from source](#installing-from-source)
- - [Platform-specific prerequisites](#platform-specific-prerequisites)
- - [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
- - [ArchLinux](#archlinux)
- - [CentOS/Fedora](#centosfedora)
- - [macOS](#macos)
- - [OpenSUSE](#opensuse)
- - [OpenBSD](#openbsd)
- - [Windows](#windows)
- - [Prebuilt packages](#prebuilt-packages)
- - [Docker images and Ansible playbooks](#docker-images-and-ansible-playbooks)
- - [Debian/Ubuntu](#debianubuntu)
- - [Matrix.org packages](#matrixorg-packages)
- - [Downstream Debian packages](#downstream-debian-packages)
- - [Downstream Ubuntu packages](#downstream-ubuntu-packages)
- - [Fedora](#fedora)
- - [OpenSUSE](#opensuse-1)
- - [SUSE Linux Enterprise Server](#suse-linux-enterprise-server)
- - [ArchLinux](#archlinux-1)
- - [Void Linux](#void-linux)
- - [FreeBSD](#freebsd)
- - [OpenBSD](#openbsd-1)
- - [NixOS](#nixos)
- - [Setting up Synapse](#setting-up-synapse)
- - [Using PostgreSQL](#using-postgresql)
- - [TLS certificates](#tls-certificates)
- - [Client Well-Known URI](#client-well-known-uri)
- - [Email](#email)
- - [Registering a user](#registering-a-user)
- - [Setting up a TURN server](#setting-up-a-turn-server)
- - [URL previews](#url-previews)
- - [Troubleshooting Installation](#troubleshooting-installation)
-
-
-## Choosing your server name
-
-It is important to choose the name for your server before you install Synapse,
-because it cannot be changed later.
-
-The server name determines the "domain" part of user-ids for users on your
-server: these will all be of the format `@user:my.domain.name`. It also
-determines how other matrix servers will reach yours for federation.
-
-For a test configuration, set this to the hostname of your server. For a more
-production-ready setup, you will probably want to specify your domain
-(`example.com`) rather than a matrix-specific hostname here (in the same way
-that your email address is probably `user@example.com` rather than
-`user@email.example.com`) - but doing so may require more advanced setup: see
-[Setting up Federation](docs/federate.md).
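In configuration terms, this choice ends up as a single setting; a minimal sketch with a hypothetical domain:

```yaml
# homeserver.yaml (sketch; example.com is a placeholder)
server_name: example.com   # user IDs take the form @user:example.com
```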
-
-## Installing Synapse
-
-### Installing from source
-
-(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
-
-When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.
-
-System requirements:
-
-- POSIX-compliant system (tested on Linux & OS X)
-- Python 3.5.2 or later, up to Python 3.9.
-- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
-
-
-To install the Synapse homeserver run:
-
-```sh
-mkdir -p ~/synapse
-virtualenv -p python3 ~/synapse/env
-source ~/synapse/env/bin/activate
-pip install --upgrade pip
-pip install --upgrade setuptools
-pip install matrix-synapse
-```
-
-This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
-and install it, along with the python libraries it uses, into a virtual environment
-under `~/synapse/env`. Feel free to pick a different directory if you
-prefer.
-
-This Synapse installation can then be later upgraded by using pip again with the
-update flag:
-
-```sh
-source ~/synapse/env/bin/activate
-pip install -U matrix-synapse
-```
-
-Before you can start Synapse, you will need to generate a configuration
-file. To do this, run (in your virtualenv, as before):
-
-```sh
-cd ~/synapse
-python -m synapse.app.homeserver \
- --server-name my.domain.name \
- --config-path homeserver.yaml \
- --generate-config \
- --report-stats=[yes|no]
-```
-
-... substituting an appropriate value for `--server-name`.
-
-This command will generate a config file that you can then customise, but it will
-also generate a set of keys for you. These keys will allow your homeserver to
-identify itself to other homeservers, so don't lose or delete them. It would be
-wise to back them up somewhere safe. (If, for whatever reason, you do need to
-change your homeserver's keys, you may find that other homeservers have the
-old key cached. If you update the signing key, you should change the name of the
-key in the `<server name>.signing.key` file (the second word) to something
-different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).
-
-To actually run your new homeserver, pick a working directory for Synapse to
-run (e.g. `~/synapse`), and:
-
-```sh
-cd ~/synapse
-source env/bin/activate
-synctl start
-```
-
-#### Platform-specific prerequisites
-
-Synapse is written in Python but some of the libraries it uses are written in
-C. So before we can install Synapse itself we need a working C compiler and the
-header files for Python C extensions.
-
-##### Debian/Ubuntu/Raspbian
-
-Installing prerequisites on Ubuntu or Debian:
-
-```sh
-sudo apt install build-essential python3-dev libffi-dev \
- python3-pip python3-setuptools sqlite3 \
- libssl-dev virtualenv libjpeg-dev libxslt1-dev
-```
-
-##### ArchLinux
-
-Installing prerequisites on ArchLinux:
-
-```sh
-sudo pacman -S base-devel python python-pip \
- python-setuptools python-virtualenv sqlite3
-```
-
-##### CentOS/Fedora
-
-Installing prerequisites on CentOS or Fedora Linux:
-
-```sh
-sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
- libwebp-devel libxml2-devel libxslt-devel libpq-devel \
- python3-virtualenv libffi-devel openssl-devel python3-devel
-sudo dnf groupinstall "Development Tools"
-```
-
-##### macOS
-
-Installing prerequisites on macOS:
-
-```sh
-xcode-select --install
-sudo easy_install pip
-sudo pip install virtualenv
-brew install pkg-config libffi
-```
-
-On macOS Catalina (10.15) you may need to explicitly install OpenSSL
-via brew and inform `pip` about it so that `psycopg2` builds:
-
-```sh
-brew install openssl@1.1
-export LDFLAGS="-L/usr/local/opt/openssl/lib"
-export CPPFLAGS="-I/usr/local/opt/openssl/include"
-```
-
-##### OpenSUSE
-
-Installing prerequisites on openSUSE:
-
-```sh
-sudo zypper in -t pattern devel_basis
-sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
- python-devel libffi-devel libopenssl-devel libjpeg62-devel
-```
-
-##### OpenBSD
-
-A port of Synapse is available under `net/synapse`. The filesystem
-underlying the homeserver directory (defaults to `/var/synapse`) has to be
-mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
-and mounting it to `/var/synapse` should be taken into consideration.
-
-To be able to build Python (which Synapse depends on), the `WRKOBJDIR`
-(cf. `bsd.port.mk(5)`) used for the Python build also needs to be on a
-filesystem mounted with `wxallowed` (cf. `mount(8)`).
-
-Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
-default OpenBSD installation is mounted with `wxallowed`):
-
-```sh
-doas mkdir /usr/local/pobj_wxallowed
-```
-
-Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
-configured in `/etc/mk.conf`:
-
-```sh
-doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
-```
-
-Setting the `WRKOBJDIR` for building python:
-
-```sh
-echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
-```
-
-Building Synapse:
-
-```sh
-cd /usr/ports/net/synapse
-make install
-```
-
-##### Windows
-
-If you wish to run or develop Synapse on Windows, the Windows Subsystem For
-Linux provides a Linux environment on Windows 10 which is capable of using the
-Debian, Fedora, or source installation methods. More information about WSL can
-be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
-Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
-for Windows Server.
-
-### Prebuilt packages
-
-As an alternative to installing from source, prebuilt packages are available
-for a number of platforms.
-
-#### Docker images and Ansible playbooks
-
-There is an official synapse image available at
-<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
-the docker-compose file available at [contrib/docker](contrib/docker). Further
-information on this including configuration options is available in the README
-on hub.docker.com.
-
-Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
-Dockerfile to automate a synapse server in a single Docker image, at
-<https://hub.docker.com/r/avhost/docker-matrix/tags/>
-
-Slavi Pantaleev has created an Ansible playbook,
-which installs the official Docker image of Matrix Synapse
-along with many other Matrix-related services (Postgres database, Element, coturn,
-ma1sd, SSL support, etc.).
-For more details, see
-<https://github.com/spantaleev/matrix-docker-ansible-deploy>
-
-#### Debian/Ubuntu
-
-##### Matrix.org packages
-
-Matrix.org provides Debian/Ubuntu packages of the latest stable version of
-Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
-9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:
-
-```sh
-sudo apt install -y lsb-release wget apt-transport-https
-sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
-echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
- sudo tee /etc/apt/sources.list.d/matrix-org.list
-sudo apt update
-sudo apt install matrix-synapse-py3
-```
-
-**Note**: if you followed a previous version of these instructions which
-recommended using `apt-key add` to add an old key from
-`https://matrix.org/packages/debian/`, you should note that this key has been
-revoked. You should remove the old key with `sudo apt-key remove
-C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to
-update your configuration.
-
-The fingerprint of the repository signing key (as shown by `gpg
-/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
-`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
-
-##### Downstream Debian packages
-
-We do not recommend using the packages from the default Debian `buster`
-repository at this time, as they are old and suffer from known security
-vulnerabilities. You can install the latest version of Synapse from
-[our repository](#matrixorg-packages) or from `buster-backports`. Please
-see the [Debian documentation](https://backports.debian.org/Instructions/)
-for information on how to use backports.
-
-If you are using Debian `sid` or testing, Synapse is available in the default
-repositories and it should be possible to install it simply with:
-
-```sh
-sudo apt install matrix-synapse
-```
-
-##### Downstream Ubuntu packages
-
-We do not recommend using the packages in the default Ubuntu repository
-at this time, as they are old and suffer from known security vulnerabilities.
-The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
-
-#### Fedora
-
-Synapse is in the Fedora repositories as `matrix-synapse`:
-
-```sh
-sudo dnf install matrix-synapse
-```
-
-Oleg Girko provides Fedora RPMs at
-<https://obs.infoserver.lv/project/monitor/matrix-synapse>
-
-#### OpenSUSE
-
-Synapse is in the OpenSUSE repositories as `matrix-synapse`:
-
-```sh
-sudo zypper install matrix-synapse
-```
-
-#### SUSE Linux Enterprise Server
-
-Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
-<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>
-
-#### ArchLinux
-
-The quickest way to get up and running with ArchLinux is probably with the community package
-<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
-the necessary dependencies.
-
-pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
-
-```sh
-sudo pip install --upgrade pip
-```
-
-If you encounter an error with the bcrypt library causing a "Wrong ELF Class:
-ELFCLASS32" error (on x64 systems), you may need to reinstall py-bcrypt so that
-it is compiled for the right architecture. (This should not be needed if
-installing under virtualenv):
-
-```sh
-sudo pip uninstall py-bcrypt
-sudo pip install py-bcrypt
-```
-
-#### Void Linux
-
-Synapse can be found in the void repositories as 'synapse':
-
-```sh
-xbps-install -Su
-xbps-install -S synapse
-```
-
-#### FreeBSD
-
-Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
-
-- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
-- Packages: `pkg install py37-matrix-synapse`
-
-#### OpenBSD
-
-As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
-underlying the homeserver directory (defaults to `/var/synapse`) has to be
-mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
-and mounting it to `/var/synapse` should be taken into consideration.
-
-Installing Synapse:
-
-```sh
-doas pkg_add synapse
-```
-
-#### NixOS
-
-Robin Lambertz has packaged Synapse for NixOS at:
-<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
-
-## Setting up Synapse
-
-Once you have installed synapse as above, you will need to configure it.
-
-### Using PostgreSQL
-
-By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades
-performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org)
-instead. Advantages include:
-
-- significant performance improvements due to the superior threading and
- caching model, smarter query optimiser
-- allowing the DB to be run on separate hardware
-
-For information on how to install and use PostgreSQL in Synapse, please see
-[docs/postgres.md](docs/postgres.md)
-
-SQLite is only acceptable for testing purposes. SQLite should not be used in
-a production server. Synapse will perform poorly when using
-SQLite, especially when participating in large rooms.
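As an orientation for the pointer above, switching to PostgreSQL is mostly a matter of swapping the `database` section of `homeserver.yaml`. A minimal sketch assuming a hypothetical local database and role (see docs/postgres.md for the authoritative steps, including creating the database):

```yaml
# homeserver.yaml (sketch; names below are placeholders)
database:
  name: psycopg2           # PostgreSQL driver instead of the default sqlite3
  args:
    user: synapse_user
    password: secret
    database: synapse
    host: localhost
    cp_min: 5              # connection-pool bounds
    cp_max: 10
```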
-
-### TLS certificates
-
-The default configuration exposes a single HTTP port on the local
-interface: `http://localhost:8008`. It is suitable for local testing,
-but for any practical use, you will need Synapse's APIs to be served
-over HTTPS.
-
-The recommended way to do so is to set up a reverse proxy on port
-`8448`. You can find documentation on doing so in
-[docs/reverse_proxy.md](docs/reverse_proxy.md).
-
-Alternatively, you can configure Synapse to expose an HTTPS port. To do
-so, you will need to edit `homeserver.yaml`, as follows:
-
-- First, under the `listeners` section, uncomment the configuration for the
- TLS-enabled listener. (Remove the hash sign (`#`) at the start of
- each line). The relevant lines are like this:
-
-```yaml
- - port: 8448
- type: http
- tls: true
- resources:
- - names: [client, federation]
- ```
-
-- You will also need to uncomment the `tls_certificate_path` and
- `tls_private_key_path` lines under the `TLS` section. You will need to manage
- provisioning of these certificates yourself.
-
- If you are using your own certificate, be sure to use a `.pem` file that
- includes the full certificate chain including any intermediate certificates
- (for instance, if using certbot, use `fullchain.pem` as your certificate, not
- `cert.pem`).
-
-For a more detailed guide to configuring your server for federation, see
-[federate.md](docs/federate.md).
-
-### Client Well-Known URI
-
-Setting up the client Well-Known URI is optional but if you set it up, it will
-allow users to enter their full username (e.g. `@user:<server_name>`) into clients
-which support well-known lookup to automatically configure the homeserver and
-identity server URLs. This is useful so that users don't have to memorize or think
-about the actual homeserver URL you are using.
-
-The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
-the following format.
-
-```json
-{
- "m.homeserver": {
- "base_url": "https://<matrix.example.com>"
- }
-}
-```
-
-It can optionally contain identity server information as well.
-
-```json
-{
- "m.homeserver": {
- "base_url": "https://<matrix.example.com>"
- },
- "m.identity_server": {
- "base_url": "https://<identity.example.com>"
- }
-}
-```
-
-To work in browser based clients, the file must be served with the appropriate
-Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
-`Access-Control-Allow-Origin: *` which would allow all browser based clients to
-view it.
-
-In nginx this would be something like:
-
-```nginx
-location /.well-known/matrix/client {
- return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
- default_type application/json;
- add_header Access-Control-Allow-Origin *;
-}
-```
-
-You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
-correctly. `public_baseurl` should be set to the URL that clients will use to
-connect to your server. This is the same URL you put for the `m.homeserver`
-`base_url` above.
-
-```yaml
-public_baseurl: "https://<matrix.example.com>"
-```
-
-### Email
-
-It is desirable for Synapse to have the capability to send email. This allows
-Synapse to send password reset emails, send verifications when an email address
-is added to a user's account, and send email notifications to users when they
-receive new messages.
-
-To configure an SMTP server for Synapse, modify the configuration section
-headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
-and `notif_from` fields filled out. You may also need to set `smtp_user`,
-`smtp_pass`, and `require_transport_security`.
-
-If email is not configured, password reset, registration and notifications via
-email will be disabled.
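A minimal sketch of such an `email` section, assuming a hypothetical relay at `smtp.example.com` (see the sample config for the full set of options):

```yaml
email:
  smtp_host: smtp.example.com    # hypothetical relay
  smtp_port: 587
  smtp_user: synapse             # hypothetical credentials
  smtp_pass: secret
  require_transport_security: true
  notif_from: "Synapse <noreply@example.com>"
```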
-
-### Registering a user
-
-The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
-
-Alternatively, you can do so from the command line. This can be done as follows:
-
- 1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
- installed via a prebuilt package, `register_new_matrix_user` should already be
- on the search path):
- ```sh
- cd ~/synapse
- source env/bin/activate
- synctl start # if not already running
- ```
- 2. Run the following command:
- ```sh
- register_new_matrix_user -c homeserver.yaml http://localhost:8008
- ```
-
-This will prompt you to add details for the new user, and will then connect to
-the running Synapse to create the new user. For example:
-```
-New user localpart: erikj
-Password:
-Confirm password:
-Make admin [no]:
-Success!
-```
-
-This process uses a setting `registration_shared_secret` in
-`homeserver.yaml`, which is shared between Synapse itself and the
-`register_new_matrix_user` script. It doesn't matter what it is (a random
-value is generated by `--generate-config`), but it should be kept secret, as
-anyone with knowledge of it can register users, including admin accounts,
-on your server even if `enable_registration` is `false`.
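A sketch of the relevant `homeserver.yaml` settings (the secret below is a placeholder; `--generate-config` fills in a random one):

```yaml
enable_registration: false
# Shared with register_new_matrix_user; anyone who knows it can create
# accounts, including admins, so treat it like a password.
registration_shared_secret: "CHANGE_ME_placeholder"
```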
-
-### Setting up a TURN server
-
-For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
-
-### URL previews
-
-Synapse includes support for previewing URLs, which is disabled by default. To
-turn it on you must enable the `url_preview_enabled: True` config parameter
-and explicitly specify the IP ranges that Synapse is not allowed to spider for
-previewing in the `url_preview_ip_range_blacklist` configuration parameter.
-This is critical from a security perspective to stop arbitrary Matrix users
-spidering 'internal' URLs on your network. At the very least we recommend that
-your loopback and RFC1918 IP addresses are blacklisted.
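A sketch of the two settings, blacklisting the loopback and RFC1918 ranges recommended above (the sample config carries the full recommended list):

```yaml
url_preview_enabled: true
url_preview_ip_range_blacklist:
  - '127.0.0.0/8'       # loopback
  - '10.0.0.0/8'        # RFC1918 private ranges
  - '172.16.0.0/12'
  - '192.168.0.0/16'
```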
-
-This also requires the optional `lxml` python dependency to be installed. This
-in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
-means `apt-get install libxml2-dev`, or equivalent for your OS.
-
-### Troubleshooting Installation
-
-`pip` seems to leak *lots* of memory during installation. For instance, a Linux
-host with 512MB of RAM may run out of memory whilst installing Twisted. If this
-happens, you will have to individually install the dependencies which are
-failing, e.g.:
-
-```sh
-pip install twisted
-```
-
-If you have any other problems, feel free to ask in
-[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org).
+The markdown source is available in [docs/setup/installation.md](docs/setup/installation.md).
diff --git a/README.rst b/README.rst
index 2ecc93c8..0ae05616 100644
--- a/README.rst
+++ b/README.rst
@@ -25,7 +25,7 @@ The overall architecture is::
``#matrix:matrix.org`` is the official support room for Matrix, and can be
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
-via IRC bridge at irc://irc.freenode.net/matrix.
+via IRC bridge at irc://irc.libera.chat/matrix.
Synapse is currently in rapid development, but as of version 0.5 we believe it
is sufficiently stable to be run as an internet-facing service for real usage!
@@ -94,7 +94,8 @@ Synapse Installation
.. _federation:
-* For details on how to install synapse, see `<INSTALL.md>`_.
+* For details on how to install synapse, see
+ `Installation Instructions <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_.
* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
@@ -106,7 +107,8 @@ from a web client.
Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
-connect from a client: see `<INSTALL.md#tls-certificates>`_.
+connect from a client: see
+`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
@@ -186,11 +188,11 @@ impact to other applications will be minimal.
Upgrading an existing Synapse
=============================
-The instructions for upgrading synapse are in `UPGRADE.rst`_.
+The instructions for upgrading synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of synapse.
-.. _UPGRADE.rst: UPGRADE.rst
+.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
.. _reverse-proxy:
@@ -265,7 +267,7 @@ Join our developer community on Matrix: `#synapse-dev:matrix.org <https://matrix
Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
-`Installing from source <INSTALL.md#installing-from-source>`_.
+`Installing from source <https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source>`_.
To check out a synapse for development, clone the git repo into a working
directory of your choice::
@@ -333,8 +335,8 @@ access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.
Testing with SyTest is recommended for verifying that changes related to the
-Client-Server API are functioning correctly. See the `installation instructions
-<https://github.com/matrix-org/sytest#installing>`_ for details.
+Client-Server API are functioning correctly. See the `SyTest installation
+instructions <https://github.com/matrix-org/sytest#installing>`_ for details.
Platform dependencies
diff --git a/UPGRADE.rst b/UPGRADE.rst
index ee8b4fa6..17ecd935 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -1,1341 +1,7 @@
Upgrading Synapse
=================
-Before upgrading check if any special steps are required to upgrade from the
-version you currently have installed to the current version of Synapse. The extra
-instructions that may be required are listed later in this document.
+This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrading>`_.
+Please update your links.
-* Check that your versions of Python and PostgreSQL are still supported.
-
- Synapse follows upstream lifecycles for `Python`_ and `PostgreSQL`_, and
- removes support for versions which are no longer maintained.
-
- The website https://endoflife.date also offers convenient summaries.
-
- .. _Python: https://devguide.python.org/devcycle/#end-of-life-branches
- .. _PostgreSQL: https://www.postgresql.org/support/versioning/
-
-* If Synapse was installed using `prebuilt packages
- <INSTALL.md#prebuilt-packages>`_, you will need to follow the normal process
- for upgrading those packages.
-
-* If Synapse was installed from source, then:
-
- 1. Activate the virtualenv before upgrading. For example, if Synapse is
- installed in a virtualenv in ``~/synapse/env`` then run:
-
- .. code:: bash
-
- source ~/synapse/env/bin/activate
-
- 2. If Synapse was installed using pip then upgrade to the latest version by
- running:
-
- .. code:: bash
-
- pip install --upgrade matrix-synapse
-
- If Synapse was installed using git then upgrade to the latest version by
- running:
-
- .. code:: bash
-
- git pull
- pip install --upgrade .
-
- 3. Restart Synapse:
-
- .. code:: bash
-
- ./synctl restart
-
-To check whether your update was successful, you can check the running server
-version with:
-
-.. code:: bash
-
- # you may need to replace 'localhost:8008' if synapse is not configured
- # to listen on port 8008.
-
- curl http://localhost:8008/_synapse/admin/v1/server_version
-
-Rolling back to older versions
-------------------------------
-
-Rolling back to previous releases can be difficult, due to database schema
-changes between releases. Where we have been able to test the rollback process,
-this will be noted below.
-
-In general, you will need to undo any changes made during the upgrade process,
-for example:
-
-* pip:
-
- .. code:: bash
-
- source env/bin/activate
- # replace `1.3.0` accordingly:
- pip install matrix-synapse==1.3.0
-
-* Debian:
-
- .. code:: bash
-
- # replace `1.3.0` and `stretch` accordingly:
- wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
- dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
-
-Upgrading to v1.37.0
-====================
-
-Deprecation of the current spam checker interface
--------------------------------------------------
-
-The current spam checker interface is deprecated in favour of a new generic modules system.
-Authors of spam checker modules can refer to `this documentation <https://matrix-org.github.io/synapse/develop/modules.html#porting-an-existing-module-that-uses-the-old-interface>`_
-to update their modules. Synapse administrators can refer to `this documentation <https://matrix-org.github.io/synapse/develop/modules.html#using-modules>`_
-to update their configuration once the modules they are using have been updated.
-
-We plan to remove support for the current spam checker interface in August 2021.
-
-More module interfaces will be ported over to this new generic system in future versions
-of Synapse.
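-
-As a rough sketch of the new interface (the module and callback names here are
-illustrative; see the module documentation linked above for the authoritative
-details), a ported spam checker might look like:
-
-.. code:: python
-
-   from synapse.module_api import ModuleApi
-
-   class ExampleSpamChecker:
-       def __init__(self, config, api: ModuleApi):
-           self._api = api
-           # Callbacks are registered explicitly, instead of being looked up
-           # by method name as in the old spam checker interface.
-           api.register_spam_checker_callbacks(
-               user_may_invite=self.user_may_invite,
-           )
-
-       async def user_may_invite(self, inviter, invitee, room_id):
-           # Returning True allows the invite; returning False rejects it.
-           return True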
-
-
-Upgrading to v1.34.0
-====================
-
-``room_invite_state_types`` configuration setting
--------------------------------------------------
-
-The ``room_invite_state_types`` configuration setting has been deprecated and
-replaced with ``room_prejoin_state``. See the `sample configuration file <https://github.com/matrix-org/synapse/blob/v1.34.0/docs/sample_config.yaml#L1515>`_.
-
-If you have set ``room_invite_state_types`` to the default value you should simply
-remove it from your configuration file. The default value used to be:
-
-.. code:: yaml
-
- room_invite_state_types:
- - "m.room.join_rules"
- - "m.room.canonical_alias"
- - "m.room.avatar"
- - "m.room.encryption"
- - "m.room.name"
-
-If you have customised this value, you should remove ``room_invite_state_types`` and
-configure ``room_prejoin_state`` instead.
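-
-As a rough sketch (the event type below is a placeholder; see the sample
-configuration file linked above for the authoritative syntax), a customised
-replacement might look like:
-
-.. code:: yaml
-
-   room_prejoin_state:
-     # Uncomment to suppress the built-in defaults:
-     #disable_default_event_types: true
-     additional_event_types:
-       - org.example.custom.event.type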
-
-
-
-Upgrading to v1.33.0
-====================
-
-Account Validity HTML templates can now display a user's expiration date
-------------------------------------------------------------------------
-
-This may affect you if you have enabled the account validity feature, and have made use of a
-custom HTML template specified by the ``account_validity.template_dir`` or ``account_validity.account_renewed_html_path``
-Synapse config options.
-
-The template can now accept an ``expiration_ts`` variable, which represents the unix timestamp in milliseconds of the
-date until which the account has been renewed. See the
-`default template <https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_renewed.html>`_
-for an example of usage.
-
-Also note that a new HTML template, ``account_previously_renewed.html``, has been added. This is shown to users
-when they attempt to renew their account with a valid renewal token that has already been used before. The default
-template contents can be found
-`here <https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_previously_renewed.html>`_,
-and can also accept an ``expiration_ts`` variable. This template replaces the error message users would previously see
-upon attempting to use a valid renewal token more than once.
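-
-As a minimal (hypothetical) illustration of the new variable, a custom template
-might include something like the following; note that ``expiration_ts`` is a
-millisecond timestamp, so real templates will usually format it for display:
-
-.. code:: html
-
-   <p>Your account has been renewed, and is valid until {{ expiration_ts }}.</p>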
-
-
-Upgrading to v1.32.0
-====================
-
-Regression causing connected Prometheus instances to become overwhelmed
------------------------------------------------------------------------
-
-This release introduces `a regression <https://github.com/matrix-org/synapse/issues/9853>`_
-that can overwhelm connected Prometheus instances. This issue is not present in
-Synapse v1.32.0rc1.
-
-If you have been affected, please downgrade to 1.31.0. You then may need to
-remove excess writeahead logs in order for Prometheus to recover. Instructions
-for doing so are provided
-`here <https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183>`_.
-
-Dropping support for old Python, Postgres and SQLite versions
--------------------------------------------------------------
-
-In line with our `deprecation policy <https://github.com/matrix-org/synapse/blob/release-v1.32.0/docs/deprecation_policy.md>`_,
-we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no longer supported upstream.
-
-This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or SQLite 3.22+.
-
-Removal of old List Accounts Admin API
---------------------------------------
-
-The deprecated v1 "list accounts" admin API (``GET /_synapse/admin/v1/users/<user_id>``) has been removed in this version.
-
-The `v2 list accounts API <https://github.com/matrix-org/synapse/blob/master/docs/admin_api/user_admin_api.rst#list-accounts>`_
-has been available since Synapse 1.7.0 (2019-12-13), and is accessible under ``GET /_synapse/admin/v2/users``.
-
-The deprecation of the old endpoint was announced with Synapse 1.28.0 (released on 2021-02-25).
-
-Application Services must use type ``m.login.application_service`` when registering users
------------------------------------------------------------------------------------------
-
-In compliance with the
-`Application Service spec <https://matrix.org/docs/spec/application_service/r0.1.2#server-admin-style-permissions>`_,
-Application Services are now required to use the ``m.login.application_service`` type when registering users via the
-``/_matrix/client/r0/register`` endpoint. This behaviour was deprecated in Synapse v1.30.0.
-
-Please ensure your Application Services are up to date.
-
-Upgrading to v1.29.0
-====================
-
-Requirement for X-Forwarded-Proto header
-----------------------------------------
-
-When using Synapse with a reverse proxy (in particular, when using the
-`x_forwarded` option on an HTTP listener), Synapse now expects to receive an
-`X-Forwarded-Proto` header on incoming HTTP requests. If it is not set, Synapse
-will log a warning on each received request.
-
-To avoid the warning, administrators using a reverse proxy should ensure that
-the reverse proxy sets the `X-Forwarded-Proto` header to `https` or `http` to
-indicate the protocol used by the client.
-
-Synapse also requires the `Host` header to be preserved.
-
-See the `reverse proxy documentation <docs/reverse_proxy.md>`_, where the
-example configurations have been updated to show how to set these headers.
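-
-For example, with nginx (a minimal sketch; adapt it to your own proxy and
-listener configuration):
-
-.. code::
-
-   location /_matrix {
-       proxy_pass http://localhost:8008;
-       proxy_set_header X-Forwarded-Proto $scheme;
-       proxy_set_header Host $host;
-   }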
-
-(Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
-sets `X-Forwarded-Proto` by default.)
-
-Upgrading to v1.27.0
-====================
-
-Changes to callback URI for OAuth2 / OpenID Connect and SAML2
--------------------------------------------------------------
-
-This version changes the URI used for callbacks from OAuth2 and SAML2 identity providers:
-
-* If your server is configured for single sign-on via an OpenID Connect or OAuth2 identity
- provider, you will need to add ``[synapse public baseurl]/_synapse/client/oidc/callback``
- to the list of permitted "redirect URIs" at the identity provider.
-
- See `docs/openid.md <docs/openid.md>`_ for more information on setting up OpenID
- Connect.
-
-* If your server is configured for single sign-on via a SAML2 identity provider, you will
- need to add ``[synapse public baseurl]/_synapse/client/saml2/authn_response`` as a permitted
- "ACS location" (also known as "allowed callback URLs") at the identity provider.
-
- The "Issuer" in the "AuthnRequest" to the SAML2 identity provider is also updated to
- ``[synapse public baseurl]/_synapse/client/saml2/metadata.xml``. If your SAML2 identity
- provider uses this property to validate or otherwise identify Synapse, its configuration
- will need to be updated to use the new URL. Alternatively you could create a new, separate
- "EntityDescriptor" in your SAML2 identity provider with the new URLs and leave the URLs in
- the existing "EntityDescriptor" as they were.
-
-Changes to HTML templates
--------------------------
-
-The HTML templates for SSO and email notifications now have `Jinja2's autoescape <https://jinja.palletsprojects.com/en/2.11.x/api/#autoescaping>`_
-enabled for files ending in ``.html``, ``.htm``, and ``.xml``. If you have customised
-these templates and see issues when viewing them you might need to update them.
-It is expected that most configurations will need no changes.
-
-If you have customised the *names* of these templates, it is recommended
-to verify they end in ``.html`` to ensure autoescape is enabled.
-
-The above applies to the following templates:
-
-* ``add_threepid.html``
-* ``add_threepid_failure.html``
-* ``add_threepid_success.html``
-* ``notice_expiry.html``
-* ``notif_mail.html`` (which, by default, includes ``room.html`` and ``notif.html``)
-* ``password_reset.html``
-* ``password_reset_confirmation.html``
-* ``password_reset_failure.html``
-* ``password_reset_success.html``
-* ``registration.html``
-* ``registration_failure.html``
-* ``registration_success.html``
-* ``sso_account_deactivated.html``
-* ``sso_auth_bad_user.html``
-* ``sso_auth_confirm.html``
-* ``sso_auth_success.html``
-* ``sso_error.html``
-* ``sso_login_idp_picker.html``
-* ``sso_redirect_confirm.html``
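-
-As a brief (hypothetical) illustration of the effect: with autoescape enabled,
-plain substitutions are HTML-escaped automatically, and any values that
-intentionally contain markup must be explicitly marked safe:
-
-.. code:: html
-
-   {# escaped automatically under autoescape #}
-   <p>Hello {{ display_name }}!</p>
-
-   {# trusted, pre-rendered HTML must now be marked safe explicitly #}
-   {{ server_notice_html | safe }}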
-
-Upgrading to v1.26.0
-====================
-
-Rolling back to v1.25.0 after a failed upgrade
-----------------------------------------------
-
-v1.26.0 includes a lot of large changes. If something problematic occurs, you
-may want to roll back to a previous version of Synapse. Because v1.26.0 also
-includes a new database schema version, reverting that version is also required
-alongside the generic rollback instructions mentioned above. In short, to roll
-back to v1.25.0 you need to:
-
-1. Stop the server
-2. Decrease the schema version in the database:
-
- .. code:: sql
-
- UPDATE schema_version SET version = 58;
-
-3. Delete the ignored users & chain cover data:
-
- .. code:: sql
-
- DROP TABLE IF EXISTS ignored_users;
- UPDATE rooms SET has_auth_chain_index = false;
-
- For PostgreSQL run:
-
- .. code:: sql
-
- TRUNCATE event_auth_chain_links;
- TRUNCATE event_auth_chains;
-
- For SQLite run:
-
- .. code:: sql
-
- DELETE FROM event_auth_chain_links;
- DELETE FROM event_auth_chains;
-
-4. Mark the deltas as not run (so they will re-run on upgrade).
-
- .. code:: sql
-
- DELETE FROM applied_schema_deltas WHERE version = 59 AND file = "59/01ignored_user.py";
- DELETE FROM applied_schema_deltas WHERE version = 59 AND file = "59/06chain_cover_index.sql";
-
-5. Downgrade Synapse by following the instructions for your installation method
- in the "Rolling back to older versions" section above.
-
-Upgrading to v1.25.0
-====================
-
-Last release supporting Python 3.5
-----------------------------------
-
-This is the last release of Synapse which guarantees support for Python 3.5,
-which passed its upstream End of Life date several months ago.
-
-We will attempt to maintain support through March 2021, but without guarantees.
-
-In the future, Synapse will follow upstream schedules for ending support of
-older versions of Python and PostgreSQL. Please upgrade to at least Python 3.6
-and PostgreSQL 9.6 as soon as possible.
-
-Blacklisting IP ranges
-----------------------
-
-Synapse v1.25.0 includes new settings, ``ip_range_blacklist`` and
-``ip_range_whitelist``, for controlling outgoing requests from Synapse for federation,
-identity servers, push, and for checking key validity for third-party invite events.
-The previous setting, ``federation_ip_range_blacklist``, is deprecated. The new
-``ip_range_blacklist`` defaults to private IP ranges if it is not defined.
-
-If you have never customised ``federation_ip_range_blacklist`` it is recommended
-that you remove that setting.
-
-If you have customised ``federation_ip_range_blacklist`` you should update the
-setting name to ``ip_range_blacklist``.
-
-If you have a custom push server that is reached via private IP space you may
-need to customise ``ip_range_blacklist`` or ``ip_range_whitelist``.
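-
-As a rough sketch (the ranges below are illustrative; see the sample
-configuration file for the full default list), the renamed settings look like:
-
-.. code:: yaml
-
-   ip_range_blacklist:
-     - '127.0.0.0/8'
-     - '10.0.0.0/8'
-     - '192.168.0.0/16'
-
-   # e.g. to allow requests to a push server on otherwise-blocked private IP space:
-   ip_range_whitelist:
-     - '192.168.1.55/32'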
-
-Upgrading to v1.24.0
-====================
-
-Custom OpenID Connect mapping provider breaking change
-------------------------------------------------------
-
-This release allows the OpenID Connect mapping provider to perform normalisation
-of the localpart of the Matrix ID. This allows for the mapping provider to
-specify different algorithms, instead of the `default way <https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>`_.
-
-If your Synapse configuration uses a custom mapping provider
-(`oidc_config.user_mapping_provider.module` is specified and not equal to
-`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`) then you *must* ensure
-that `map_user_attributes` of the mapping provider performs some normalisation
-of the `localpart` returned. To match previous behaviour you can use the
-`map_username_to_mxid_localpart` function provided by Synapse. An example is
-shown below:
-
-.. code-block:: python
-
- from synapse.types import map_username_to_mxid_localpart
-
- class MyMappingProvider:
- def map_user_attributes(self, userinfo, token):
- # ... your custom logic ...
- sso_user_id = ...
- localpart = map_username_to_mxid_localpart(sso_user_id)
-
- return {"localpart": localpart}
-
-Removal of historical Synapse Admin API
----------------------------------------
-
-Historically, the Synapse Admin API has been accessible under:
-
-* ``/_matrix/client/api/v1/admin``
-* ``/_matrix/client/unstable/admin``
-* ``/_matrix/client/r0/admin``
-* ``/_synapse/admin/v1``
-
-The endpoints with ``/_matrix/client/*`` prefixes have been removed as of v1.24.0.
-The Admin API is now only accessible under:
-
-* ``/_synapse/admin/v1``
-
-The only exception is the `/admin/whois` endpoint, which is
-`also available via the client-server API <https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_.
-
-The deprecation of the old endpoints was announced with Synapse 1.20.0 (released
-on 2020-09-22) and makes it easier for homeserver admins to lock down external
-access to the Admin API endpoints.
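-
-For example, a request to the v2 list accounts API might look like the
-following (the access token and port are placeholders):
-
-.. code:: bash
-
-   curl --header "Authorization: Bearer <admin_access_token>" \
-       "http://localhost:8008/_synapse/admin/v2/users?from=0&limit=10"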
-
-Upgrading to v1.23.0
-====================
-
-Structured logging configuration breaking changes
--------------------------------------------------
-
-This release deprecates use of the ``structured: true`` logging configuration for
-structured logging. If your logging configuration contains ``structured: true``
-then it should be modified based on the `structured logging documentation
-<https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md>`_.
-
-The ``structured`` and ``drains`` logging options are now deprecated and should
-be replaced by standard logging configuration of ``handlers`` and ``formatters``.
-
-A future release of Synapse will make using ``structured: true`` an error.
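-
-As a rough sketch of the replacement style (formatter class as described in the
-structured logging documentation linked above; the log path is a placeholder):
-
-.. code:: yaml
-
-   formatters:
-     structured:
-       class: synapse.logging.TerseJsonFormatter
-
-   handlers:
-     file:
-       class: logging.handlers.RotatingFileHandler
-       formatter: structured
-       filename: /var/log/synapse/homeserver.log
-       maxBytes: 104857600
-       backupCount: 10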
-
-Upgrading to v1.22.0
-====================
-
-ThirdPartyEventRules breaking changes
--------------------------------------
-
-This release introduces a backwards-incompatible change to modules making use of
-``ThirdPartyEventRules`` in Synapse. If you make use of a module defined under the
-``third_party_event_rules`` config option, please make sure it is updated to handle
-the below change:
-
-The ``http_client`` argument is no longer passed to modules as they are initialised. Instead,
-modules are expected to make use of the ``http_client`` property on the ``ModuleApi`` class.
-Modules are now passed a ``module_api`` argument during initialisation, which is an instance of
-``ModuleApi``. ``ModuleApi`` instances have a ``http_client`` property which acts the same as
-the ``http_client`` argument previously passed to ``ThirdPartyEventRules`` modules.
-
-Upgrading to v1.21.0
-====================
-
-Forwarding ``/_synapse/client`` through your reverse proxy
-----------------------------------------------------------
-
-The `reverse proxy documentation
-<https://github.com/matrix-org/synapse/blob/develop/docs/reverse_proxy.md>`_ has been updated
-to include reverse proxy directives for ``/_synapse/client/*`` endpoints. As the user password
-reset flow now uses endpoints under this prefix, **you must update your reverse proxy
-configurations for user password reset to work**.
-
-Additionally, note that the `Synapse worker documentation
-<https://github.com/matrix-org/synapse/blob/develop/docs/workers.md>`_ has been updated to
-state that the ``/_synapse/client/password_reset/email/submit_token`` endpoint can be handled
-by all workers. If you make use of Synapse's worker feature, please update your reverse proxy
-configuration to reflect this change.
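-
-For example, with nginx, a combined location block along these lines (a sketch;
-see the reverse proxy documentation above for complete examples) forwards both
-prefixes:
-
-.. code::
-
-   location ~ ^(/_matrix|/_synapse/client) {
-       proxy_pass http://localhost:8008;
-       proxy_set_header X-Forwarded-For $remote_addr;
-   }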
-
-New HTML templates
-------------------
-
-A new HTML template,
-`password_reset_confirmation.html <https://github.com/matrix-org/synapse/blob/develop/synapse/res/templates/password_reset_confirmation.html>`_,
-has been added to the ``synapse/res/templates`` directory. If you are using a
-custom template directory, you may want to copy the template over and modify it.
-
-Note that as of v1.20.0, templates do not need to be included in custom template
-directories for Synapse to start. The default templates will be used if a custom
-template cannot be found.
-
-This page will appear to the user after clicking a password reset link that has
-been emailed to them.
-
-To complete password reset, the page must include a way to make a `POST`
-request to
-``/_synapse/client/password_reset/{medium}/submit_token``
-with the query parameters from the original link, presented as a URL-encoded form. See the file
-itself for more details.
-
-Updated Single Sign-on HTML Templates
--------------------------------------
-
-The ``saml_error.html`` template was removed from Synapse and replaced with the
-``sso_error.html`` template. If your Synapse is configured to use SAML and a
-custom ``sso_redirect_confirm_template_dir`` configuration then any customisations
-of the ``saml_error.html`` template will need to be merged into the ``sso_error.html``
-template. These templates are similar, but the parameters are slightly different:
-
-* The ``msg`` parameter should be renamed to ``error_description``.
-* There is no longer a ``code`` parameter for the response code.
-* A string ``error`` parameter is available that includes a short hint of why a
- user is seeing the error page.
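-
-A minimal (hypothetical) ``sso_error.html`` using the new parameters might
-therefore contain:
-
-.. code:: html
-
-   <p>{{ error_description }}</p>
-   <p>If the problem persists, please quote this error code: {{ error }}</p>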
-
-Upgrading to v1.18.0
-====================
-
-Docker `-py3` suffix will be removed in future versions
--------------------------------------------------------
-
-From 10th August 2020, we will no longer publish Docker images with the `-py3` tag suffix. The images tagged with the `-py3` suffix have been identical to the non-suffixed tags since release 0.99.0, and the suffix is obsolete.
-
-On 10th August, we will remove the `latest-py3` tag. Existing per-release tags (such as `v1.18.0-py3`) will not be removed, but no new `-py3` tags will be added.
-
-Scripts relying on the `-py3` suffix will need to be updated.
-
-Redis replication is now recommended in lieu of TCP replication
----------------------------------------------------------------
-
-When setting up worker processes, we now recommend the use of a Redis server for replication. **The old direct TCP connection method is deprecated and will be removed in a future release.**
-See `docs/workers.md <docs/workers.md>`_ for more details.
-
-Upgrading to v1.14.0
-====================
-
-This version includes a database update which is run as part of the upgrade,
-and which may take a couple of minutes in the case of a large server. Synapse
-will not respond to HTTP requests while this update is taking place.
-
-Upgrading to v1.13.0
-====================
-
-Incorrect database migration in old synapse versions
-----------------------------------------------------
-
-A bug was introduced in Synapse 1.4.0 which could cause the room directory to
-be incomplete or empty if Synapse was upgraded directly from v1.2.1 or
-earlier, to versions between v1.4.0 and v1.12.x.
-
-This will *not* be a problem for Synapse installations which were:
- * created at v1.4.0 or later,
- * upgraded via v1.3.x, or
- * upgraded straight from v1.2.1 or earlier to v1.13.0 or later.
-
-If completeness of the room directory is a concern, installations which are
-affected can be repaired as follows:
-
-1. Run the following sql from a `psql` or `sqlite3` console:
-
- .. code:: sql
-
- INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
- ('populate_stats_process_rooms', '{}', 'current_state_events_membership');
-
- INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
- ('populate_stats_process_users', '{}', 'populate_stats_process_rooms');
-
-2. Restart synapse.
-
-New Single Sign-on HTML Templates
----------------------------------
-
-New templates (``sso_auth_confirm.html``, ``sso_auth_success.html``, and
-``sso_account_deactivated.html``) were added to Synapse. If your Synapse is
-configured to use SSO and a custom ``sso_redirect_confirm_template_dir``
-configuration then these templates will need to be copied from
-`synapse/res/templates <synapse/res/templates>`_ into that directory.
-
-Synapse SSO Plugins Method Deprecation
---------------------------------------
-
-Plugins using the ``complete_sso_login`` method of
-``synapse.module_api.ModuleApi`` should update to using the async/await
-version ``complete_sso_login_async`` which includes additional checks. The
-non-async version is considered deprecated.
-
-Rolling back to v1.12.4 after a failed upgrade
-----------------------------------------------
-
-v1.13.0 includes a lot of large changes. If something problematic occurs, you
-may want to roll back to a previous version of Synapse. Because v1.13.0 also
-includes a new database schema version, reverting that version is also required
-alongside the generic rollback instructions mentioned above. In short, to roll
-back to v1.12.4 you need to:
-
-1. Stop the server
-2. Decrease the schema version in the database:
-
- .. code:: sql
-
- UPDATE schema_version SET version = 57;
-
-3. Downgrade Synapse by following the instructions for your installation method
- in the "Rolling back to older versions" section above.
-
-
-Upgrading to v1.12.0
-====================
-
-This version includes a database update which is run as part of the upgrade,
-and which may take some time (several hours in the case of a large
-server). Synapse will not respond to HTTP requests while this update is taking
-place.
-
-This is only likely to be a problem in the case of a server which is
-participating in many rooms.
-
-0. As with all upgrades, it is recommended that you have a recent backup of
- your database which can be used for recovery in the event of any problems.
-
-1. As an initial check to see if you will be affected, you can try running the
- following query from the `psql` or `sqlite3` console. It is safe to run it
- while Synapse is still running.
-
- .. code:: sql
-
- SELECT MAX(q.v) FROM (
- SELECT (
- SELECT ej.json AS v
- FROM state_events se INNER JOIN event_json ej USING (event_id)
- WHERE se.room_id=rooms.room_id AND se.type='m.room.create' AND se.state_key=''
- LIMIT 1
- ) FROM rooms WHERE rooms.room_version IS NULL
- ) q;
-
- This query will take about the same amount of time as the upgrade process: i.e.,
- if it takes 5 minutes, then it is likely that Synapse will be unresponsive for
- 5 minutes during the upgrade.
-
- If you consider an outage of this duration to be acceptable, no further
- action is necessary and you can simply start Synapse 1.12.0.
-
- If you would prefer to reduce the downtime, continue with the steps below.
-
-2. The easiest workaround for this issue is to manually
- create a new index before upgrading. On PostgreSQL, this can be done as follows:
-
- .. code:: sql
-
- CREATE INDEX CONCURRENTLY tmp_upgrade_1_12_0_index
- ON state_events(room_id) WHERE type = 'm.room.create';
-
- The above query may take some time, but is also safe to run while Synapse is
- running.
-
- We assume that no SQLite users have databases large enough to be
- affected. If you *are* affected, you can run a similar query, omitting the
- ``CONCURRENTLY`` keyword. Note however that this operation may in itself cause
- Synapse to stop running for some time. Synapse admins are reminded that
- `SQLite is not recommended for use outside a test
- environment <https://github.com/matrix-org/synapse/blob/master/README.rst#using-postgresql>`_.
-
-3. Once the index has been created, the ``SELECT`` query in step 1 above should
- complete quickly. It is therefore safe to upgrade to Synapse 1.12.0.
-
-4. Once Synapse 1.12.0 has successfully started and is responding to HTTP
- requests, the temporary index can be removed:
-
- .. code:: sql
-
- DROP INDEX tmp_upgrade_1_12_0_index;
-
-Upgrading to v1.10.0
-====================
-
-Synapse will now log a warning on start up if used with a PostgreSQL database
-that has a non-recommended locale set.
-
-See `docs/postgres.md <docs/postgres.md>`_ for details.
-
-
-Upgrading to v1.8.0
-===================
-
-Specifying a ``log_file`` config option will now cause Synapse to refuse to
-start, and should be replaced by with the ``log_config`` option. Support for
-the ``log_file`` option was removed in v1.3.0 and has since had no effect.
-
-
-Upgrading to v1.7.0
-===================
-
-In an attempt to configure Synapse in a privacy-preserving way, the default
-behaviours of ``allow_public_rooms_without_auth`` and
-``allow_public_rooms_over_federation`` have been inverted. This means that by
-default, only authenticated users querying the Client/Server API will be able
-to query the room directory, and relatedly that the server will not share
-room directory information with other servers over federation.
-
-If your installation does not explicitly set these settings one way or the other
-and you want either setting to be ``true``, then it will be necessary to update
-your homeserver configuration file accordingly.
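-
-For example, to restore the pre-v1.7.0 behaviour, set both options explicitly
-(a sketch; only add the settings you actually want):
-
-.. code:: yaml
-
-   allow_public_rooms_without_auth: true
-   allow_public_rooms_over_federation: true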
-
-For more details on the surrounding context see our `explainer
-<https://matrix.org/blog/2019/11/09/avoiding-unwelcome-visitors-on-private-matrix-servers>`_.
-
-
-Upgrading to v1.5.0
-===================
-
-This release includes a database migration which may take several minutes to
-complete if there are a large number (more than a million or so) of entries in
-the ``devices`` table. This is only likely to be a problem on very large
-installations.
-
-
-Upgrading to v1.4.0
-===================
-
-New custom templates
---------------------
-
-If you have configured a custom template directory with the
-``email.template_dir`` option, be aware that there are new templates regarding
-registration and threepid management (see below) that must be included.
-
-* ``registration.html`` and ``registration.txt``
-* ``registration_success.html`` and ``registration_failure.html``
-* ``add_threepid.html`` and ``add_threepid.txt``
-* ``add_threepid_failure.html`` and ``add_threepid_success.html``
-
-Synapse will expect these files to exist inside the configured template
-directory, and **will fail to start** if they are absent.
-To view the default templates, see `synapse/res/templates
-<https://github.com/matrix-org/synapse/tree/master/synapse/res/templates>`_.
-
-3pid verification changes
--------------------------
-
-**Note: As of this release, users will be unable to add phone numbers or email
-addresses to their accounts, without changes to the Synapse configuration. This
-includes adding an email address during registration.**
-
-It is possible for a user to associate an email address or phone number
-with their account, for a number of reasons:
-
-* for use when logging in, as an alternative to the user id.
-* in the case of email, as an alternative contact to help with account recovery.
-* in the case of email, to receive notifications of missed messages.
-
-Before an email address or phone number can be added to a user's account,
-or before such an address is used to carry out a password-reset, Synapse must
-confirm the operation with the owner of the email address or phone number.
-It does this by sending an email or text giving the user a link or token to confirm
-receipt. This process is known as '3pid verification'. ('3pid', or 'threepid',
-stands for third-party identifier, and we use it to refer to external
-identifiers such as email addresses and phone numbers.)
-
-Previous versions of Synapse delegated the task of 3pid verification to an
-identity server by default. In most cases this server is ``vector.im`` or
-``matrix.org``.
-
-In Synapse 1.4.0, for security and privacy reasons, the homeserver will no
-longer delegate this task to an identity server by default. Instead,
-the server administrator will need to explicitly decide how they would like the
-verification messages to be sent.
-
-In the medium term, the ``vector.im`` and ``matrix.org`` identity servers will
-disable support for delegated 3pid verification entirely. However, in order to
-ease the transition, they will retain the capability for a limited
-period. Delegated email verification will be disabled on Monday 2nd December
-2019 (giving roughly 2 months notice). Disabling delegated SMS verification
-will follow some time after that once SMS verification support lands in
-Synapse.
-
-Once delegated 3pid verification support has been disabled in the ``vector.im`` and
-``matrix.org`` identity servers, all Synapse versions that depend on those
-instances will be unable to verify email and phone numbers through them. There
-are no imminent plans to remove delegated 3pid verification from Sydent
-generally. (Sydent is the identity server project that backs the ``vector.im`` and
-``matrix.org`` instances).
-
-Email
-~~~~~
-Following upgrade, to continue verifying email (e.g. as part of the
-registration process), admins can either:-
-
-* Configure Synapse to use an email server.
-* Run or choose an identity server which allows delegated email verification
- and delegate to it.
-
-Configure SMTP in Synapse
-+++++++++++++++++++++++++
-
-To configure an SMTP server for Synapse, modify the configuration section
-headed ``email``, and be sure to have at least the ``smtp_host, smtp_port``
-and ``notif_from`` fields filled out.
-
-You may also need to set ``smtp_user``, ``smtp_pass``, and
-``require_transport_security``.
-
-See the `sample configuration file <docs/sample_config.yaml>`_ for more details
-on these settings.
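-
-As a rough sketch (the hostname and credentials are placeholders), a minimal
-``email`` section might look like:
-
-.. code:: yaml
-
-   email:
-     smtp_host: mail.example.com
-     smtp_port: 587
-     smtp_user: "synapse"
-     smtp_pass: "secret"
-     require_transport_security: true
-     notif_from: "Your %(app)s homeserver <noreply@example.com>"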
-
-Delegate email to an identity server
-++++++++++++++++++++++++++++++++++++
-
-Some admins will wish to continue using email verification as part of the
-registration process, but will not immediately have an appropriate SMTP server
-at hand.
-
-To this end, we will continue to support email verification delegation via the
-``vector.im`` and ``matrix.org`` identity servers for two months. Support for
-delegated email verification will be disabled on Monday 2nd December.
-
-The ``account_threepid_delegates`` dictionary defines whether the homeserver
-should delegate an external server (typically an `identity server
-<https://matrix.org/docs/spec/identity_service/r0.2.1>`_) to handle sending
-confirmation messages via email and SMS.
-
-So to delegate email verification, in ``homeserver.yaml``, set
-``account_threepid_delegates.email`` to the base URL of an identity server. For
-example:
-
-.. code:: yaml
-
- account_threepid_delegates:
- email: https://example.com # Delegate email sending to example.com
-
-Note that ``account_threepid_delegates.email`` replaces the deprecated
-``email.trust_identity_server_for_password_resets``: if
-``email.trust_identity_server_for_password_resets`` is set to ``true``, and
-``account_threepid_delegates.email`` is not set, then the first entry in
-``trusted_third_party_id_servers`` will be used as the
-``account_threepid_delegate`` for email. This is to ensure compatibility with
-existing Synapse installs that set up external server handling for these tasks
-before v1.4.0. If ``email.trust_identity_server_for_password_resets`` is
-``true`` and no trusted identity server domains are configured, Synapse will
-report an error and refuse to start.
-
-If ``email.trust_identity_server_for_password_resets`` is ``false`` or absent
-and no ``email`` delegate is configured in ``account_threepid_delegates``,
-then Synapse will send email verification messages itself, using the configured
-SMTP server (see above).
-
-Phone numbers
-~~~~~~~~~~~~~
-
-Synapse does not support phone-number verification itself, so the only way to
-maintain the ability for users to add phone numbers to their accounts will be
-by continuing to delegate phone number verification to the ``matrix.org`` and
-``vector.im`` identity servers (or another identity server that supports SMS
-sending).
-
-The ``account_threepid_delegates`` dictionary defines whether the homeserver
-should delegate an external server (typically an `identity server
-<https://matrix.org/docs/spec/identity_service/r0.2.1>`_) to handle sending
-confirmation messages via email and SMS.
-
-So to delegate phone number verification, in ``homeserver.yaml``, set
-``account_threepid_delegates.msisdn`` to the base URL of an identity
-server. For example:
-
-.. code:: yaml
-
- account_threepid_delegates:
- msisdn: https://example.com # Delegate sms sending to example.com
-
-The ``matrix.org`` and ``vector.im`` identity servers will continue to support
-delegated phone number verification via SMS until such time as it is possible
-for admins to configure their servers to perform phone number verification
-directly. More details will follow in a future release.
-
-Rolling back to v1.3.1
-----------------------
-
-If you encounter problems with v1.4.0, it should be possible to roll back to
-v1.3.1, subject to the following:
-
-* The 'room statistics' engine was heavily reworked in this release (see
- `#5971 <https://github.com/matrix-org/synapse/pull/5971>`_), including
- significant changes to the database schema, which are not easily
- reverted. This will cause the room statistics engine to stop updating when
- you downgrade.
-
- The room statistics are essentially unused in v1.3.1 (in future versions of
- Synapse, they will be used to populate the room directory), so there should
- be no loss of functionality. However, the statistics engine will write errors
- to the logs, which can be avoided by setting the following in
- `homeserver.yaml`:
-
- .. code:: yaml
-
- stats:
- enabled: false
-
- Don't forget to re-enable it when you upgrade again, in preparation for its
- use in the room directory!
-
-Upgrading to v1.2.0
-===================
-
-Some counter metrics have been renamed, with the old names deprecated. See
-`the metrics documentation <docs/metrics-howto.md#renaming-of-metrics--deprecation-of-old-names-in-12>`_
-for details.
-
-Upgrading to v1.1.0
-===================
-
-Synapse v1.1.0 removes support for older Python and PostgreSQL versions, as
-outlined in `our deprecation notice <https://matrix.org/blog/2019/04/08/synapse-deprecating-postgres-9-4-and-python-2-x>`_.
-
-Minimum Python Version
-----------------------
-
-Synapse v1.1.0 has a minimum Python requirement of Python 3.5. Python 3.6 or
-Python 3.7 are recommended as they have improved internal string handling,
-significantly reducing memory usage.
-
-If you use current versions of the Matrix.org-distributed Debian packages or
-Docker images, action is not required.
-
-If you install Synapse in a Python virtual environment, please see "Upgrading to
-v0.34.0" for notes on setting up a new virtualenv under Python 3.
-
-Minimum PostgreSQL Version
---------------------------
-
-If using PostgreSQL under Synapse, you will need to use PostgreSQL 9.5 or above.
-Please see the
-`PostgreSQL documentation <https://www.postgresql.org/docs/11/upgrading.html>`_
-for more details on upgrading your database.
-
-Upgrading to v1.0
-=================
-
-Validation of TLS certificates
-------------------------------
-
-Synapse v1.0 is the first release to enforce
-validation of TLS certificates for the federation API. It is therefore
-essential that your certificates are correctly configured. See the `FAQ
-<docs/MSC1711_certificates_FAQ.md>`_ for more information.
-
-Note, v1.0 installations will also no longer be able to federate with servers
-that have not correctly configured their certificates.
-
-In rare cases, it may be desirable to disable certificate checking: for
-example, it might be essential to be able to federate with a given legacy
-server in a closed federation. This can be done in one of two ways:-
-
-* Configure the global switch ``federation_verify_certificates`` to ``false``.
-* Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``.
-
-See the `sample configuration file <docs/sample_config.yaml>`_
-for more details on these settings.
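-
-For example (the domain is hypothetical; normally only one of the two
-approaches is needed):
-
-.. code:: yaml
-
-   # EITHER disable certificate verification entirely (not recommended) ...
-   federation_verify_certificates: false
-
-   # ... OR trust specific legacy servers only:
-   federation_certificate_verification_whitelist:
-     - legacy.example.com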
-
-Email
------
-When a user requests a password reset, Synapse will send an email to the
-user to confirm the request.
-
-Previous versions of Synapse delegated the job of sending this email to an
-identity server. If the identity server was somehow malicious or became
-compromised, it would be theoretically possible to hijack an account through
-this means.
-
-Therefore, by default, Synapse v1.0 will send the confirmation email itself. If
-Synapse is not configured with an SMTP server, password reset via email will be
-disabled.
-
-To configure an SMTP server for Synapse, modify the configuration section
-headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
-and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
-``smtp_pass``, and ``require_transport_security``.
-
-If you are absolutely certain that you wish to continue using an identity
-server for password resets, set ``trust_identity_server_for_password_resets`` to ``true``.
-
-See the `sample configuration file <docs/sample_config.yaml>`_
-for more details on these settings.
-
-New email templates
--------------------
-Some new templates have been added to the default template directory for the purpose of the
-homeserver sending its own password reset emails. If you have configured a custom
-``template_dir`` in your Synapse config, these files will need to be added.
-
-``password_reset.html`` and ``password_reset.txt`` are HTML and plain text templates
-respectively that contain the contents of what will be emailed to the user upon attempting to
-reset their password via email. ``password_reset_success.html`` and
-``password_reset_failure.html`` are HTML files whose content (assuming no redirect
-URL is set) will be shown to the user after they click the link in the email sent
-to them.
-
-Upgrading to v0.99.0
-====================
-
-Please be aware that, before Synapse v1.0 is released around March 2019, you
-will need to replace any self-signed certificates with those verified by a
-root CA. Information on how to do so can be found at `the ACME docs
-<docs/ACME.md>`_.
-
-For more information on configuring TLS certificates see the `FAQ <docs/MSC1711_certificates_FAQ.md>`_.
-
-Upgrading to v0.34.0
-====================
-
-1. This release is the first to fully support Python 3. Synapse will now run on
- Python versions 3.5 or 3.6 (as well as 2.7). We recommend switching to
- Python 3, as it has been shown to give performance improvements.
-
- For users who have installed Synapse into a virtualenv, we recommend doing
- this by creating a new virtualenv. For example::
-
- virtualenv -p python3 ~/synapse/env3
- source ~/synapse/env3/bin/activate
- pip install matrix-synapse
-
- You can then start synapse as normal, having activated the new virtualenv::
-
- cd ~/synapse
- source env3/bin/activate
- synctl start
-
- Users who have installed from distribution packages should see the relevant
- package documentation. See below for notes on Debian packages.
-
- * When upgrading to Python 3, you **must** make sure that your log files are
- configured as UTF-8, by adding ``encoding: utf8`` to the
- ``RotatingFileHandler`` configuration (if you have one) in your
- ``<server>.log.config`` file. For example, if your ``log.config`` file
- contains::
-
- handlers:
- file:
- class: logging.handlers.RotatingFileHandler
- formatter: precise
- filename: homeserver.log
- maxBytes: 104857600
- backupCount: 10
- filters: [context]
- console:
- class: logging.StreamHandler
- formatter: precise
- filters: [context]
-
- Then you should update this to be::
-
- handlers:
- file:
- class: logging.handlers.RotatingFileHandler
- formatter: precise
- filename: homeserver.log
- maxBytes: 104857600
- backupCount: 10
- filters: [context]
- encoding: utf8
- console:
- class: logging.StreamHandler
- formatter: precise
- filters: [context]
-
- There is no need to revert this change if downgrading to Python 2.
-
- We are also making available Debian packages which will run Synapse on
- Python 3. You can switch to these packages with ``apt-get install
- matrix-synapse-py3``, however, please read `debian/NEWS
- <https://github.com/matrix-org/synapse/blob/release-v0.34.0/debian/NEWS>`_
- before doing so. The existing ``matrix-synapse`` packages will continue to
- use Python 2 for the time being.
-
-2. This release removes ``riot.im`` from the default list of trusted
- identity servers.
-
- If ``riot.im`` is in your homeserver's list of
- ``trusted_third_party_id_servers``, you should remove it. It was added in
- case a hypothetical future identity server was put there. If you don't
- remove it, users may be unable to deactivate their accounts.
-
-3. This release no longer installs the (unmaintained) Matrix Console web client
- as part of the default installation. It is possible to re-enable it by
- installing it separately and setting the ``web_client_location`` config
- option, but please consider switching to another client.
-
-Upgrading to v0.33.7
-====================
-
-This release removes the example email notification templates from
-``res/templates`` (they are now internal to the python package). This should
-only affect you if you (a) deploy your Synapse instance from a git checkout or
-a github snapshot URL, and (b) have email notifications enabled.
-
-If you have email notifications enabled, you should ensure that
-``email.template_dir`` is either configured to point at a directory where you
-have installed customised templates, or leave it unset to use the default
-templates.
-
-Upgrading to v0.27.3
-====================
-
-This release expands the anonymous usage stats sent if the opt-in
-``report_stats`` configuration is set to ``true``. We now capture RSS memory
-and cpu use at a very coarse level. This requires administrators to install
-the optional ``psutil`` python module.
-
-We would appreciate it if you could assist by ensuring this module is available
-and ``report_stats`` is enabled. This will let us see if performance changes to
-synapse are having an impact on the general community.
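-
-``psutil`` can be installed into the virtualenv alongside Synapse; for example
-(adjust the path if your virtualenv lives elsewhere):
-
-.. code:: bash
-
-   ~/synapse/env/bin/pip install psutil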
-
-Upgrading to v0.15.0
-====================
-
-If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
-then you have to explicitly enable it in the config and update your
-dependencies. See README.rst for details.
-
-
-Upgrading to v0.11.0
-====================
-
-This release includes the option to send anonymous usage stats to matrix.org,
-and requires that administrators explicitly opt in or out by setting the
-``report_stats`` option to either ``true`` or ``false``.
-
-We would really appreciate it if you could help our project out by reporting
-anonymized usage statistics from your homeserver. Only very basic aggregate
-data (e.g. number of users) will be reported, but it helps us to track the
-growth of the Matrix community, and helps us to make Matrix a success, as well
-as to convince other networks that they should peer with us.
-
-
-Upgrading to v0.9.0
-===================
-
-Application services have had a breaking API change in this version.
-
-They can no longer register themselves with a home server using the AS HTTP API. This
-decision was made because a compromised application service with free rein to register
-any regex in effect grants full read/write access to the home server if a regex of ``.*``
-is used. An attack where a compromised AS re-registers itself with ``.*`` was deemed too
-big of a security risk to ignore, and so the ability to register with the HS remotely has
-been removed.
-
-It has been replaced by specifying a list of application service registrations in
-``homeserver.yaml``::
-
- app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
-
-Where ``registration-01.yaml`` looks like::
-
- url: <String> # e.g. "https://my.application.service.com"
- as_token: <String>
- hs_token: <String>
- sender_localpart: <String> # This is a new field which denotes the user_id localpart when using the AS token
- namespaces:
- users:
- - exclusive: <Boolean>
- regex: <String> # e.g. "@prefix_.*"
- aliases:
- - exclusive: <Boolean>
- regex: <String>
- rooms:
- - exclusive: <Boolean>
- regex: <String>
-
-Upgrading to v0.8.0
-===================
-
-Servers which use captchas will need to add their public key to::
-
- static/client/register/register_config.js
-
- window.matrixRegistrationConfig = {
- recaptcha_public_key: "YOUR_PUBLIC_KEY"
- };
-
-This is required in order to support registration fallback (typically used on
-mobile devices).
-
-
-Upgrading to v0.7.0
-===================
-
-New dependencies are:
-
-- pydenticon
-- simplejson
-- syutil
-- matrix-angular-sdk
-
-To pull in these dependencies in a virtual env, run::
-
- python synapse/python_dependencies.py | xargs -n 1 pip install
-
-Upgrading to v0.6.0
-===================
-
-To pull in new dependencies, run::
-
- python setup.py develop --user
-
-This update includes a change to the database schema. To upgrade you first need
-to upgrade the database by running::
-
- python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
-
-Where `<db>` is the location of the database, `<server_name>` is the
-server name as specified in the synapse configuration, and `<signing_key>` is
-the location of the signing key as specified in the synapse configuration.
-
-This may take some time to complete. Failures of signatures and content hashes
-can safely be ignored.
-
-
-Upgrading to v0.5.1
-===================
-
-Depending on precisely when you installed v0.5.0 you may have ended up with
-a stale release of the reference matrix webclient installed as a python module.
-To uninstall it and ensure you are depending on the latest module, please run::
-
- $ pip uninstall syweb
-
-Upgrading to v0.5.0
-===================
-
-The webclient has been split out into a separate repository/package in this
-release. Before you restart your homeserver you will need to pull in the
-webclient package by running::
-
- python setup.py develop --user
-
-This release completely changes the database schema and so requires upgrading
-it before starting the new version of the homeserver.
-
-The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
-database. This will save all user information, such as logins and profiles,
-but will otherwise purge the database. This includes messages, which
-rooms the home server was a member of, and room alias mappings.
-
-If you would like to keep your history, please take a copy of your database
-file and ask for help in #matrix:matrix.org. The upgrade process is,
-unfortunately, non-trivial and requires human intervention to resolve any
-resulting conflicts during the upgrade process.
-
-Before running the command, the homeserver should first be completely
-shut down. To run it, simply specify the location of the database, e.g.::
-
- ./scripts/database-prepare-for-0.5.0.sh "homeserver.db"
-
-Once this has successfully completed it will be safe to restart the
-homeserver. You may notice that the homeserver takes a few seconds longer to
-restart than usual as it reinitializes the database.
-
-On startup of the new version, users can either rejoin remote rooms using room
-aliases or by being reinvited. Alternatively, if any other homeserver sends a
-message to a room that the homeserver was previously in, the local HS will
-automatically rejoin the room.
-
-Upgrading to v0.4.0
-===================
-
-This release needs an updated syutil version. Run::
-
- python setup.py develop
-
-You will also need to upgrade your configuration as the signing key format has
-changed. Run::
-
- python -m synapse.app.homeserver --config-path <CONFIG> --generate-config
-
-
-Upgrading to v0.3.0
-===================
-
-The registration API now closely matches the login API. This introduces a bit
-more back-and-forth between the HS and the client, but this improves
-the overall flexibility of the API. You can now GET on /register to retrieve a list
-of valid registration flows. Upon choosing one, they are submitted in the same
-way as login, e.g::
-
- {
- type: m.login.password,
- user: foo,
- password: bar
- }
-
-The default HS supports 2 flows, with and without Identity Server email
-authentication. Enabling captcha on the HS will add in an extra step to all
-flows: ``m.login.recaptcha`` which must be completed before you can transition
-to the next stage. There is a new login type: ``m.login.email.identity`` which
-contains the ``threepidCreds`` key which were previously sent in the original
-register request. For more information on this, see the specification.
-
-Web Client
-----------
-
-The VoIP specification has changed between v0.2.0 and v0.3.0. Users should
-refresh any browser tabs to get the latest web client code. Users on
-v0.2.0 of the web client will not be able to call those on v0.3.0 and
-vice versa.
-
-
-Upgrading to v0.2.0
-===================
-
-The home server now requires SSL config to be set up before it can run. To
-automatically generate default config use::
-
- $ python synapse/app/homeserver.py \
- --server-name machine.my.domain.name \
- --bind-port 8448 \
- --config-path homeserver.config \
- --generate-config
-
-This config can be edited if desired, for example to specify a different SSL
-certificate to use. Once done you can run the home server using::
-
- $ python synapse/app/homeserver.py --config-path homeserver.config
-
-See the README.rst for more information.
-
-Also note that some config options have been renamed, including:
-
-- "host" to "server-name"
-- "database" to "database-path"
-- "port" to "bind-port" and "unsecure-port"
-
-
-Upgrading to v0.0.1
-===================
-
-This release completely changes the database schema and so requires upgrading
-it before starting the new version of the homeserver.
-
-The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
-database. This will save all user information, such as logins and profiles,
-but will otherwise purge the database. This includes messages, which
-rooms the home server was a member of, and room alias mappings.
-
-Before running the command, the homeserver should first be completely
-shut down. To run it, simply specify the location of the database, e.g.::
-
- ./scripts/database-prepare-for-0.0.1.sh "homeserver.db"
-
-Once this has successfully completed it will be safe to restart the
-homeserver. You may notice that the homeserver takes a few seconds longer to
-restart than usual as it reinitializes the database.
-
-On startup of the new version, users can either rejoin remote rooms using room
-aliases or by being reinvited. Alternatively, if any other homeserver sends a
-message to a room that the homeserver was previously in, the local HS will
-automatically rejoin the room.
+The markdown source is available in `docs/upgrade.md <docs/upgrade.md>`_.
diff --git a/contrib/systemd/README.md b/contrib/systemd/README.md
index 5d42b346..2844cbc8 100644
--- a/contrib/systemd/README.md
+++ b/contrib/systemd/README.md
@@ -2,7 +2,8 @@
This is a setup for managing synapse with a user contributed systemd unit
file. It provides a `matrix-synapse` systemd unit file that should be tailored
to accommodate your installation in accordance with the installation
-instructions provided in [installation instructions](../../INSTALL.md).
+instructions provided in
+[installation instructions](https://matrix-org.github.io/synapse/latest/setup/installation.html).
## Setup
1. Under the service section, ensure the `User` variable matches which user
diff --git a/debian/changelog b/debian/changelog
index eb481a75..0bb7fe42 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+matrix-synapse (1.38.0-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Install renamed documents under the old names.
+
+ -- Andrej Shadura <andrewsh@debian.org> Wed, 14 Jul 2021 09:00:51 +0200
+
matrix-synapse (1.37.1-1~fto10+1) buster-fasttrack; urgency=medium
* Rebuild for buster-fasttrack.
diff --git a/debian/docs b/debian/docs
index 67b1838b..fb77d6ec 100644
--- a/debian/docs
+++ b/debian/docs
@@ -1,5 +1,5 @@
AUTHORS.rst
CONTRIBUTING.md
-INSTALL.md
README.rst
-UPGRADE.rst
+docs/setup/installation.md
+docs/upgrade.md
diff --git a/debian/rules b/debian/rules
index a397b932..dd3340ce 100755
--- a/debian/rules
+++ b/debian/rules
@@ -17,6 +17,12 @@ override_dh_auto_install:
override_dh_installdocs:
dh_installdocs
+ mv debian/$(PYBUILD_NAME)/usr/share/doc/matrix-synapse/installation.md \
+ debian/$(PYBUILD_NAME)/usr/share/doc/matrix-synapse/INSTALL.md
+ mv debian/$(PYBUILD_NAME)/usr/share/doc/matrix-synapse/upgrade.md \
+ debian/$(PYBUILD_NAME)/usr/share/doc/matrix-synapse/UPGRADE.md
+ ln -sr debian/$(PYBUILD_NAME)/usr/share/doc/matrix-synapse/UPGRADE.md \
+ debian/$(PYBUILD_NAME)/usr/share/doc/matrix-synapse/UPGRADE.rst
sed -i \
-e 's,/opt/venvs/matrix-synapse/bin/python,/usr/bin/python3,' \
-e 's,Restart=always,Restart=on-failure,' \
diff --git a/docker/README.md b/docker/README.md
index 3f28cdad..edf917bb 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -45,7 +45,7 @@ docker run -it --rm \
```
For information on picking a suitable server name, see
-https://github.com/matrix-org/synapse/blob/master/INSTALL.md.
+https://matrix-org.github.io/synapse/latest/setup/installation.html.
The above command will generate a `homeserver.yaml` in (typically)
`/var/lib/docker/volumes/synapse-data/_data`. You should check this file, and
@@ -139,7 +139,7 @@ For documentation on using a reverse proxy, see
https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
For more information on enabling TLS support in synapse itself, see
-https://github.com/matrix-org/synapse/blob/master/INSTALL.md#tls-certificates. Of
+https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates. Of
course, you will need to expose the TLS port from the container with a `-p`
argument to `docker run`.
diff --git a/docs/.sample_config_header.yaml b/docs/.sample_config_header.yaml
index 8c9b31ac..09e86ca0 100644
--- a/docs/.sample_config_header.yaml
+++ b/docs/.sample_config_header.yaml
@@ -8,7 +8,8 @@
#
# It is *not* intended to be copied and used as the basis for a real
# homeserver.yaml. Instead, if you are starting from scratch, please generate
-# a fresh config using Synapse by following the instructions in INSTALL.md.
+# a fresh config using Synapse by following the instructions in
+# https://matrix-org.github.io/synapse/latest/setup/installation.html.
# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md
index ce8189d4..283f288a 100644
--- a/docs/MSC1711_certificates_FAQ.md
+++ b/docs/MSC1711_certificates_FAQ.md
@@ -14,7 +14,7 @@ upgraded, however it may be of use to those with old installs returning to the
project.
If you are setting up a server from scratch you almost certainly should look at
-the [installation guide](../INSTALL.md) instead.
+the [installation guide](setup/installation.md) instead.
## Introduction
The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 98969bdd..db4ef1a4 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -11,7 +11,7 @@
- [Delegation](delegate.md)
# Upgrading
- - [Upgrading between Synapse Versions](upgrading/README.md)
+ - [Upgrading between Synapse Versions](upgrade.md)
- [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)
# Usage
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index ef1e735e..4a65d0c3 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -36,7 +36,17 @@ It returns a JSON body like the following:
"creation_ts": 1560432506,
"appservice_id": null,
"consent_server_notice_sent": null,
- "consent_version": null
+ "consent_version": null,
+ "external_ids": [
+ {
+ "auth_provider": "<provider1>",
+ "external_id": "<user_id_provider_1>"
+ },
+ {
+ "auth_provider": "<provider2>",
+ "external_id": "<user_id_provider_2>"
+ }
+ ]
}
```
diff --git a/docs/modules.md b/docs/modules.md
index 3a9fab61..bec1c06d 100644
--- a/docs/modules.md
+++ b/docs/modules.md
@@ -194,7 +194,7 @@ In order to port a module that uses Synapse's old module interface, its author n
* ensure the module's callbacks are all asynchronous.
* register their callbacks using one or more of the `register_[...]_callbacks` methods
- from the `ModuleApi` class in the module's `__init__` method (see [this section](#registering-a-web-resource)
+ from the `ModuleApi` class in the module's `__init__` method (see [this section](#registering-a-callback)
for more info).
Additionally, if the module is packaged with an additional web resource, the module
diff --git a/docs/postgres.md b/docs/postgres.md
index f83155e5..2c0a5b80 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -8,14 +8,14 @@ Synapse will require the python postgres client library in order to
connect to a postgres database.
- If you are using the [matrix.org debian/ubuntu
- packages](../INSTALL.md#matrixorg-packages), the necessary python
+ packages](setup/installation.md#matrixorg-packages), the necessary python
library will already be installed, but you will need to ensure the
low-level postgres library is installed, which you can do with
`apt install libpq5`.
- For other pre-built packages, please consult the documentation from
the relevant package.
- If you installed synapse [in a
- virtualenv](../INSTALL.md#installing-from-source), you can install
+ virtualenv](setup/installation.md#installing-from-source), you can install
the library with:
~/synapse/env/bin/pip install "matrix-synapse[postgres]"
diff --git a/docs/presence_router_module.md b/docs/presence_router_module.md
index bf859e42..4a3e7202 100644
--- a/docs/presence_router_module.md
+++ b/docs/presence_router_module.md
@@ -222,7 +222,9 @@ Synapse, amend your homeserver config file with the following.
```yaml
presence:
- routing_module:
+ enabled: true
+
+ presence_router:
module: my_module.ExamplePresenceRouter
config:
# Any configuration options for your module. The below is an example.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 6fcc022b..054770f7 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -8,7 +8,8 @@
#
# It is *not* intended to be copied and used as the basis for a real
# homeserver.yaml. Instead, if you are starting from scratch, please generate
-# a fresh config using Synapse by following the instructions in INSTALL.md.
+# a fresh config using Synapse by following the instructions in
+# https://matrix-org.github.io/synapse/latest/setup/installation.html.
# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
@@ -36,7 +37,7 @@
# Server admins can expand Synapse's functionality with external modules.
#
-# See https://matrix-org.github.io/synapse/develop/modules.html for more
+# See https://matrix-org.github.io/synapse/latest/modules.html for more
# documentation on how to configure or create custom modules for Synapse.
#
modules:
@@ -58,7 +59,7 @@ modules:
# In most cases you should avoid using a matrix specific subdomain such as
# matrix.example.com or synapse.example.com as the server_name for the same
# reasons you wouldn't use user@email.example.com as your email address.
-# See https://github.com/matrix-org/synapse/blob/master/docs/delegate.md
+# See https://matrix-org.github.io/synapse/latest/delegate.html
# for information on how to host Synapse on a subdomain while preserving
# a clean server_name.
#
@@ -253,9 +254,9 @@ presence:
# 'all local interfaces'.
#
# type: the type of listener. Normally 'http', but other valid options are:
-# 'manhole' (see docs/manhole.md),
-# 'metrics' (see docs/metrics-howto.md),
-# 'replication' (see docs/workers.md).
+# 'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html),
+# 'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html),
+# 'replication' (see https://matrix-org.github.io/synapse/latest/workers.html).
#
# tls: set to true to enable TLS for this listener. Will use the TLS
# key/cert specified in tls_private_key_path / tls_certificate_path.
@@ -280,8 +281,8 @@ presence:
# client: the client-server API (/_matrix/client), and the synapse admin
# API (/_synapse/admin). Also implies 'media' and 'static'.
#
-# consent: user consent forms (/_matrix/consent). See
-# docs/consent_tracking.md.
+# consent: user consent forms (/_matrix/consent).
+# See https://matrix-org.github.io/synapse/latest/consent_tracking.html.
#
# federation: the server-server API (/_matrix/federation). Also implies
# 'media', 'keys', 'openid'
@@ -290,12 +291,13 @@ presence:
#
# media: the media API (/_matrix/media).
#
-# metrics: the metrics interface. See docs/metrics-howto.md.
+# metrics: the metrics interface.
+# See https://matrix-org.github.io/synapse/latest/metrics-howto.html.
#
# openid: OpenID authentication.
#
-# replication: the HTTP replication API (/_synapse/replication). See
-# docs/workers.md.
+# replication: the HTTP replication API (/_synapse/replication).
+# See https://matrix-org.github.io/synapse/latest/workers.html.
#
# static: static resources under synapse/static (/_matrix/static). (Mostly
# useful for 'fallback authentication'.)
@@ -319,7 +321,7 @@ listeners:
# that unwraps TLS.
#
# If you plan to use a reverse proxy, please see
- # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
+ # https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
#
- port: 8008
tls: false
@@ -673,35 +675,41 @@ retention:
#event_cache_size: 10K
caches:
- # Controls the global cache factor, which is the default cache factor
- # for all caches if a specific factor for that cache is not otherwise
- # set.
- #
- # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
- # variable. Setting by environment variable takes priority over
- # setting through the config file.
- #
- # Defaults to 0.5, which will half the size of all caches.
- #
- #global_factor: 1.0
+ # Controls the global cache factor, which is the default cache factor
+ # for all caches if a specific factor for that cache is not otherwise
+ # set.
+ #
+ # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
+ # variable. Setting by environment variable takes priority over
+ # setting through the config file.
+ #
+  # Defaults to 0.5, which will halve the size of all caches.
+ #
+ #global_factor: 1.0
- # A dictionary of cache name to cache factor for that individual
- # cache. Overrides the global cache factor for a given cache.
- #
- # These can also be set through environment variables comprised
- # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
- # letters and underscores. Setting by environment variable
- # takes priority over setting through the config file.
- # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
- #
- # Some caches have '*' and other characters that are not
- # alphanumeric or underscores. These caches can be named with or
- # without the special characters stripped. For example, to specify
- # the cache factor for `*stateGroupCache*` via an environment
- # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
- #
- per_cache_factors:
- #get_users_who_share_room_with_user: 2.0
+ # A dictionary of cache name to cache factor for that individual
+ # cache. Overrides the global cache factor for a given cache.
+ #
+ # These can also be set through environment variables comprised
+ # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
+ # letters and underscores. Setting by environment variable
+ # takes priority over setting through the config file.
+ # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
+ #
+ # Some caches have '*' and other characters that are not
+ # alphanumeric or underscores. These caches can be named with or
+ # without the special characters stripped. For example, to specify
+ # the cache factor for `*stateGroupCache*` via an environment
+ # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
+ #
+ per_cache_factors:
+ #get_users_who_share_room_with_user: 2.0
+
+ # Controls how long an entry can be in a cache without having been
+ # accessed before being evicted. Defaults to None, which means
+ # entries are never evicted based on time.
+ #
+ #expiry_time: 30m
## Database ##
@@ -741,7 +749,8 @@ caches:
# cp_min: 5
# cp_max: 10
#
-# For more information on using Synapse with Postgres, see `docs/postgres.md`.
+# For more information on using Synapse with Postgres,
+# see https://matrix-org.github.io/synapse/latest/postgres.html.
#
database:
name: sqlite3
@@ -894,7 +903,7 @@ media_store_path: "DATADIR/media_store"
#
# If you are using a reverse proxy you may also need to set this value in
# your reverse proxy's config. Notably Nginx has a small max body size by default.
-# See https://matrix-org.github.io/synapse/develop/reverse_proxy.html.
+# See https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
#
#max_upload_size: 50M
@@ -1834,7 +1843,7 @@ saml2_config:
#
# module: The class name of a custom mapping module. Default is
# 'synapse.handlers.oidc.JinjaOidcMappingProvider'.
-# See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers
+# See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
# for information on implementing a custom mapping provider.
#
# config: Configuration for the mapping provider module. This section will
@@ -1885,7 +1894,7 @@ saml2_config:
# - attribute: groups
# value: "admin"
#
-# See https://github.com/matrix-org/synapse/blob/master/docs/openid.md
+# See https://matrix-org.github.io/synapse/latest/openid.html
# for information on how to configure these options.
#
# For backwards compatibility, it is also possible to configure a single OIDC
@@ -2163,7 +2172,7 @@ sso:
# Note that this is a non-standard login type and client support is
# expected to be non-existent.
#
-# See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md.
+# See https://matrix-org.github.io/synapse/latest/jwt.html.
#
#jwt_config:
# Uncomment the following to enable authorization using JSON web
@@ -2463,7 +2472,7 @@ email:
# ex. LDAP, external tokens, etc.
#
# For more information and known implementations, please see
-# https://github.com/matrix-org/synapse/blob/master/docs/password_auth_providers.md
+# https://matrix-org.github.io/synapse/latest/password_auth_providers.html
#
# Note: instances wishing to use SAML or CAS authentication should
# instead use the `saml2_config` or `cas_config` options,
@@ -2565,7 +2574,7 @@ user_directory:
#
# If you set it true, you'll have to rebuild the user_directory search
# indexes, see:
- # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
+ # https://matrix-org.github.io/synapse/latest/user_directory.html
#
# Uncomment to return search results containing all known users, even if that
# user does not share a room with the requester.
@@ -2585,7 +2594,7 @@ user_directory:
# User Consent configuration
#
# for detailed instructions, see
-# https://github.com/matrix-org/synapse/blob/master/docs/consent_tracking.md
+# https://matrix-org.github.io/synapse/latest/consent_tracking.html
#
# Parts of this section are required if enabling the 'consent' resource under
# 'listeners', in particular 'template_dir' and 'version'.
@@ -2635,7 +2644,7 @@ user_directory:
# Settings for local room and user statistics collection. See
-# docs/room_and_user_statistics.md.
+# https://matrix-org.github.io/synapse/latest/room_and_user_statistics.html.
#
stats:
# Uncomment the following to disable room and user statistics. Note that doing
@@ -2762,7 +2771,7 @@ opentracing:
#enabled: true
# The list of homeservers we wish to send and receive span contexts and span baggage.
- # See docs/opentracing.rst.
+ # See https://matrix-org.github.io/synapse/latest/opentracing.html.
#
# This is a list of regexes which are matched against the server_name of the
# homeserver.
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index ff3c7471..669e6000 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -7,7 +7,7 @@
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
-# [2]: https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md
+# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index 8bb1cffd..d041d083 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -1,7 +1,596 @@
-<!--
- Include the contents of INSTALL.md from the project root without moving it, which may
- break links around the internet. Additionally, note that SUMMARY.md is unable to
- directly link to content outside of the docs/ directory. So we use this file as a
- redirection.
--->
-{{#include ../../INSTALL.md}} \ No newline at end of file
+# Installation Instructions
+
+There are 3 steps to follow under **Installation Instructions**.
+
+- [Installation Instructions](#installation-instructions)
+ - [Choosing your server name](#choosing-your-server-name)
+ - [Installing Synapse](#installing-synapse)
+ - [Installing from source](#installing-from-source)
+ - [Platform-specific prerequisites](#platform-specific-prerequisites)
+ - [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
+ - [ArchLinux](#archlinux)
+ - [CentOS/Fedora](#centosfedora)
+ - [macOS](#macos)
+ - [OpenSUSE](#opensuse)
+ - [OpenBSD](#openbsd)
+ - [Windows](#windows)
+ - [Prebuilt packages](#prebuilt-packages)
+ - [Docker images and Ansible playbooks](#docker-images-and-ansible-playbooks)
+ - [Debian/Ubuntu](#debianubuntu)
+ - [Matrix.org packages](#matrixorg-packages)
+ - [Downstream Debian packages](#downstream-debian-packages)
+ - [Downstream Ubuntu packages](#downstream-ubuntu-packages)
+ - [Fedora](#fedora)
+ - [OpenSUSE](#opensuse-1)
+ - [SUSE Linux Enterprise Server](#suse-linux-enterprise-server)
+ - [ArchLinux](#archlinux-1)
+ - [Void Linux](#void-linux)
+ - [FreeBSD](#freebsd)
+ - [OpenBSD](#openbsd-1)
+ - [NixOS](#nixos)
+ - [Setting up Synapse](#setting-up-synapse)
+ - [Using PostgreSQL](#using-postgresql)
+ - [TLS certificates](#tls-certificates)
+ - [Client Well-Known URI](#client-well-known-uri)
+ - [Email](#email)
+ - [Registering a user](#registering-a-user)
+ - [Setting up a TURN server](#setting-up-a-turn-server)
+ - [URL previews](#url-previews)
+ - [Troubleshooting Installation](#troubleshooting-installation)
+
+
+## Choosing your server name
+
+It is important to choose the name for your server before you install Synapse,
+because it cannot be changed later.
+
+The server name determines the "domain" part of user-ids for users on your
+server: these will all be of the format `@user:my.domain.name`. It also
+determines how other matrix servers will reach yours for federation.
+
+For a test configuration, set this to the hostname of your server. For a more
+production-ready setup, you will probably want to specify your domain
+(`example.com`) rather than a matrix-specific hostname here (in the same way
+that your email address is probably `user@example.com` rather than
+`user@email.example.com`) - but doing so may require more advanced setup: see
+[Setting up Federation](../federate.md).
+
+## Installing Synapse
+
+### Installing from source
+
+(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
+
+When installing from source, please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.
+
+System requirements:
+
+- POSIX-compliant system (tested on Linux & OS X)
+- Python 3.5.2 or later, up to Python 3.9.
+- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
+
+
+To install the Synapse homeserver run:
+
+```sh
+mkdir -p ~/synapse
+virtualenv -p python3 ~/synapse/env
+source ~/synapse/env/bin/activate
+pip install --upgrade pip
+pip install --upgrade setuptools
+pip install matrix-synapse
+```
+
+This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
+and install it, along with the python libraries it uses, into a virtual environment
+under `~/synapse/env`. Feel free to pick a different directory if you
+prefer.
+
+This Synapse installation can later be upgraded by using pip again with the
+update flag:
+
+```sh
+source ~/synapse/env/bin/activate
+pip install -U matrix-synapse
+```
+
+Before you can start Synapse, you will need to generate a configuration
+file. To do this, run (in your virtualenv, as before):
+
+```sh
+cd ~/synapse
+python -m synapse.app.homeserver \
+ --server-name my.domain.name \
+ --config-path homeserver.yaml \
+ --generate-config \
+ --report-stats=[yes|no]
+```
+
+... substituting an appropriate value for `--server-name`.
+
+This command will generate a config file for you to customise, and will
+also generate a set of keys. These keys will allow your homeserver to
+identify itself to other homeservers, so don't lose or delete them. It would be
+wise to back them up somewhere safe. (If, for whatever reason, you do need to
+change your homeserver's keys, you may find that other homeservers have the
+old key cached. If you update the signing key, you should change the name of the
+key in the `<server name>.signing.key` file (the second word) to something
+different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).
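+
+For reference, a signing key file is a single line whose second word is the
+key name, so renaming the key means editing just that word. The key id and
+key material below are made-up placeholders:
+
+```
+ed25519 a_OldKeyId 6jDm5W0EePxQIG7zGcjI1tUqvVjQDSN4Vqmdh0OhMns
+```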
+
+To actually run your new homeserver, pick a working directory for Synapse to
+run (e.g. `~/synapse`), and:
+
+```sh
+cd ~/synapse
+source env/bin/activate
+synctl start
+```
+
+#### Platform-specific prerequisites
+
+Synapse is written in Python but some of the libraries it uses are written in
+C. So before we can install Synapse itself we need a working C compiler and the
+header files for Python C extensions.
+
+##### Debian/Ubuntu/Raspbian
+
+Installing prerequisites on Ubuntu or Debian:
+
+```sh
+sudo apt install build-essential python3-dev libffi-dev \
+ python3-pip python3-setuptools sqlite3 \
+ libssl-dev virtualenv libjpeg-dev libxslt1-dev
+```
+
+##### ArchLinux
+
+Installing prerequisites on ArchLinux:
+
+```sh
+sudo pacman -S base-devel python python-pip \
+ python-setuptools python-virtualenv sqlite3
+```
+
+##### CentOS/Fedora
+
+Installing prerequisites on CentOS or Fedora Linux:
+
+```sh
+sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
+ libwebp-devel libxml2-devel libxslt-devel libpq-devel \
+ python3-virtualenv libffi-devel openssl-devel python3-devel
+sudo dnf groupinstall "Development Tools"
+```
+
+##### macOS
+
+Installing prerequisites on macOS:
+
+```sh
+xcode-select --install
+sudo easy_install pip
+sudo pip install virtualenv
+brew install pkg-config libffi
+```
+
+On macOS Catalina (10.15) you may need to explicitly install OpenSSL
+via brew and inform `pip` about it so that `psycopg2` builds:
+
+```sh
+brew install openssl@1.1
+export LDFLAGS="-L/usr/local/opt/openssl/lib"
+export CPPFLAGS="-I/usr/local/opt/openssl/include"
+```
+
+##### OpenSUSE
+
+Installing prerequisites on openSUSE:
+
+```sh
+sudo zypper in -t pattern devel_basis
+sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
+ python-devel libffi-devel libopenssl-devel libjpeg62-devel
+```
+
+##### OpenBSD
+
+A port of Synapse is available under `net/synapse`. The filesystem
+underlying the homeserver directory (defaults to `/var/synapse`) has to be
+mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
+and mounting it to `/var/synapse` should be taken into consideration.
+
+To be able to build Synapse's Python dependency, the `WRKOBJDIR`
+(cf. `bsd.port.mk(5)`) used for building Python also needs to be on a
+filesystem mounted with `wxallowed` (cf. `mount(8)`).
+
+Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
+default OpenBSD installation is mounted with `wxallowed`):
+
+```sh
+doas mkdir /usr/local/pobj_wxallowed
+```
+
+Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
+configured in `/etc/mk.conf`:
+
+```sh
+doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
+```
+
+Setting the `WRKOBJDIR` for building python:
+
+```sh
+echo 'WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed' >> /etc/mk.conf
+echo 'WRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed' >> /etc/mk.conf
+```
+
+Building Synapse:
+
+```sh
+cd /usr/ports/net/synapse
+make install
+```
+
+##### Windows
+
+If you wish to run or develop Synapse on Windows, the Windows Subsystem For
+Linux provides a Linux environment on Windows 10 which is capable of using the
+Debian, Fedora, or source installation methods. More information about WSL can
+be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
+Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
+for Windows Server.
+
+### Prebuilt packages
+
+As an alternative to installing from source, prebuilt packages are available
+for a number of platforms.
+
+#### Docker images and Ansible playbooks
+
+There is an official synapse image available at
+<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
+the docker-compose file available at
+[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
+Further information on this including configuration options is available in the README
+on hub.docker.com.
+
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
+Dockerfile to automate a synapse server in a single Docker image, at
+<https://hub.docker.com/r/avhost/docker-matrix/tags/>
+
+Slavi Pantaleev has created an Ansible playbook,
+which installs the official Docker image of Matrix Synapse
+along with many other Matrix-related services (Postgres database, Element, coturn,
+ma1sd, SSL support, etc.).
+For more details, see
+<https://github.com/spantaleev/matrix-docker-ansible-deploy>
+
+#### Debian/Ubuntu
+
+##### Matrix.org packages
+
+Matrix.org provides Debian/Ubuntu packages of the latest stable version of
+Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
+9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:
+
+```sh
+sudo apt install -y lsb-release wget apt-transport-https
+sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
+echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
+ sudo tee /etc/apt/sources.list.d/matrix-org.list
+sudo apt update
+sudo apt install matrix-synapse-py3
+```
+
+**Note**: if you followed a previous version of these instructions which
+recommended using `apt-key add` to add an old key from
+`https://matrix.org/packages/debian/`, you should note that this key has been
+revoked. You should remove the old key with `sudo apt-key remove
+C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to
+update your configuration.
+
+The fingerprint of the repository signing key (as shown by `gpg
+/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
+`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
+
+##### Downstream Debian packages
+
+We do not recommend using the packages from the default Debian `buster`
+repository at this time, as they are old and suffer from known security
+vulnerabilities. You can install the latest version of Synapse from
+[our repository](#matrixorg-packages) or from `buster-backports`. Please
+see the [Debian documentation](https://backports.debian.org/Instructions/)
+for information on how to use backports.
+
+If you are using Debian `sid` or testing, Synapse is available in the default
+repositories and it should be possible to install it simply with:
+
+```sh
+sudo apt install matrix-synapse
+```
+
+##### Downstream Ubuntu packages
+
+We do not recommend using the packages in the default Ubuntu repository
+at this time, as they are old and suffer from known security vulnerabilities.
+The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
+
+#### Fedora
+
+Synapse is in the Fedora repositories as `matrix-synapse`:
+
+```sh
+sudo dnf install matrix-synapse
+```
+
+Oleg Girko provides Fedora RPMs at
+<https://obs.infoserver.lv/project/monitor/matrix-synapse>
+
+#### OpenSUSE
+
+Synapse is in the OpenSUSE repositories as `matrix-synapse`:
+
+```sh
+sudo zypper install matrix-synapse
+```
+
+#### SUSE Linux Enterprise Server
+
+Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
+<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>
+
+#### ArchLinux
+
+The quickest way to get up and running with ArchLinux is probably with the community package
+<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
+the necessary dependencies.
+
+pip may be outdated (6.0.7-1 needs to be upgraded to 6.0.8-1):
+
+```sh
+sudo pip install --upgrade pip
+```
+
+If you encounter an error with the `bcrypt` library causing a `Wrong ELF
+Class: ELFCLASS32` error (on x64 systems), you may need to reinstall
+`py-bcrypt` so that it is compiled for the correct architecture. (This should
+not be needed when installing under virtualenv):
+
+```sh
+sudo pip uninstall py-bcrypt
+sudo pip install py-bcrypt
+```
+
+#### Void Linux
+
+Synapse can be found in the void repositories as `synapse`:
+
+```sh
+xbps-install -Su
+xbps-install -S synapse
+```
+
+#### FreeBSD
+
+Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
+
+- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
+- Packages: `pkg install py37-matrix-synapse`
+
+#### OpenBSD
+
+As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
+underlying the homeserver directory (defaults to `/var/synapse`) has to be
+mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
+and mounting it to `/var/synapse` should be taken into consideration.
+
+Installing Synapse:
+
+```sh
+doas pkg_add synapse
+```
+
+#### NixOS
+
+Robin Lambertz has packaged Synapse for NixOS at:
+<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
+
+## Setting up Synapse
+
+Once you have installed synapse as above, you will need to configure it.
+
+### Using PostgreSQL
+
+By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades
+performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org)
+instead. Advantages include:
+
+- significant performance improvements due to the superior threading and
+  caching model, and a smarter query optimiser
+- allowing the DB to be run on separate hardware
+
+For information on how to install and use PostgreSQL with Synapse, please see
+[docs/postgres.md](../postgres.md).
+
+SQLite is only acceptable for testing purposes, and should not be used in a
+production server: Synapse will perform poorly when using SQLite, especially
+when participating in large rooms.
+
+### TLS certificates
+
+The default configuration exposes a single HTTP port on the local
+interface: `http://localhost:8008`. It is suitable for local testing,
+but for any practical use, you will need Synapse's APIs to be served
+over HTTPS.
+
+The recommended way to do so is to set up a reverse proxy on port
+`8448`. You can find documentation on doing so in
+[docs/reverse_proxy.md](../reverse_proxy.md).
+
+Alternatively, you can configure Synapse to expose an HTTPS port. To do
+so, you will need to edit `homeserver.yaml`, as follows:
+
+- First, under the `listeners` section, uncomment the configuration for the
+ TLS-enabled listener. (Remove the hash sign (`#`) at the start of
+ each line). The relevant lines are like this:
+
+```yaml
+ - port: 8448
+ type: http
+ tls: true
+ resources:
+ - names: [client, federation]
+```
+
+- You will also need to uncomment the `tls_certificate_path` and
+ `tls_private_key_path` lines under the `TLS` section. You will need to manage
+ provisioning of these certificates yourself.
+
+ If you are using your own certificate, be sure to use a `.pem` file that
+ includes the full certificate chain including any intermediate certificates
+ (for instance, if using certbot, use `fullchain.pem` as your certificate, not
+ `cert.pem`).
+
+For a more detailed guide to configuring your server for federation, see
+[federate.md](../federate.md).
+
+### Client Well-Known URI
+
+Setting up the client Well-Known URI is optional but if you set it up, it will
+allow users to enter their full username (e.g. `@user:<server_name>`) into clients
+which support well-known lookup to automatically configure the homeserver and
+identity server URLs. This is useful so that users don't have to memorize or think
+about the actual homeserver URL you are using.
+
+The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
+the following format.
+
+```json
+{
+ "m.homeserver": {
+ "base_url": "https://<matrix.example.com>"
+ }
+}
+```
+
+It can optionally contain identity server information as well.
+
+```json
+{
+ "m.homeserver": {
+ "base_url": "https://<matrix.example.com>"
+ },
+ "m.identity_server": {
+ "base_url": "https://<identity.example.com>"
+ }
+}
+```
+
+To work in browser based clients, the file must be served with the appropriate
+Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
+`Access-Control-Allow-Origin: *` which would allow all browser based clients to
+view it.
+
+In nginx this would be something like:
+
+```nginx
+location /.well-known/matrix/client {
+ return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
+ default_type application/json;
+ add_header Access-Control-Allow-Origin *;
+}
+```
+
+You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
+correctly. `public_baseurl` should be set to the URL that clients will use to
+connect to your server. This is the same URL you put for the `m.homeserver`
+`base_url` above.
+
+```yaml
+public_baseurl: "https://<matrix.example.com>"
+```
+
+### Email
+
+It is desirable for Synapse to be able to send email. This allows
+Synapse to send password reset emails, send verifications when an email address
+is added to a user's account, and send email notifications to users when they
+receive new messages.
+
+To configure an SMTP server for Synapse, modify the configuration section
+headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
+and `notif_from` fields filled out. You may also need to set `smtp_user`,
+`smtp_pass`, and `require_transport_security`.
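+
+For illustration, a minimal `email` section might look like the following.
+This is a sketch rather than a complete reference: the hostname and
+credentials are placeholders, and `%(app)s` in `notif_from` is expanded by
+Synapse:
+
+```yaml
+email:
+  smtp_host: mail.example.com      # placeholder SMTP server
+  smtp_port: 587
+  smtp_user: "synapse"             # placeholder credentials
+  smtp_pass: "secretpassword"
+  require_transport_security: true
+  notif_from: "Your %(app)s homeserver <noreply@example.com>"
+```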
+
+If email is not configured, password reset, registration and notifications via
+email will be disabled.
+
+### Registering a user
+
+The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
+
+Alternatively, you can create a new user from the command line, as follows:
+
+ 1. If Synapse was installed via pip, activate the virtualenv as follows (if Synapse was
+ installed via a prebuilt package, `register_new_matrix_user` should already be
+ on the search path):
+ ```sh
+ cd ~/synapse
+ source env/bin/activate
+ synctl start # if not already running
+ ```
+ 2. Run the following command:
+ ```sh
+ register_new_matrix_user -c homeserver.yaml http://localhost:8008
+ ```
+
+This will prompt you to add details for the new user, and will then connect to
+the running Synapse to create the new user. For example:
+```
+New user localpart: erikj
+Password:
+Confirm password:
+Make admin [no]:
+Success!
+```
+
+This process uses a setting `registration_shared_secret` in
+`homeserver.yaml`, which is shared between Synapse itself and the
+`register_new_matrix_user` script. It doesn't matter what it is (a random
+value is generated by `--generate-config`), but it should be kept secret, as
+anyone with knowledge of it can register users, including admin accounts,
+on your server even if `enable_registration` is `false`.
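+
+The setting is a single line in `homeserver.yaml`; the value below is of
+course a placeholder:
+
+```yaml
+registration_shared_secret: "<PRIVATE, RANDOM STRING>"
+```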
+
+### Setting up a TURN server
+
+For reliable VoIP calls to be routed via this homeserver, you MUST configure
+a TURN server. See
+[docs/turn-howto.md](../turn-howto.md)
+for details.
+
+### URL previews
+
+Synapse includes support for previewing URLs, which is disabled by default. To
+turn it on you must enable the `url_preview_enabled: True` config parameter
+and explicitly specify the IP ranges that Synapse is not allowed to spider for
+previewing in the `url_preview_ip_range_blacklist` configuration parameter.
+This is critical from a security perspective to stop arbitrary Matrix users
+spidering 'internal' URLs on your network. At the very least we recommend that
+your loopback and RFC1918 IP addresses are blacklisted.
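+
+As a sketch, a configuration blocking the loopback and RFC1918 ranges might
+look like this (the list below is illustrative, not exhaustive):
+
+```yaml
+url_preview_enabled: true
+url_preview_ip_range_blacklist:
+  - '127.0.0.0/8'
+  - '10.0.0.0/8'
+  - '172.16.0.0/12'
+  - '192.168.0.0/16'
+```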
+
+This also requires the optional `lxml` python dependency to be installed. This
+in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
+means `apt-get install libxml2-dev`, or equivalent for your OS.
+
+### Troubleshooting Installation
+
+`pip` seems to leak *lots* of memory during installation. For instance, a Linux
+host with 512MB of RAM may run out of memory whilst installing Twisted. If this
+happens, you will have to individually install the dependencies which are
+failing, e.g.:
+
+```sh
+pip install twisted
+```
+
+If you have any other problems, feel free to ask in
+[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org).
diff --git a/docs/upgrade.md b/docs/upgrade.md
new file mode 100644
index 00000000..db0450f5
--- /dev/null
+++ b/docs/upgrade.md
@@ -0,0 +1,1391 @@
+# Upgrading Synapse
+
+Before upgrading, check whether any special steps are required to upgrade from
+the version you currently have installed to the current version of
+Synapse. The extra instructions that may be required are listed later in
+this document.
+
+- Check that your versions of Python and PostgreSQL are still
+ supported.
+
+ Synapse follows upstream lifecycles for [Python](https://endoflife.date/python) and
+ [PostgreSQL](https://endoflife.date/postgresql), and removes support for versions
+ which are no longer maintained.
+
+ The website <https://endoflife.date> also offers convenient
+ summaries.
+
+- If Synapse was installed using [prebuilt
+ packages](setup/installation.md#prebuilt-packages), you will need to follow the
+ normal process for upgrading those packages.
+
+- If Synapse was installed from source, then:
+
+ 1. Activate the virtualenv before upgrading. For example, if
+ Synapse is installed in a virtualenv in `~/synapse/env` then
+ run:
+
+ ```bash
+ source ~/synapse/env/bin/activate
+ ```
+
+ 2. If Synapse was installed using pip then upgrade to the latest
+ version by running:
+
+ ```bash
+ pip install --upgrade matrix-synapse
+ ```
+
+ If Synapse was installed using git then upgrade to the latest
+ version by running:
+
+ ```bash
+ git pull
+ pip install --upgrade .
+ ```
+
+ 3. Restart Synapse:
+
+ ```bash
+ ./synctl restart
+ ```
+
+To check whether your update was successful, you can check the running
+server version with:
+
+```bash
+# you may need to replace 'localhost:8008' if synapse is not configured
+# to listen on port 8008.
+
+curl http://localhost:8008/_synapse/admin/v1/server_version
+```
+
+## Rolling back to older versions
+
+Rolling back to previous releases can be difficult, due to database
+schema changes between releases. Where we have been able to test the
+rollback process, this will be noted below.
+
+In general, you will need to undo any changes made during the upgrade
+process, for example:
+
+- pip:
+
+ ```bash
+ source env/bin/activate
+ # replace `1.3.0` accordingly:
+ pip install matrix-synapse==1.3.0
+ ```
+
+- Debian:
+
+ ```bash
+ # replace `1.3.0` and `stretch` accordingly:
+ wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
+ dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
+ ```
+
+
+# Upgrading to v1.38.0
+
+## Re-indexing of `events` table on Postgres databases
+
+This release includes a database schema update which requires re-indexing one of
+the larger tables in the database, `events`. This could result in increased
+disk I/O for several hours or days after upgrading while the migration
+completes. Furthermore, because we have to keep the old indexes until the new
+indexes are ready, it could result in a significant, temporary increase in
+disk space.
+
+To get a rough idea of the disk space required, check the current size of one
+of the indexes. For example, from a `psql` shell, run the following SQL:
+
+```sql
+SELECT pg_size_pretty(pg_relation_size('events_order_room'));
+```
+
+We need to rebuild **four** indexes, so you will need to multiply this result
+by four to give an estimate of the disk space required. For example, on one
+particular server:
+
+```
+synapse=# select pg_size_pretty(pg_relation_size('events_order_room'));
+ pg_size_pretty
+----------------
+ 288 MB
+(1 row)
+```
+
+On this server, it would be wise to ensure that at least 1152MB are free.
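+
+If you prefer, Postgres can do the multiplication for you; this is simply a
+convenience variant of the query above:
+
+```sql
+SELECT pg_size_pretty(4 * pg_relation_size('events_order_room'));
+```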
+
+The additional disk space will be freed once the migration completes.
+
+SQLite databases are unaffected by this change.
+
+
+# Upgrading to v1.37.0
+
+## Deprecation of the current spam checker interface
+
+The current spam checker interface is deprecated in favour of a new generic modules system.
+Authors of spam checker modules can refer to [this
+documentation](https://matrix-org.github.io/synapse/develop/modules.html#porting-an-existing-module-that-uses-the-old-interface)
+to update their modules. Synapse administrators can refer to [this
+documentation](https://matrix-org.github.io/synapse/develop/modules.html#using-modules)
+to update their configuration once the modules they are using have been updated.
+
+We plan to remove support for the current spam checker interface in August 2021.
+
+More module interfaces will be ported over to this new generic system in future versions
+of Synapse.
+
+
+# Upgrading to v1.34.0
+
+## `room_invite_state_types` configuration setting
+
+The `room_invite_state_types` configuration setting has been deprecated
+and replaced with `room_prejoin_state`. See the [sample configuration
+file](https://github.com/matrix-org/synapse/blob/v1.34.0/docs/sample_config.yaml#L1515).
+
+If you have set `room_invite_state_types` to the default value you
+should simply remove it from your configuration file. The default value
+used to be:
+
+```yaml
+room_invite_state_types:
+ - "m.room.join_rules"
+ - "m.room.canonical_alias"
+ - "m.room.avatar"
+ - "m.room.encryption"
+ - "m.room.name"
+```
+
+If you have customised this value, you should remove
+`room_invite_state_types` and configure `room_prejoin_state` instead.
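+
+As a sketch, a customised list carries over along these lines; see the sample
+configuration linked above for the authoritative structure of
+`room_prejoin_state`:
+
+```yaml
+room_prejoin_state:
+  disable_default_event_types: true
+  additional_event_types:
+    - "m.room.join_rules"
+    - "m.room.canonical_alias"
+    - "m.room.encryption"
+```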
+
+# Upgrading to v1.33.0
+
+## Account Validity HTML templates can now display a user's expiration date
+
+This may affect you if you have enabled the account validity feature,
+and have made use of a custom HTML template specified by the
+`account_validity.template_dir` or
+`account_validity.account_renewed_html_path` Synapse config options.
+
+The template can now accept an `expiration_ts` variable, which
+represents the unix timestamp in milliseconds of the date until
+which the account has been renewed. See the [default
+template](https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_renewed.html)
+for an example of usage.
+
+Also note that a new HTML template, `account_previously_renewed.html`,
+has been added. This is shown to users when they attempt to renew
+their account with a valid renewal token that has already been used
+before. The default template contents can be found
+[here](https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_previously_renewed.html),
+and can also accept an `expiration_ts` variable. This template replaces
+the error message users would previously see upon attempting to use a
+valid renewal token more than once.
+
+# Upgrading to v1.32.0
+
+## Regression causing connected Prometheus instances to become overwhelmed
+
+This release introduces [a
+regression](https://github.com/matrix-org/synapse/issues/9853) that can
+overwhelm connected Prometheus instances. This issue is not present in
+Synapse v1.32.0rc1.
+
+If you have been affected, please downgrade to 1.31.0. You then may need
+to remove excess writeahead logs in order for Prometheus to recover.
+Instructions for doing so are provided
+[here](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183).
+
+## Dropping support for old Python, Postgres and SQLite versions
+
+In line with our [deprecation
+policy](https://github.com/matrix-org/synapse/blob/release-v1.32.0/docs/deprecation_policy.md),
+we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no
+longer supported upstream.
+
+This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or
+SQLite 3.22+.
+
+## Removal of old List Accounts Admin API
+
+The deprecated v1 "list accounts" admin API
+(`GET /_synapse/admin/v1/users/<user_id>`) has been removed in this
+version.
+
+The [v2 list accounts
+API](https://github.com/matrix-org/synapse/blob/master/docs/admin_api/user_admin_api.rst#list-accounts)
+has been available since Synapse 1.7.0 (2019-12-13), and is accessible
+under `GET /_synapse/admin/v2/users`.
+
+The deprecation of the old endpoint was announced with Synapse 1.28.0
+(released on 2021-02-25).
+
+## Application Services must use type `m.login.application_service` when registering users
+
+In compliance with the [Application Service
+spec](https://matrix.org/docs/spec/application_service/r0.1.2#server-admin-style-permissions),
+Application Services are now required to use the
+`m.login.application_service` type when registering users via the
+`/_matrix/client/r0/register` endpoint. This behaviour was deprecated in
+Synapse v1.30.0.
+
+Please ensure your Application Services are up to date.
+
+# Upgrading to v1.29.0
+
+## Requirement for X-Forwarded-Proto header
+
+When using Synapse with a reverse proxy (in particular, when using the
+`x_forwarded` option on an HTTP listener), Synapse now
+expects to receive an `X-Forwarded-Proto` header on incoming
+HTTP requests. If it is not set, Synapse will log a warning on each
+received request.
+
+To avoid the warning, administrators using a reverse proxy should ensure
+that the reverse proxy sets the `X-Forwarded-Proto` header to
+`https` or `http` to indicate the protocol used
+by the client.
+
+Synapse also requires the `Host` header to be preserved.
+
+See the [reverse proxy documentation](../reverse_proxy.md), where the
+example configurations have been updated to show how to set these
+headers.
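+
+For illustration, the relevant nginx directives look something like the
+following; this is a sketch to merge into an existing `location` block, not a
+complete site configuration:
+
+```nginx
+location /_matrix {
+    proxy_pass http://localhost:8008;
+    proxy_set_header X-Forwarded-Proto $scheme;
+    proxy_set_header Host $host;
+}
+```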
+
+(Users of [Caddy](https://caddyserver.com/) are unaffected, since we
+believe it sets `X-Forwarded-Proto` by default.)
+
+# Upgrading to v1.27.0
+
+## Changes to callback URI for OAuth2 / OpenID Connect and SAML2
+
+This version changes the URI used for callbacks from OAuth2 and SAML2
+identity providers:
+
+- If your server is configured for single sign-on via an OpenID
+ Connect or OAuth2 identity provider, you will need to add
+ `[synapse public baseurl]/_synapse/client/oidc/callback` to the list
+ of permitted "redirect URIs" at the identity provider.
+
+ See the [OpenID docs](../openid.md) for more information on setting
+ up OpenID Connect.
+
+- If your server is configured for single sign-on via a SAML2 identity
+ provider, you will need to add
+ `[synapse public baseurl]/_synapse/client/saml2/authn_response` as a
+ permitted "ACS location" (also known as "allowed callback URLs")
+ at the identity provider.
+
+ The "Issuer" in the "AuthnRequest" to the SAML2 identity
+ provider is also updated to
+ `[synapse public baseurl]/_synapse/client/saml2/metadata.xml`. If
+ your SAML2 identity provider uses this property to validate or
+ otherwise identify Synapse, its configuration will need to be
+ updated to use the new URL. Alternatively you could create a new,
+ separate "EntityDescriptor" in your SAML2 identity provider with
+ the new URLs and leave the URLs in the existing "EntityDescriptor"
+ as they were.
+
+## Changes to HTML templates
+
+The HTML templates for SSO and email notifications now have [Jinja2's
+autoescape](https://jinja.palletsprojects.com/en/2.11.x/api/#autoescaping)
+enabled for files ending in `.html`, `.htm`, and `.xml`. If you have
+customised these templates and see issues when viewing them you might
+need to update them. It is expected that most configurations will need
+no changes.
+
+If you have customised the *names* of these templates, it is
+recommended to verify they end in `.html` to ensure autoescape is
+enabled.
+
+The above applies to the following templates:
+
+- `add_threepid.html`
+- `add_threepid_failure.html`
+- `add_threepid_success.html`
+- `notice_expiry.html`
+- `notif_mail.html` (which, by default, includes `room.html` and
+ `notif.html`)
+- `password_reset.html`
+- `password_reset_confirmation.html`
+- `password_reset_failure.html`
+- `password_reset_success.html`
+- `registration.html`
+- `registration_failure.html`
+- `registration_success.html`
+- `sso_account_deactivated.html`
+- `sso_auth_bad_user.html`
+- `sso_auth_confirm.html`
+- `sso_auth_success.html`
+- `sso_error.html`
+- `sso_login_idp_picker.html`
+- `sso_redirect_confirm.html`
+
+# Upgrading to v1.26.0
+
+## Rolling back to v1.25.0 after a failed upgrade
+
+v1.26.0 includes a lot of large changes. If something problematic
+occurs, you may want to roll back to a previous version of Synapse.
+Because v1.26.0 also includes a new database schema version, reverting
+that version is also required alongside the generic rollback
+instructions mentioned above. In short, to roll back to v1.25.0 you need
+to:
+
+1. Stop the server
+
+2. Decrease the schema version in the database:
+
+ ```sql
+ UPDATE schema_version SET version = 58;
+ ```
+
+3. Delete the ignored users & chain cover data:
+
+ ```sql
+ DROP TABLE IF EXISTS ignored_users;
+ UPDATE rooms SET has_auth_chain_index = false;
+ ```
+
+ For PostgreSQL run:
+
+ ```sql
+ TRUNCATE event_auth_chain_links;
+ TRUNCATE event_auth_chains;
+ ```
+
+ For SQLite run:
+
+ ```sql
+ DELETE FROM event_auth_chain_links;
+ DELETE FROM event_auth_chains;
+ ```
+
+4. Mark the deltas as not run (so they will re-run on upgrade).
+
+ ```sql
+    DELETE FROM applied_schema_deltas WHERE version = 59 AND file = '59/01ignored_user.py';
+    DELETE FROM applied_schema_deltas WHERE version = 59 AND file = '59/06chain_cover_index.sql';
+ ```
+
+5. Downgrade Synapse by following the instructions for your
+ installation method in the "Rolling back to older versions"
+ section above.
+
+# Upgrading to v1.25.0
+
+## Last release supporting Python 3.5
+
+This is the last release of Synapse which guarantees support with Python
+3.5, which passed its upstream End of Life date several months ago.
+
+We will attempt to maintain support through March 2021, but without
+guarantees.
+
+In the future, Synapse will follow upstream schedules for ending support
+of older versions of Python and PostgreSQL. Please upgrade to at least
+Python 3.6 and PostgreSQL 9.6 as soon as possible.
+
+## Blacklisting IP ranges
+
+Synapse v1.25.0 includes new settings, `ip_range_blacklist` and
+`ip_range_whitelist`, for controlling outgoing requests from Synapse for
+federation, identity servers, push, and for checking key validity for
+third-party invite events. The previous setting,
+`federation_ip_range_blacklist`, is deprecated. The new
+`ip_range_blacklist` defaults to private IP ranges if it is not defined.
+
+If you have never customised `federation_ip_range_blacklist` it is
+recommended that you remove that setting.
+
+If you have customised `federation_ip_range_blacklist` you should update
+the setting name to `ip_range_blacklist`.
+
+If you have a custom push server that is reached via private IP space
+you may need to customise `ip_range_blacklist` or `ip_range_whitelist`.
+
+# Upgrading to v1.24.0
+
+## Custom OpenID Connect mapping provider breaking change
+
+This release allows the OpenID Connect mapping provider to perform
+normalisation of the localpart of the Matrix ID. This allows for the
+mapping provider to specify different algorithms, instead of the
+[default
+way](<https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>).
+
+If your Synapse configuration uses a custom mapping provider
+(`oidc_config.user_mapping_provider.module` is specified and
+not equal to
+`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`)
+then you *must* ensure that `map_user_attributes` of the
+mapping provider performs some normalisation of the
+`localpart` returned. To match previous behaviour you can
+use the `map_username_to_mxid_localpart` function provided
+by Synapse. An example is shown below:
+
+```python
+from synapse.types import map_username_to_mxid_localpart
+
+class MyMappingProvider:
+ def map_user_attributes(self, userinfo, token):
+ # ... your custom logic ...
+ sso_user_id = ...
+ localpart = map_username_to_mxid_localpart(sso_user_id)
+
+ return {"localpart": localpart}
+```
+
+## Removal of historical Synapse Admin API
+
+Historically, the Synapse Admin API has been accessible under:
+
+- `/_matrix/client/api/v1/admin`
+- `/_matrix/client/unstable/admin`
+- `/_matrix/client/r0/admin`
+- `/_synapse/admin/v1`
+
+The endpoints with `/_matrix/client/*` prefixes have been removed as of
+v1.24.0. The Admin API is now only accessible under:
+
+- `/_synapse/admin/v1`
+
+The only exception is the `/admin/whois` endpoint, which is
+[also available via the client-server
+API](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).
+
+The deprecation of the old endpoints was announced with Synapse 1.20.0
+(released on 2020-09-22) and makes it easier for homeserver admins to
+lock down external access to the Admin API endpoints.
+
+# Upgrading to v1.23.0
+
+## Structured logging configuration breaking changes
+
+This release deprecates use of the `structured: true` logging
+configuration for structured logging. If your logging configuration
+contains `structured: true` then it should be modified based on the
+[structured logging
+documentation](../structured_logging.md).
+
+The `structured` and `drains` logging options are now deprecated and
+should be replaced by standard logging configuration of `handlers` and
+`formatters`.
+
+A future release of Synapse will make using `structured: true` an
+error.
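+
+As a rough sketch (the file path and handler name here are illustrative; see
+the structured logging documentation for the authoritative formatter class),
+a JSON-to-file drain maps onto a standard handler/formatter pair like this:
+
+```yaml
+formatters:
+  structured:
+    class: synapse.logging.TerseJsonFormatter
+
+handlers:
+  file:
+    class: logging.handlers.TimedRotatingFileHandler
+    formatter: structured
+    filename: /var/log/synapse/homeserver.log
+    when: midnight
+```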
+
+# Upgrading to v1.22.0
+
+## ThirdPartyEventRules breaking changes
+
+This release introduces a backwards-incompatible change to modules
+making use of `ThirdPartyEventRules` in Synapse. If you make use of a
+module defined under the `third_party_event_rules` config option, please
+make sure it is updated to handle the below change:
+
+The `http_client` argument is no longer passed to modules as they are
+initialised. Instead, modules are expected to make use of the
+`http_client` property on the `ModuleApi` class. Modules are now passed
+a `module_api` argument during initialisation, which is an instance of
+`ModuleApi`. `ModuleApi` instances have a `http_client` property which
+acts the same as the `http_client` argument previously passed to
+`ThirdPartyEventRules` modules.
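+
+A minimal sketch of the new initialisation pattern follows; the class name is
+illustrative, while `module_api` and its `http_client` property are as
+described above:
+
+```python
+class ExampleThirdPartyRules:
+    def __init__(self, config, module_api):
+        self._config = config
+        # The http_client argument is no longer passed in; fetch the
+        # equivalent client from the ModuleApi instance instead.
+        self._http_client = module_api.http_client
+```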
+
+# Upgrading to v1.21.0
+
+## Forwarding `/_synapse/client` through your reverse proxy
+
+The [reverse proxy
+documentation](https://github.com/matrix-org/synapse/blob/develop/docs/reverse_proxy.md)
+has been updated to include reverse proxy directives for
+`/_synapse/client/*` endpoints. As the user password reset flow now uses
+endpoints under this prefix, **you must update your reverse proxy
+configurations for user password reset to work**.
+
+Additionally, note that the [Synapse worker documentation](https://github.com/matrix-org/synapse/blob/develop/docs/workers.md) has been updated to
+state that the `/_synapse/client/password_reset/email/submit_token`
+endpoint can be handled by all workers. If you make use of Synapse's
+worker feature, please update your reverse proxy configuration to
+reflect this change.
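+
+For example, an nginx configuration that previously proxied only `/_matrix`
+would be extended roughly as follows; this is a sketch, and the linked
+documentation has the full recommended configuration:
+
+```nginx
+location ~ ^(/_matrix|/_synapse/client) {
+    proxy_pass http://localhost:8008;
+    proxy_set_header X-Forwarded-For $remote_addr;
+}
+```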
+
+## New HTML templates
+
+A new HTML template,
+[password_reset_confirmation.html](https://github.com/matrix-org/synapse/blob/develop/synapse/res/templates/password_reset_confirmation.html),
+has been added to the `synapse/res/templates` directory. If you are
+using a custom template directory, you may want to copy the template
+over and modify it.
+
+Note that as of v1.20.0, templates do not need to be included in custom
+template directories for Synapse to start. The default templates will be
+used if a custom template cannot be found.
+
+This page will appear to the user after clicking a password reset link
+that has been emailed to them.
+
+To complete password reset, the page must include a way to make a
+`POST` request to
+`/_synapse/client/password_reset/{medium}/submit_token` with the query
+parameters from the original link, presented as a URL-encoded form. See
+the file itself for more details.
+
+## Updated Single Sign-on HTML Templates
+
+The `saml_error.html` template was removed from Synapse and replaced
+with the `sso_error.html` template. If your Synapse is configured to use
+SAML and a custom `sso_redirect_confirm_template_dir` configuration then
+any customisations of the `saml_error.html` template will need to be
+merged into the `sso_error.html` template. These templates are similar,
+but the parameters are slightly different:
+
+- The `msg` parameter should be renamed to `error_description`.
+- There is no longer a `code` parameter for the response code.
+- A string `error` parameter is available that includes a short hint
+ of why a user is seeing the error page.
+
+# Upgrading to v1.18.0
+
+## Docker `-py3` suffix will be removed in future versions
+
+From 10th August 2020, we will no longer publish Docker images with the
+`-py3` tag suffix. The images tagged with the
+`-py3` suffix have been identical to the non-suffixed tags
+since release 0.99.0, and the suffix is obsolete.
+
+On 10th August, we will remove the `latest-py3` tag.
+Existing per-release tags (such as `v1.18.0-py3`) will not
+be removed, but no new `-py3` tags will be added.
+
+Scripts relying on the `-py3` suffix will need to be
+updated.
+
+## Redis replication is now recommended in lieu of TCP replication
+
+When setting up worker processes, we now recommend the use of a Redis
+server for replication. **The old direct TCP connection method is
+deprecated and will be removed in a future release.** See
+[workers](../workers.md) for more details.
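+
+Enabling Redis replication is a small configuration change. A minimal sketch,
+assuming a Redis server on the default local host and port:
+
+```yaml
+redis:
+  enabled: true
+  host: localhost
+  port: 6379
+```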
+
+# Upgrading to v1.14.0
+
+This version includes a database update which is run as part of the
+upgrade, and which may take a couple of minutes in the case of a large
+server. Synapse will not respond to HTTP requests while this update is
+taking place.
+
+# Upgrading to v1.13.0
+
+## Incorrect database migration in old synapse versions
+
+A bug was introduced in Synapse 1.4.0 which could cause the room
+directory to be incomplete or empty if Synapse was upgraded directly
+from v1.2.1 or earlier, to versions between v1.4.0 and v1.12.x.
+
+This will *not* be a problem for Synapse installations which were:
+
+- created at v1.4.0 or later,
+- upgraded via v1.3.x, or
+- upgraded straight from v1.2.1 or earlier to v1.13.0 or later.
+
+If completeness of the room directory is a concern, installations which
+are affected can be repaired as follows:
+
+1. Run the following SQL from a `psql` or
+   `sqlite3` console:
+
+ ```sql
+ INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+ ('populate_stats_process_rooms', '{}', 'current_state_events_membership');
+
+ INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+ ('populate_stats_process_users', '{}', 'populate_stats_process_rooms');
+ ```
+
+2. Restart synapse.
+
+## New Single Sign-on HTML Templates
+
+New templates (`sso_auth_confirm.html`, `sso_auth_success.html`, and
+`sso_account_deactivated.html`) were added to Synapse. If your Synapse
+is configured to use SSO and a custom
+`sso_redirect_confirm_template_dir` configuration then these templates
+will need to be copied from
+[synapse/res/templates](https://github.com/matrix-org/synapse/tree/master/synapse/res/templates) into that directory.
+
+## Synapse SSO Plugins Method Deprecation
+
+Plugins using the `complete_sso_login` method of
+`synapse.module_api.ModuleApi` should update to using the async/await
+version `complete_sso_login_async` which includes additional checks. The
+non-async version is considered deprecated.
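+
+A sketch of the change from a plugin's point of view (the argument
+names here follow the plugin's own variables):
+
+```python
+# Deprecated, synchronous form:
+#     self.module_api.complete_sso_login(user_id, request, client_redirect_url)
+
+# Preferred form; must be awaited from an async context:
+await self.module_api.complete_sso_login_async(
+    user_id, request, client_redirect_url
+)
+```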
+
+## Rolling back to v1.12.4 after a failed upgrade
+
+v1.13.0 includes a lot of large changes. If something problematic
+occurs, you may want to roll back to a previous version of Synapse.
+Because v1.13.0 also includes a new database schema version, reverting
+that version is also required alongside the generic rollback
+instructions mentioned above. In short, to roll back to v1.12.4 you need
+to:
+
+1. Stop the server
+
+2. Decrease the schema version in the database:
+
+ ```sql
+ UPDATE schema_version SET version = 57;
+ ```
+
+3. Downgrade Synapse by following the instructions for your
+ installation method in the "Rolling back to older versions"
+ section above.
+
+# Upgrading to v1.12.0
+
+This version includes a database update which is run as part of the
+upgrade, and which may take some time (several hours in the case of a
+large server). Synapse will not respond to HTTP requests while this
+update is taking place.
+
+This is only likely to be a problem in the case of a server which is
+participating in many rooms.
+
+0. As with all upgrades, it is recommended that you have a recent
+ backup of your database which can be used for recovery in the event
+ of any problems.
+
+1. As an initial check to see if you will be affected, you can try
+ running the following query from the [psql]{.title-ref} or
+ [sqlite3]{.title-ref} console. It is safe to run it while Synapse is
+ still running.
+
+ ```sql
+ SELECT MAX(q.v) FROM (
+ SELECT (
+ SELECT ej.json AS v
+ FROM state_events se INNER JOIN event_json ej USING (event_id)
+ WHERE se.room_id=rooms.room_id AND se.type='m.room.create' AND se.state_key=''
+ LIMIT 1
+ ) FROM rooms WHERE rooms.room_version IS NULL
+ ) q;
+ ```
+
+ This query will take about the same amount of time as the upgrade
+ process: i.e., if it takes 5 minutes, then it is likely that Synapse
+ will be unresponsive for 5 minutes during the upgrade.
+
+ If you consider an outage of this duration to be acceptable, no
+ further action is necessary and you can simply start Synapse 1.12.0.
+
+ If you would prefer to reduce the downtime, continue with the steps
+ below.
+
+2. The easiest workaround for this issue is to manually create a new
+ index before upgrading. On PostgreSQL, this can be done as follows:
+
+ ```sql
+ CREATE INDEX CONCURRENTLY tmp_upgrade_1_12_0_index
+ ON state_events(room_id) WHERE type = 'm.room.create';
+ ```
+
+ The above query may take some time, but is also safe to run while
+ Synapse is running.
+
+ We assume that no SQLite users have databases large enough to be
+ affected. If you *are* affected, you can run a similar query,
+ omitting the `CONCURRENTLY` keyword. Note however that this
+ operation may in itself cause Synapse to stop running for some time.
+ Synapse admins are reminded that [SQLite is not recommended for use
+ outside a test
+ environment](https://github.com/matrix-org/synapse/blob/master/README.rst#using-postgresql).
+
+3. Once the index has been created, the `SELECT` query in step 1 above
+ should complete quickly. It is therefore safe to upgrade to Synapse
+ 1.12.0.
+
+4. Once Synapse 1.12.0 has successfully started and is responding to
+ HTTP requests, the temporary index can be removed:
+
+ ```sql
+ DROP INDEX tmp_upgrade_1_12_0_index;
+ ```
+
+# Upgrading to v1.10.0
+
+Synapse will now log a warning on start up if used with a PostgreSQL
+database that has a non-recommended locale set.
+
+See [Postgres](../postgres.md) for details.
+
+# Upgrading to v1.8.0
+
+Specifying a `log_file` config option will now cause Synapse to refuse
+to start. It should be replaced with the `log_config` option.
+Support for the `log_file` option was removed in v1.3.0 and has since
+had no effect.
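+
+For example (the path is illustrative):
+
+```yaml
+log_config: "/etc/matrix-synapse/log_config.yaml"
+```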
+
+# Upgrading to v1.7.0
+
+In an attempt to configure Synapse in a privacy preserving way, the
+default behaviours of `allow_public_rooms_without_auth` and
+`allow_public_rooms_over_federation` have been inverted. This means that
+by default, only authenticated users querying the Client/Server API will
+be able to query the room directory, and relatedly that the server will
+not share room directory information with other servers over federation.
+
+If your installation does not explicitly set these settings one way or
+the other and you want either setting to be `true` then it will be
+necessary to update your homeserver configuration file accordingly.
+
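+For example, to restore the previous behaviour of a publicly readable
+room directory, set both options explicitly:
+
+```yaml
+allow_public_rooms_without_auth: true
+allow_public_rooms_over_federation: true
+```
+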
+For more details on the surrounding context see our
+[explainer](https://matrix.org/blog/2019/11/09/avoiding-unwelcome-visitors-on-private-matrix-servers).
+
+# Upgrading to v1.5.0
+
+This release includes a database migration which may take several
+minutes to complete if there are a large number (more than a million or
+so) of entries in the `devices` table. This is only likely to be a
+problem on very large installations.
+
+# Upgrading to v1.4.0
+
+## New custom templates
+
+If you have configured a custom template directory with the
+`email.template_dir` option, be aware that there are new templates
+regarding registration and threepid management (see below) that must be
+included.
+
+- `registration.html` and `registration.txt`
+- `registration_success.html` and `registration_failure.html`
+- `add_threepid.html` and `add_threepid.txt`
+- `add_threepid_failure.html` and `add_threepid_success.html`
+
+Synapse will expect these files to exist inside the configured template
+directory, and **will fail to start** if they are absent. To view the
+default templates, see
+[synapse/res/templates](https://github.com/matrix-org/synapse/tree/master/synapse/res/templates).
+
+## 3pid verification changes
+
+**Note: As of this release, users will be unable to add phone numbers or
+email addresses to their accounts, without changes to the Synapse
+configuration. This includes adding an email address during
+registration.**
+
+It is possible for a user to associate an email address or phone number
+with their account, for a number of reasons:
+
+- for use when logging in, as an alternative to the user id.
+- in the case of email, as an alternative contact to help with account
+ recovery.
+- in the case of email, to receive notifications of missed messages.
+
+Before an email address or phone number can be added to a user's
+account, or before such an address is used to carry out a
+password-reset, Synapse must confirm the operation with the owner of the
+email address or phone number. It does this by sending an email or text
+giving the user a link or token to confirm receipt. This process is
+known as '3pid verification'. ('3pid', or 'threepid', stands for
+third-party identifier, and we use it to refer to external identifiers
+such as email addresses and phone numbers.)
+
+Previous versions of Synapse delegated the task of 3pid verification to
+an identity server by default. In most cases this server is `vector.im`
+or `matrix.org`.
+
+In Synapse 1.4.0, for security and privacy reasons, the homeserver will
+no longer delegate this task to an identity server by default. Instead,
+the server administrator will need to explicitly decide how they would
+like the verification messages to be sent.
+
+In the medium term, the `vector.im` and `matrix.org` identity servers
+will disable support for delegated 3pid verification entirely. However,
+in order to ease the transition, they will retain the capability for a
+limited period. Delegated email verification will be disabled on Monday
+2nd December 2019 (giving roughly 2 months' notice). Disabling delegated
+SMS verification will follow some time after that once SMS verification
+support lands in Synapse.
+
+Once delegated 3pid verification support has been disabled in the
+`vector.im` and `matrix.org` identity servers, all Synapse versions that
+depend on those instances will be unable to verify email and phone
+numbers through them. There are no imminent plans to remove delegated
+3pid verification from Sydent generally. (Sydent is the identity server
+project that backs the `vector.im` and `matrix.org` instances).
+
+### Email
+
+Following upgrade, to continue verifying email (e.g. as part of the
+registration process), admins can either:
+
+- Configure Synapse to use an email server.
+- Run or choose an identity server which allows delegated email
+ verification and delegate to it.
+
+#### Configure SMTP in Synapse
+
+To configure an SMTP server for Synapse, modify the configuration
+section headed `email`, and be sure to have at least the
+`smtp_host`, `smtp_port` and `notif_from` fields filled out.
+
+You may also need to set `smtp_user`, `smtp_pass`, and
+`require_transport_security`.
+
+See the [sample configuration file](docs/sample_config.yaml) for more
+details on these settings.
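+
+A minimal sketch (hostnames and credentials are placeholders):
+
+```yaml
+email:
+  smtp_host: smtp.example.com
+  smtp_port: 587
+  smtp_user: synapse
+  smtp_pass: secretpassword
+  require_transport_security: true
+  notif_from: "Your %(app)s homeserver <noreply@example.com>"
+```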
+
+#### Delegate email to an identity server
+
+Some admins will wish to continue using email verification as part of
+the registration process, but will not immediately have an appropriate
+SMTP server at hand.
+
+To this end, we will continue to support email verification delegation
+via the `vector.im` and `matrix.org` identity servers for two months.
+Support for delegated email verification will be disabled on Monday 2nd
+December.
+
+The `account_threepid_delegates` dictionary defines whether the
+homeserver should delegate to an external server (typically an [identity
+server](https://matrix.org/docs/spec/identity_service/r0.2.1)) the job
+of sending confirmation messages via email and SMS.
+
+So to delegate email verification, in `homeserver.yaml`, set
+`account_threepid_delegates.email` to the base URL of an identity
+server. For example:
+
+```yaml
+account_threepid_delegates:
+ email: https://example.com # Delegate email sending to example.com
+```
+
+Note that `account_threepid_delegates.email` replaces the deprecated
+`email.trust_identity_server_for_password_resets`: if
+`email.trust_identity_server_for_password_resets` is set to `true`, and
+`account_threepid_delegates.email` is not set, then the first entry in
+`trusted_third_party_id_servers` will be used as the
+`account_threepid_delegate` for email. This is to ensure compatibility
+with existing Synapse installs that set up external server handling for
+these tasks before v1.4.0. If
+`email.trust_identity_server_for_password_resets` is `true` and no
+trusted identity server domains are configured, Synapse will report an
+error and refuse to start.
+
+If `email.trust_identity_server_for_password_resets` is `false` or
+absent and no `email` delegate is configured in
+`account_threepid_delegates`, then Synapse will send email verification
+messages itself, using the configured SMTP server (see above).
+
+### Phone numbers
+
+Synapse does not support phone-number verification itself, so the only
+way to maintain the ability for users to add phone numbers to their
+accounts will be by continuing to delegate phone number verification to
+the `matrix.org` and `vector.im` identity servers (or another identity
+server that supports SMS sending).
+
+The `account_threepid_delegates` dictionary defines whether the
+homeserver should delegate to an external server (typically an [identity
+server](https://matrix.org/docs/spec/identity_service/r0.2.1)) the job
+of sending confirmation messages via email and SMS.
+
+So to delegate phone number verification, in `homeserver.yaml`, set
+`account_threepid_delegates.msisdn` to the base URL of an identity
+server. For example:
+
+```yaml
+account_threepid_delegates:
+ msisdn: https://example.com # Delegate SMS sending to example.com
+```
+
+The `matrix.org` and `vector.im` identity servers will continue to
+support delegated phone number verification via SMS until such time as
+it is possible for admins to configure their servers to perform phone
+number verification directly. More details will follow in a future
+release.
+
+## Rolling back to v1.3.1
+
+If you encounter problems with v1.4.0, it should be possible to roll
+back to v1.3.1, subject to the following:
+
+- The 'room statistics' engine was heavily reworked in this release
+ (see [#5971](https://github.com/matrix-org/synapse/pull/5971)),
+ including significant changes to the database schema, which are not
+ easily reverted. This will cause the room statistics engine to stop
+ updating when you downgrade.
+
+ The room statistics are essentially unused in v1.3.1 (in future
+ versions of Synapse, they will be used to populate the room
+ directory), so there should be no loss of functionality. However,
+ the statistics engine will write errors to the logs, which can be
+ avoided by setting the following in `homeserver.yaml`:
+
+ ```yaml
+ stats:
+ enabled: false
+ ```
+
+ Don't forget to re-enable it when you upgrade again, in preparation
+ for its use in the room directory!
+
+# Upgrading to v1.2.0
+
+Some counter metrics have been renamed, with the old names deprecated.
+See [the metrics
+documentation](../metrics-howto.md#renaming-of-metrics--deprecation-of-old-names-in-12)
+for details.
+
+# Upgrading to v1.1.0
+
+Synapse v1.1.0 removes support for older Python and PostgreSQL versions,
+as outlined in [our deprecation
+notice](https://matrix.org/blog/2019/04/08/synapse-deprecating-postgres-9-4-and-python-2-x).
+
+## Minimum Python Version
+
+Synapse v1.1.0 has a minimum Python requirement of Python 3.5. Python
+3.6 or Python 3.7 are recommended as they have improved internal string
+handling, significantly reducing memory usage.
+
+If you use current versions of the Matrix.org-distributed Debian
+packages or Docker images, action is not required.
+
+If you install Synapse in a Python virtual environment, please see
+"Upgrading to v0.34.0" for notes on setting up a new virtualenv under
+Python 3.
+
+## Minimum PostgreSQL Version
+
+If using PostgreSQL under Synapse, you will need to use PostgreSQL 9.5
+or above. Please see the [PostgreSQL
+documentation](https://www.postgresql.org/docs/11/upgrading.html) for
+more details on upgrading your database.
+
+# Upgrading to v1.0
+
+## Validation of TLS certificates
+
+Synapse v1.0 is the first release to enforce validation of TLS
+certificates for the federation API. It is therefore essential that your
+certificates are correctly configured. See the
+[FAQ](../MSC1711_certificates_FAQ.md) for more information.
+
+Note that v1.0 installations will also no longer be able to federate with
+servers that have not correctly configured their certificates.
+
+In rare cases, it may be desirable to disable certificate checking: for
+example, it might be essential to be able to federate with a given
+legacy server in a closed federation. This can be done in one of two
+ways:
+
+- Configure the global switch `federation_verify_certificates` to
+ `false`.
+- Configure a whitelist of server domains to trust via
+ `federation_certificate_verification_whitelist`.
+
+See the [sample configuration file](docs/sample_config.yaml) for more
+details on these settings.
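+
+For example (the whitelist entries are placeholders):
+
+```yaml
+# Either disable checking globally (not recommended):
+#federation_verify_certificates: false
+
+# or trust specific legacy servers only:
+federation_certificate_verification_whitelist:
+  - legacy.example.com
+  - "*.internal.example.com"
+```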
+
+## Email
+
+When a user requests a password reset, Synapse will send an email to the
+user to confirm the request.
+
+Previous versions of Synapse delegated the job of sending this email to
+an identity server. If the identity server was somehow malicious or
+became compromised, it would be theoretically possible to hijack an
+account through this means.
+
+Therefore, by default, Synapse v1.0 will send the confirmation email
+itself. If Synapse is not configured with an SMTP server, password reset
+via email will be disabled.
+
+To configure an SMTP server for Synapse, modify the configuration
+section headed `email`, and be sure to have at least the `smtp_host`,
+`smtp_port` and `notif_from` fields filled out. You may also need to set
+`smtp_user`, `smtp_pass`, and `require_transport_security`.
+
+If you are absolutely certain that you wish to continue using an
+identity server for password resets, set
+`trust_identity_server_for_password_resets` to `true`.
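+
+For example (placing the option under the `email` section is an
+assumption based on later releases; check the sample config shipped
+with your version):
+
+```yaml
+email:
+  trust_identity_server_for_password_resets: true
+```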
+
+See the [sample configuration file](docs/sample_config.yaml) for more
+details on these settings.
+
+## New email templates
+
+Some new templates have been added to the default template directory to
+allow the homeserver to send its own password reset emails. If you have
+configured a custom `template_dir` in your Synapse config, these files
+will need to be added to that directory.
+
+`password_reset.html` and `password_reset.txt` are the HTML and plain
+text templates, respectively, for the email sent to a user when they
+attempt to reset their password.
+`password_reset_success.html` and `password_reset_failure.html` are
+HTML pages whose content (assuming no redirect URL is set) is shown to
+the user after they click the link in that email.
+
+# Upgrading to v0.99.0
+
+Please be aware that, before Synapse v1.0 is released around March 2019,
+you will need to replace any self-signed certificates with those
+verified by a root CA. Information on how to do so can be found at [the
+ACME docs](../ACME.md).
+
+For more information on configuring TLS certificates see the
+[FAQ](../MSC1711_certificates_FAQ.md).
+
+# Upgrading to v0.34.0
+
+1. This release is the first to fully support Python 3. Synapse will
+ now run on Python versions 3.5 or 3.6 (as well as 2.7). We
+ recommend switching to Python 3, as it has been shown to give
+ performance improvements.
+
+ For users who have installed Synapse into a virtualenv, we recommend
+ doing this by creating a new virtualenv. For example:
+
+ virtualenv -p python3 ~/synapse/env3
+ source ~/synapse/env3/bin/activate
+ pip install matrix-synapse
+
+ You can then start synapse as normal, having activated the new
+ virtualenv:
+
+ cd ~/synapse
+ source env3/bin/activate
+ synctl start
+
+ Users who have installed from distribution packages should see the
+ relevant package documentation. See below for notes on Debian
+ packages.
+
+ - When upgrading to Python 3, you **must** make sure that your log
+ files are configured as UTF-8, by adding `encoding: utf8` to the
+ `RotatingFileHandler` configuration (if you have one) in your
+ `<server>.log.config` file. For example, if your `log.config`
+ file contains:
+
+ handlers:
+ file:
+ class: logging.handlers.RotatingFileHandler
+ formatter: precise
+ filename: homeserver.log
+ maxBytes: 104857600
+ backupCount: 10
+ filters: [context]
+ console:
+ class: logging.StreamHandler
+ formatter: precise
+ filters: [context]
+
+ Then you should update this to be:
+
+ handlers:
+ file:
+ class: logging.handlers.RotatingFileHandler
+ formatter: precise
+ filename: homeserver.log
+ maxBytes: 104857600
+ backupCount: 10
+ filters: [context]
+ encoding: utf8
+ console:
+ class: logging.StreamHandler
+ formatter: precise
+ filters: [context]
+
+ There is no need to revert this change if downgrading to
+ Python 2.
+
+ We are also making available Debian packages which will run Synapse
+ on Python 3. You can switch to these packages with
+ `apt-get install matrix-synapse-py3`, however, please read
+ [debian/NEWS](https://github.com/matrix-org/synapse/blob/release-v0.34.0/debian/NEWS)
+ before doing so. The existing `matrix-synapse` packages will
+ continue to use Python 2 for the time being.
+
+2. This release removes `riot.im` from the default list of trusted
+ identity servers.
+
+ If `riot.im` is in your homeserver's list of
+ `trusted_third_party_id_servers`, you should remove it. It was added
+ in case a hypothetical future identity server was put there. If you
+ don't remove it, users may be unable to deactivate their accounts.
+
+3. This release no longer installs the (unmaintained) Matrix Console
+ web client as part of the default installation. It is possible to
+ re-enable it by installing it separately and setting the
+ `web_client_location` config option, but please consider switching
+ to another client.
+
+# Upgrading to v0.33.7
+
+This release removes the example email notification templates from
+`res/templates` (they are now internal to the python package). This
+should only affect you if you (a) deploy your Synapse instance from a
+git checkout or a github snapshot URL, and (b) have email notifications
+enabled.
+
+If you have email notifications enabled, you should ensure that
+`email.template_dir` is either configured to point at a directory where
+you have installed customised templates, or leave it unset to use the
+default templates.
+
+# Upgrading to v0.27.3
+
+This release expands the anonymous usage stats sent if the opt-in
+`report_stats` configuration is set to `true`. We now capture RSS memory
+and CPU use at a very coarse level. This requires administrators to
+install the optional `psutil` python module.
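+
+For a pip-based install, this is typically just (run inside Synapse's
+virtualenv):
+
+    pip install psutil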
+
+We would appreciate it if you could assist by ensuring this module is
+available and `report_stats` is enabled. This will let us see if
+performance changes to synapse are having an impact on the general
+community.
+
+# Upgrading to v0.15.0
+
+If you want to use the new URL previewing API
+(`/_matrix/media/r0/preview_url`) then you have to explicitly enable it
+in the config and update your dependencies. See README.rst
+for details.
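+
+For example (the blacklist below is illustrative; include at least your
+private and loopback ranges):
+
+```yaml
+url_preview_enabled: true
+url_preview_ip_range_blacklist:
+  - '127.0.0.0/8'
+  - '10.0.0.0/8'
+  - '172.16.0.0/12'
+  - '192.168.0.0/16'
+```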
+
+# Upgrading to v0.11.0
+
+This release includes the option to send anonymous usage stats to
+matrix.org, and requires that administrators explicitly opt in or out by
+setting the `report_stats` option to either `true` or `false`.
+
+We would really appreciate it if you could help our project out by
+reporting anonymized usage statistics from your homeserver. Only very
+basic aggregate data (e.g. number of users) will be reported, but it
+helps us to track the growth of the Matrix community, and helps us to
+make Matrix a success, as well as to convince other networks that they
+should peer with us.
+
+# Upgrading to v0.9.0
+
+Application services have had a breaking API change in this version.
+
+They can no longer register themselves with a home server using the AS
+HTTP API. This decision was made because a compromised application
+service with free rein to register any regex in effect grants full
+read/write access to the home server if a regex of `.*` is used. An
+attack where a compromised AS re-registers itself with `.*` was deemed
+too big of a security risk to ignore, and so the ability to register
+with the HS remotely has been removed.
+
+It has been replaced by specifying a list of application service
+registrations in `homeserver.yaml`:
+
+ app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
+
+Where `registration-01.yaml` looks like:
+
+ url: <String> # e.g. "https://my.application.service.com"
+ as_token: <String>
+ hs_token: <String>
+ sender_localpart: <String> # This is a new field which denotes the user_id localpart when using the AS token
+ namespaces:
+ users:
+ - exclusive: <Boolean>
+ regex: <String> # e.g. "@prefix_.*"
+ aliases:
+ - exclusive: <Boolean>
+ regex: <String>
+ rooms:
+ - exclusive: <Boolean>
+ regex: <String>
+
+# Upgrading to v0.8.0
+
+Servers which use captchas will need to add their public key to:
+
+ static/client/register/register_config.js
+
+ window.matrixRegistrationConfig = {
+ recaptcha_public_key: "YOUR_PUBLIC_KEY"
+ };
+
+This is required in order to support registration fallback (typically
+used on mobile devices).
+
+# Upgrading to v0.7.0
+
+New dependencies are:
+
+- pydenticon
+- simplejson
+- syutil
+- matrix-angular-sdk
+
+To pull in these dependencies in a virtual env, run:
+
+ python synapse/python_dependencies.py | xargs -n 1 pip install
+
+# Upgrading to v0.6.0
+
+To pull in new dependencies, run:
+
+ python setup.py develop --user
+
+This update includes a change to the database schema. To upgrade you
+first need to upgrade the database by running:
+
+ python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
+
+Where [<db>]{.title-ref} is the location of the database,
+[<server_name>]{.title-ref} is the server name as specified in the
+synapse configuration, and [<signing_key>]{.title-ref} is the location
+of the signing key as specified in the synapse configuration.
+
+This may take some time to complete. Failures of signatures and content
+hashes can safely be ignored.
+
+# Upgrading to v0.5.1
+
+Depending on precisely when you installed v0.5.0 you may have ended up
+with a stale release of the reference matrix webclient installed as a
+python module. To uninstall it and ensure you are depending on the
+latest module, please run:
+
+ $ pip uninstall syweb
+
+# Upgrading to v0.5.0
+
+The webclient has been split out into a separate repository/package in
+this release. Before you restart your homeserver you will need to pull
+in the webclient package by running:
+
+ python setup.py develop --user
+
+This release completely changes the database schema and so requires
+upgrading it before starting the new version of the homeserver.
+
+The script "database-prepare-for-0.5.0.sh" should be used to upgrade
+the database. This will save all user information, such as logins and
+profiles, but will otherwise purge the database. This includes messages,
+which rooms the home server was a member of, and room alias mappings.
+
+If you would like to keep your history, please take a copy of your
+database file and ask for help in #matrix:matrix.org. The upgrade
+process is, unfortunately, non-trivial and requires human intervention
+to resolve any resulting conflicts during the upgrade process.
+
+Before running the command, the homeserver should first be completely
+shut down. To run it, simply specify the location of the database, e.g.:
+
+    ./scripts/database-prepare-for-0.5.0.sh "homeserver.db"
+
+Once this has successfully completed it will be safe to restart the
+homeserver. You may notice that the homeserver takes a few seconds
+longer to restart than usual as it reinitializes the database.
+
+On startup of the new version, users can rejoin remote rooms either by
+using room aliases or by being reinvited. Alternatively, if any other
+homeserver sends a message to a room that the homeserver was previously
+in, the local HS will automatically rejoin the room.
+
+# Upgrading to v0.4.0
+
+This release needs an updated syutil version. Run:
+
+ python setup.py develop
+
+You will also need to upgrade your configuration as the signing key
+format has changed. Run:
+
+ python -m synapse.app.homeserver --config-path <CONFIG> --generate-config
+
+# Upgrading to v0.3.0
+
+The registration API now closely matches the login API. This introduces
+a bit more back-and-forth between the HS and the client, but
+it improves the overall flexibility of the API. You can now GET on
+/register to retrieve a list of valid registration flows. Upon choosing
+one, it is submitted in the same way as login, e.g.:
+
+    {
+        "type": "m.login.password",
+        "user": "foo",
+        "password": "bar"
+    }
+
+The default HS supports 2 flows, with and without Identity Server email
+authentication. Enabling captcha on the HS will add an extra step to
+all flows: `m.login.recaptcha`, which must be completed before you can
+transition to the next stage. There is a new login type,
+`m.login.email.identity`, which contains the `threepidCreds` key that
+was previously sent in the original register request. For more
+information on this, see the specification.
+
+## Web Client
+
+The VoIP specification has changed between v0.2.0 and v0.3.0. Users
+should refresh any browser tabs to get the latest web client code. Users
+on v0.2.0 of the web client will not be able to call those on v0.3.0 and
+vice versa.
+
+# Upgrading to v0.2.0
+
+The home server now requires SSL config to be set up before it can run.
+To automatically generate a default config, use:
+
+ $ python synapse/app/homeserver.py \
+ --server-name machine.my.domain.name \
+ --bind-port 8448 \
+ --config-path homeserver.config \
+ --generate-config
+
+This config can be edited if desired, for example to specify a different
+SSL certificate to use. Once done you can run the home server using:
+
+ $ python synapse/app/homeserver.py --config-path homeserver.config
+
+See the README.rst for more information.
+
+Also note that some config options have been renamed, including:
+
+- "host" to "server-name"
+- "database" to "database-path"
+- "port" to "bind-port" and "unsecure-port"
+
+# Upgrading to v0.0.1
+
+This release completely changes the database schema and so requires
+upgrading it before starting the new version of the homeserver.
+
+The script "database-prepare-for-0.0.1.sh" should be used to upgrade
+the database. This will save all user information, such as logins and
+profiles, but will otherwise purge the database. This includes messages,
+which rooms the home server was a member of, and room alias mappings.
+
+Before running the command, the homeserver should first be completely
+shut down. To run it, simply specify the location of the database, e.g.:
+
+    ./scripts/database-prepare-for-0.0.1.sh "homeserver.db"
+
+Once this has successfully completed it will be safe to restart the
+homeserver. You may notice that the homeserver takes a few seconds
+longer to restart than usual as it reinitializes the database.
+
+On startup of the new version, users can rejoin remote rooms either by
+using room aliases or by being reinvited. Alternatively, if any other
+homeserver sends a message to a room that the homeserver was previously
+in, the local HS will automatically rejoin the room.
diff --git a/docs/upgrading/README.md b/docs/upgrading/README.md
deleted file mode 100644
index 258e58cf..00000000
--- a/docs/upgrading/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-<!--
- Include the contents of UPGRADE.rst from the project root without moving it, which may
- break links around the internet. Additionally, note that SUMMARY.md is unable to
- directly link to content outside of the docs/ directory. So we use this file as a
- redirection.
--->
-{{#include ../../UPGRADE.rst}} \ No newline at end of file
diff --git a/mypy.ini b/mypy.ini
index c4ff0e66..72ce932d 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -75,6 +75,7 @@ files =
synapse/util/daemonize.py,
synapse/util/hash.py,
synapse/util/iterutils.py,
+ synapse/util/linked_list.py,
synapse/util/metrics.py,
synapse/util/macaroons.py,
synapse/util/module_loader.py,
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages
index 546724f8..e25c5bb2 100755
--- a/scripts-dev/build_debian_packages
+++ b/scripts-dev/build_debian_packages
@@ -10,6 +10,7 @@
# can be passed on the commandline for debugging.
import argparse
+import json
import os
import signal
import subprocess
@@ -34,6 +35,8 @@ By default, builds for all known distributions, but a list of distributions
can be passed on the commandline for debugging.
"""
+projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+
class Builder(object):
def __init__(self, redirect_stdout=False):
@@ -57,9 +60,6 @@ class Builder(object):
raise
def _inner_build(self, dist, skip_tests=False):
- projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- os.chdir(projdir)
-
tag = dist.split(":", 1)[1]
# Make the dir where the debs will live.
@@ -93,6 +93,7 @@ class Builder(object):
],
stdout=stdout,
stderr=subprocess.STDOUT,
+ cwd=projdir,
)
container_name = "synapse_build_" + tag
@@ -180,10 +181,18 @@ if __name__ == "__main__":
help="skip running tests after building",
)
parser.add_argument(
+ "--show-dists-json",
+ action="store_true",
+ help="instead of building the packages, just list the dists to build for, as a json array",
+ )
+ parser.add_argument(
"dist",
nargs="*",
default=DISTS,
help="a list of distributions to build for. Default: %(default)s",
)
args = parser.parse_args()
- run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
+ if args.show_dists_json:
+ print(json.dumps(DISTS))
+ else:
+ run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index ba060104..aca32edc 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -65,4 +65,4 @@ if [[ -n "$1" ]]; then
fi
# Run the tests!
-go test -v -tags synapse_blacklist,msc2946,msc3083,msc2716 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
+go test -v -tags synapse_blacklist,msc2946,msc3083,msc2716,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index fc3df907..5bfaa4ad 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -83,12 +83,6 @@ def run():
if current_version.pre:
# If the current version is an RC we don't need to bump any of the
# version numbers (other than the RC number).
- base_version = "{}.{}.{}".format(
- current_version.major,
- current_version.minor,
- current_version.micro,
- )
-
if rc:
new_version = "{}.{}.{}rc{}".format(
current_version.major,
@@ -97,49 +91,57 @@ def run():
current_version.pre[1] + 1,
)
else:
- new_version = base_version
+ new_version = "{}.{}.{}".format(
+ current_version.major,
+ current_version.minor,
+ current_version.micro,
+ )
else:
- # If this is a new release cycle then we need to know if its a major
- # version bump or a hotfix.
+ # If this is a new release cycle then we need to know if it's a minor
+ # or a patch version bump.
release_type = click.prompt(
"Release type",
- type=click.Choice(("major", "hotfix")),
+ type=click.Choice(("minor", "patch")),
show_choices=True,
- default="major",
+ default="minor",
)
- if release_type == "major":
- base_version = new_version = "{}.{}.{}".format(
- current_version.major,
- current_version.minor + 1,
- 0,
- )
+ if release_type == "minor":
if rc:
new_version = "{}.{}.{}rc1".format(
current_version.major,
current_version.minor + 1,
0,
)
-
+ else:
+ new_version = "{}.{}.{}".format(
+ current_version.major,
+ current_version.minor + 1,
+ 0,
+ )
else:
- base_version = new_version = "{}.{}.{}".format(
- current_version.major,
- current_version.minor,
- current_version.micro + 1,
- )
if rc:
new_version = "{}.{}.{}rc1".format(
current_version.major,
current_version.minor,
current_version.micro + 1,
)
+ else:
+ new_version = "{}.{}.{}".format(
+ current_version.major,
+ current_version.minor,
+ current_version.micro + 1,
+ )
# Confirm the calculated version is OK.
if not click.confirm(f"Create new version: {new_version}?", default=True):
click.get_current_context().abort()
# Switch to the release branch.
- release_branch_name = f"release-v{current_version.major}.{current_version.minor}"
+ parsed_new_version = version.parse(new_version)
+ release_branch_name = (
+ f"release-v{parsed_new_version.major}.{parsed_new_version.minor}"
+ )
release_branch = find_ref(repo, release_branch_name)
if release_branch:
if release_branch.is_remote():
@@ -153,7 +155,7 @@ def run():
# release type.
if current_version.is_prerelease:
default = release_branch_name
- elif release_type == "major":
+ elif release_type == "minor":
default = "develop"
else:
default = "master"
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 86eb76cb..2bbaf555 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -93,6 +93,7 @@ BOOLEAN_COLUMNS = {
"local_media_repository": ["safe_from_quarantine"],
"users": ["shadow_banned"],
"e2e_fallback_keys_json": ["used"],
+ "access_tokens": ["used"],
}
@@ -307,7 +308,8 @@ class Porter(object):
information_schema.table_constraints AS tc
INNER JOIN information_schema.constraint_column_usage AS ccu
USING (table_schema, constraint_name)
- WHERE tc.constraint_type = 'FOREIGN KEY';
+ WHERE tc.constraint_type = 'FOREIGN KEY'
+ AND tc.table_name != ccu.table_name;
"""
txn.execute(sql)
diff --git a/scripts/synapse_review_recent_signups b/scripts/synapse_review_recent_signups
new file mode 100755
index 00000000..a36d46e1
--- /dev/null
+++ b/scripts/synapse_review_recent_signups
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse._scripts.review_recent_signups import main
+
+if __name__ == "__main__":
+ main()
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 1bd03462..5ecce24e 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -47,7 +47,7 @@ try:
except ImportError:
pass
-__version__ = "1.37.1"
+__version__ = "1.38.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/_scripts/review_recent_signups.py b/synapse/_scripts/review_recent_signups.py
new file mode 100644
index 00000000..01dc0c42
--- /dev/null
+++ b/synapse/_scripts/review_recent_signups.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import sys
+import time
+from datetime import datetime
+from typing import List
+
+import attr
+
+from synapse.config._base import RootConfig, find_config_files, read_config_files
+from synapse.config.database import DatabaseConfig
+from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
+from synapse.storage.engines import create_engine
+
+
+class ReviewConfig(RootConfig):
+ "A config class that just pulls out the database config"
+ config_classes = [DatabaseConfig]
+
+
+@attr.s(auto_attribs=True)
+class UserInfo:
+ user_id: str
+ creation_ts: int
+ emails: List[str] = attr.Factory(list)
+ private_rooms: List[str] = attr.Factory(list)
+ public_rooms: List[str] = attr.Factory(list)
+ ips: List[str] = attr.Factory(list)
+
+
+def get_recent_users(txn: LoggingTransaction, since_ms: int) -> List[UserInfo]:
+ """Fetches recently registered users and some info on them."""
+
+ sql = """
+ SELECT name, creation_ts FROM users
+ WHERE
+ ? <= creation_ts
+ AND deactivated = 0
+ """
+
+ txn.execute(sql, (since_ms / 1000,))
+
+ user_infos = [UserInfo(user_id, creation_ts) for user_id, creation_ts in txn]
+
+ for user_info in user_infos:
+ user_info.emails = DatabasePool.simple_select_onecol_txn(
+ txn,
+ table="user_threepids",
+ keyvalues={"user_id": user_info.user_id, "medium": "email"},
+ retcol="address",
+ )
+
+ sql = """
+ SELECT room_id, canonical_alias, name, join_rules
+ FROM local_current_membership
+ INNER JOIN room_stats_state USING (room_id)
+ WHERE user_id = ? AND membership = 'join'
+ """
+
+ txn.execute(sql, (user_info.user_id,))
+ for room_id, canonical_alias, name, join_rules in txn:
+ if join_rules == "public":
+ user_info.public_rooms.append(canonical_alias or name or room_id)
+ else:
+ user_info.private_rooms.append(canonical_alias or name or room_id)
+
+ user_info.ips = DatabasePool.simple_select_onecol_txn(
+ txn,
+ table="user_ips",
+ keyvalues={"user_id": user_info.user_id},
+ retcol="ip",
+ )
+
+ return user_infos
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-c",
+ "--config-path",
+ action="append",
+ metavar="CONFIG_FILE",
+ help="The config files for Synapse.",
+ required=True,
+ )
+ parser.add_argument(
+ "-s",
+ "--since",
+ metavar="duration",
+ help="Specify how far back to review user registrations for, defaults to 7d (i.e. 7 days).",
+ default="7d",
+ )
+ parser.add_argument(
+ "-e",
+ "--exclude-emails",
+ action="store_true",
+ help="Exclude users that have validated email addresses",
+ )
+ parser.add_argument(
+ "-u",
+ "--only-users",
+ action="store_true",
+ help="Only print user IDs that match.",
+ )
+
+ config = ReviewConfig()
+
+ config_args = parser.parse_args(sys.argv[1:])
+ config_files = find_config_files(search_paths=config_args.config_path)
+ config_dict = read_config_files(config_files)
+ config.parse_config_dict(
+ config_dict,
+ )
+
+ since_ms = time.time() * 1000 - config.parse_duration(config_args.since)
+ exclude_users_with_email = config_args.exclude_emails
+ include_context = not config_args.only_users
+
+ for database_config in config.database.databases:
+ if "main" in database_config.databases:
+ break
+
+ engine = create_engine(database_config.config)
+
+ with make_conn(database_config, engine, "review_recent_signups") as db_conn:
+ user_infos = get_recent_users(db_conn.cursor(), since_ms)
+
+ for user_info in user_infos:
+ if exclude_users_with_email and user_info.emails:
+ continue
+
+ if include_context:
+ print_public_rooms = ""
+ if user_info.public_rooms:
+ print_public_rooms = "(" + ", ".join(user_info.public_rooms[:3])
+
+ if len(user_info.public_rooms) > 3:
+ print_public_rooms += ", ..."
+
+ print_public_rooms += ")"
+
+ print("# Created:", datetime.fromtimestamp(user_info.creation_ts))
+ print("# Email:", ", ".join(user_info.emails) or "None")
+ print("# IPs:", ", ".join(user_info.ips))
+ print(
+ "# Number joined public rooms:",
+ len(user_info.public_rooms),
+ print_public_rooms,
+ )
+ print("# Number joined private rooms:", len(user_info.private_rooms))
+ print("#")
+
+ print(user_info.user_id)
+
+ if include_context:
+ print()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index edf1b918..307f5f9a 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Optional, Tuple
import pymacaroons
from netaddr import IPAddress
@@ -28,7 +28,6 @@ from synapse.api.errors import (
InvalidClientTokenError,
MissingClientTokenError,
)
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.http import get_request_user_agent
@@ -38,7 +37,6 @@ from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import Requester, StateMap, UserID, create_requester
from synapse.util.caches.lrucache import LruCache
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
-from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -46,15 +44,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-AuthEventTypes = (
- EventTypes.Create,
- EventTypes.Member,
- EventTypes.PowerLevels,
- EventTypes.JoinRules,
- EventTypes.RoomHistoryVisibility,
- EventTypes.ThirdPartyInvite,
-)
-
# guests always get this device id.
GUEST_DEVICE_ID = "guest_device"
@@ -65,9 +54,7 @@ class _InvalidMacaroonException(Exception):
class Auth:
"""
- FIXME: This class contains a mix of functions for authenticating users
- of our client-server API and authenticating events added to room graphs.
- The latter should be moved to synapse.handlers.event_auth.EventAuthHandler.
+ This class contains functions for authenticating users of our client-server API.
"""
def __init__(self, hs: "HomeServer"):
@@ -89,18 +76,6 @@ class Auth:
self._macaroon_secret_key = hs.config.macaroon_secret_key
self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
- async def check_from_context(
- self, room_version: str, event, context, do_sig_check=True
- ) -> None:
- auth_event_ids = event.auth_event_ids()
- auth_events_by_id = await self.store.get_events(auth_event_ids)
- auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}
-
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
- event_auth.check(
- room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
- )
-
async def check_user_in_room(
self,
room_id: str,
@@ -151,13 +126,6 @@ class Auth:
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
- async def check_host_in_room(self, room_id: str, host: str) -> bool:
- with Measure(self.clock, "check_host_in_room"):
- return await self.store.is_host_joined(room_id, host)
-
- def get_public_keys(self, invite_event: EventBase) -> List[Dict[str, Any]]:
- return event_auth.get_public_keys(invite_event)
-
async def get_user_by_req(
self,
request: SynapseRequest,
@@ -245,6 +213,11 @@ class Auth:
errcode=Codes.GUEST_ACCESS_FORBIDDEN,
)
+ # Mark the token as used. This is used to invalidate old refresh
+ # tokens after some time.
+ if not user_info.token_used and token_id is not None:
+ await self.store.mark_access_token_as_used(token_id)
+
requester = create_requester(
user_info.user_id,
token_id,
@@ -483,44 +456,6 @@ class Auth:
"""
return await self.store.is_server_admin(user)
- def compute_auth_events(
- self,
- event,
- current_state_ids: StateMap[str],
- for_verification: bool = False,
- ) -> List[str]:
- """Given an event and current state return the list of event IDs used
- to auth an event.
-
- If `for_verification` is False then only return auth events that
- should be added to the event's `auth_events`.
-
- Returns:
- List of event IDs.
- """
-
- if event.type == EventTypes.Create:
- return []
-
- # Currently we ignore the `for_verification` flag even though there are
- # some situations where we can drop particular auth events when adding
- # to the event's `auth_events` (e.g. joins pointing to previous joins
- # when room is publicly joinable). Dropping event IDs has the
- # advantage that the auth chain for the room grows slower, but we use
- # the auth chain in state resolution v2 to order events, which means
- # care must be taken if dropping events to ensure that it doesn't
- # introduce undesirable "state reset" behaviour.
- #
- # All of which sounds a bit tricky so we don't bother for now.
-
- auth_ids = []
- for etype, state_key in event_auth.auth_types_for_event(event):
- auth_ev_id = current_state_ids.get((etype, state_key))
- if auth_ev_id:
- auth_ids.append(auth_ev_id)
-
- return auth_ids
-
async def check_can_change_room_list(self, room_id: str, user: UserID) -> bool:
"""Determine whether the user is allowed to edit the room's entry in the
published room list.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 414e4c01..8363c2bb 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -201,6 +201,12 @@ class EventContentFields:
)
+class RoomTypes:
+ """Understood values of the room_type field of m.room.create events."""
+
+ SPACE = "m.space"
+
+
class RoomEncryptionAlgorithms:
MEGOLM_V1_AES_SHA2 = "m.megolm.v1.aes-sha2"
DEFAULT = MEGOLM_V1_AES_SHA2
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 88791368..b30571fe 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -21,7 +21,7 @@ import socket
import sys
import traceback
import warnings
-from typing import Awaitable, Callable, Iterable
+from typing import TYPE_CHECKING, Awaitable, Callable, Iterable
from cryptography.utils import CryptographyDeprecationWarning
from typing_extensions import NoReturn
@@ -41,10 +41,14 @@ from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
+from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
from synapse.util.daemonize import daemonize_process
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
# list of tuples of function, args list, kwargs dict
@@ -312,7 +316,7 @@ def refresh_certificate(hs):
logger.info("Context factories updated.")
-async def start(hs: "synapse.server.HomeServer"):
+async def start(hs: "HomeServer"):
"""
Start a Synapse server or worker.
@@ -365,6 +369,9 @@ async def start(hs: "synapse.server.HomeServer"):
load_legacy_spam_checkers(hs)
+ # If we've configured an expiry time for caches, start the background job now.
+ setup_expire_lru_cache_entries(hs)
+
# It is now safe to start your Synapse.
hs.start_listening()
hs.get_datastore().db_pool.start_profiling()
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index 23ca0c83..06fbd116 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -5,6 +5,7 @@ from synapse.config import (
api,
appservice,
auth,
+ cache,
captcha,
cas,
consent,
@@ -88,6 +89,7 @@ class RootConfig:
tracer: tracer.TracerConfig
redis: redis.RedisConfig
modules: modules.ModulesConfig
+ caches: cache.CacheConfig
federation: federation.FederationConfig
config_classes: List = ...
diff --git a/synapse/config/cache.py b/synapse/config/cache.py
index 91165ee1..7789b403 100644
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -116,35 +116,41 @@ class CacheConfig(Config):
#event_cache_size: 10K
caches:
- # Controls the global cache factor, which is the default cache factor
- # for all caches if a specific factor for that cache is not otherwise
- # set.
- #
- # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
- # variable. Setting by environment variable takes priority over
- # setting through the config file.
- #
- # Defaults to 0.5, which will half the size of all caches.
- #
- #global_factor: 1.0
-
- # A dictionary of cache name to cache factor for that individual
- # cache. Overrides the global cache factor for a given cache.
- #
- # These can also be set through environment variables comprised
- # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
- # letters and underscores. Setting by environment variable
- # takes priority over setting through the config file.
- # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
- #
- # Some caches have '*' and other characters that are not
- # alphanumeric or underscores. These caches can be named with or
- # without the special characters stripped. For example, to specify
- # the cache factor for `*stateGroupCache*` via an environment
- # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
- #
- per_cache_factors:
- #get_users_who_share_room_with_user: 2.0
+ # Controls the global cache factor, which is the default cache factor
+ # for all caches if a specific factor for that cache is not otherwise
+ # set.
+ #
+ # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
+ # variable. Setting by environment variable takes priority over
+ # setting through the config file.
+ #
+ # Defaults to 0.5, which will half the size of all caches.
+ #
+ #global_factor: 1.0
+
+ # A dictionary of cache name to cache factor for that individual
+ # cache. Overrides the global cache factor for a given cache.
+ #
+ # These can also be set through environment variables comprised
+ # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
+ # letters and underscores. Setting by environment variable
+ # takes priority over setting through the config file.
+ # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
+ #
+ # Some caches have '*' and other characters that are not
+ # alphanumeric or underscores. These caches can be named with or
+ # without the special characters stripped. For example, to specify
+ # the cache factor for `*stateGroupCache*` via an environment
+ # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
+ #
+ per_cache_factors:
+ #get_users_who_share_room_with_user: 2.0
+
+ # Controls how long an entry can be in a cache without having been
+ # accessed before being evicted. Defaults to None, which means
+ # entries are never evicted based on time.
+ #
+ #expiry_time: 30m
"""
def read_config(self, config, **kwargs):
@@ -200,6 +206,12 @@ class CacheConfig(Config):
e.message # noqa: B306, DependencyException.message is a property
)
+ expiry_time = cache_config.get("expiry_time")
+ if expiry_time:
+ self.expiry_time_msec = self.parse_duration(expiry_time)
+ else:
+ self.expiry_time_msec = None
+
# Resize all caches (if necessary) with the new factors we've loaded
self.resize_all_caches()
diff --git a/synapse/config/consent.py b/synapse/config/consent.py
index 30d07cc2..b05a9bd9 100644
--- a/synapse/config/consent.py
+++ b/synapse/config/consent.py
@@ -22,7 +22,7 @@ DEFAULT_CONFIG = """\
# User Consent configuration
#
# for detailed instructions, see
-# https://github.com/matrix-org/synapse/blob/master/docs/consent_tracking.md
+# https://matrix-org.github.io/synapse/latest/consent_tracking.html
#
# Parts of this section are required if enabling the 'consent' resource under
# 'listeners', in particular 'template_dir' and 'version'.
diff --git a/synapse/config/database.py b/synapse/config/database.py
index c76ef1e1..3d7d92f6 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -62,7 +62,8 @@ DEFAULT_CONFIG = """\
# cp_min: 5
# cp_max: 10
#
-# For more information on using Synapse with Postgres, see `docs/postgres.md`.
+# For more information on using Synapse with Postgres,
+# see https://matrix-org.github.io/synapse/latest/postgres.html.
#
database:
name: sqlite3
diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py
index 9e07e730..9d295f58 100644
--- a/synapse/config/jwt.py
+++ b/synapse/config/jwt.py
@@ -64,7 +64,7 @@ class JWTConfig(Config):
# Note that this is a non-standard login type and client support is
# expected to be non-existent.
#
- # See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md.
+ # See https://matrix-org.github.io/synapse/latest/jwt.html.
#
#jwt_config:
# Uncomment the following to enable authorization using JSON web
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 91d9bcf3..ad4e6e61 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -49,7 +49,7 @@ DEFAULT_LOG_CONFIG = Template(
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
-# [2]: https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md
+# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
diff --git a/synapse/config/modules.py b/synapse/config/modules.py
index 3209e1c4..ae0821e5 100644
--- a/synapse/config/modules.py
+++ b/synapse/config/modules.py
@@ -37,7 +37,7 @@ class ModulesConfig(Config):
# Server admins can expand Synapse's functionality with external modules.
#
- # See https://matrix-org.github.io/synapse/develop/modules.html for more
+ # See https://matrix-org.github.io/synapse/latest/modules.html for more
# documentation on how to configure or create custom modules for Synapse.
#
modules:
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index ea0abf5a..942e2672 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -166,7 +166,7 @@ class OIDCConfig(Config):
#
# module: The class name of a custom mapping module. Default is
# {mapping_provider!r}.
- # See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers
+ # See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
# for information on implementing a custom mapping provider.
#
# config: Configuration for the mapping provider module. This section will
@@ -217,7 +217,7 @@ class OIDCConfig(Config):
# - attribute: groups
# value: "admin"
#
- # See https://github.com/matrix-org/synapse/blob/master/docs/openid.md
+ # See https://matrix-org.github.io/synapse/latest/openid.html
# for information on how to configure these options.
#
# For backwards compatibility, it is also possible to configure a single OIDC
diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py
index 1cf69734..fd90b797 100644
--- a/synapse/config/password_auth_providers.py
+++ b/synapse/config/password_auth_providers.py
@@ -57,7 +57,7 @@ class PasswordAuthProviderConfig(Config):
# ex. LDAP, external tokens, etc.
#
# For more information and known implementations, please see
- # https://github.com/matrix-org/synapse/blob/master/docs/password_auth_providers.md
+ # https://matrix-org.github.io/synapse/latest/password_auth_providers.html
#
# Note: instances wishing to use SAML or CAS authentication should
# instead use the `saml2_config` or `cas_config` options,
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index d9dc55a0..0ad919b1 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -119,6 +119,27 @@ class RegistrationConfig(Config):
session_lifetime = self.parse_duration(session_lifetime)
self.session_lifetime = session_lifetime
+ # The `access_token_lifetime` applies to tokens that can be renewed
+ # using a refresh token, as per MSC2918. If it is `None`, the refresh
+ # token mechanism is disabled.
+ #
+ # Since it is incompatible with the `session_lifetime` mechanism, it is set to
+ # `None` by default if a `session_lifetime` is set.
+ access_token_lifetime = config.get(
+ "access_token_lifetime", "5m" if session_lifetime is None else None
+ )
+ if access_token_lifetime is not None:
+ access_token_lifetime = self.parse_duration(access_token_lifetime)
+ self.access_token_lifetime = access_token_lifetime
+
+ if session_lifetime is not None and access_token_lifetime is not None:
+ raise ConfigError(
+ "The refresh token mechanism is incompatible with the "
+ "`session_lifetime` option. Consider disabling the "
+ "`session_lifetime` option or disabling the refresh token "
+ "mechanism by removing the `access_token_lifetime` option."
+ )
+
# The success template used during fallback auth.
self.fallback_success_template = self.read_template("auth_success.html")
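The interaction between the two options added above can be reduced to a standalone sketch (a hypothetical helper, not Synapse's Config class; duration strings are left unparsed for brevity):

    from typing import Optional, Tuple

    def resolve_lifetimes(config: dict) -> Tuple[Optional[str], Optional[str]]:
        # access_token_lifetime (MSC2918 refresh tokens) defaults to "5m",
        # but only when session_lifetime is unset; setting both is an error.
        session_lifetime = config.get("session_lifetime")
        access_token_lifetime = config.get(
            "access_token_lifetime", "5m" if session_lifetime is None else None
        )
        if session_lifetime is not None and access_token_lifetime is not None:
            raise ValueError(
                "session_lifetime is incompatible with the refresh token mechanism"
            )
        return session_lifetime, access_token_lifetime

    assert resolve_lifetimes({}) == (None, "5m")
    assert resolve_lifetimes({"session_lifetime": "24h"}) == ("24h", None)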
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 2f77d670..a7a82742 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -250,7 +250,7 @@ class ContentRepositoryConfig(Config):
#
# If you are using a reverse proxy you may also need to set this value in
# your reverse proxy's config. Notably Nginx has a small max body size by default.
- # See https://matrix-org.github.io/synapse/develop/reverse_proxy.html.
+ # See https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
#
#max_upload_size: 50M
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 0833a5f7..6bff7152 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -153,7 +153,7 @@ ROOM_COMPLEXITY_TOO_GREAT = (
METRICS_PORT_WARNING = """\
The metrics_port configuration option is deprecated in Synapse 0.31 in favour of
a listener. Please see
-https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
+https://matrix-org.github.io/synapse/latest/metrics-howto.html
on how to configure the new listener.
--------------------------------------------------------------------------------"""
@@ -811,7 +811,7 @@ class ServerConfig(Config):
# In most cases you should avoid using a matrix specific subdomain such as
# matrix.example.com or synapse.example.com as the server_name for the same
# reasons you wouldn't use user@email.example.com as your email address.
- # See https://github.com/matrix-org/synapse/blob/master/docs/delegate.md
+ # See https://matrix-org.github.io/synapse/latest/delegate.html
# for information on how to host Synapse on a subdomain while preserving
# a clean server_name.
#
@@ -988,9 +988,9 @@ class ServerConfig(Config):
# 'all local interfaces'.
#
# type: the type of listener. Normally 'http', but other valid options are:
- # 'manhole' (see docs/manhole.md),
- # 'metrics' (see docs/metrics-howto.md),
- # 'replication' (see docs/workers.md).
+ # 'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html),
+ # 'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html),
+ # 'replication' (see https://matrix-org.github.io/synapse/latest/workers.html).
#
# tls: set to true to enable TLS for this listener. Will use the TLS
# key/cert specified in tls_private_key_path / tls_certificate_path.
@@ -1015,8 +1015,8 @@ class ServerConfig(Config):
# client: the client-server API (/_matrix/client), and the synapse admin
# API (/_synapse/admin). Also implies 'media' and 'static'.
#
- # consent: user consent forms (/_matrix/consent). See
- # docs/consent_tracking.md.
+ # consent: user consent forms (/_matrix/consent).
+ # See https://matrix-org.github.io/synapse/latest/consent_tracking.html.
#
# federation: the server-server API (/_matrix/federation). Also implies
# 'media', 'keys', 'openid'
@@ -1025,12 +1025,13 @@ class ServerConfig(Config):
#
# media: the media API (/_matrix/media).
#
- # metrics: the metrics interface. See docs/metrics-howto.md.
+ # metrics: the metrics interface.
+ # See https://matrix-org.github.io/synapse/latest/metrics-howto.html.
#
# openid: OpenID authentication.
#
- # replication: the HTTP replication API (/_synapse/replication). See
- # docs/workers.md.
+ # replication: the HTTP replication API (/_synapse/replication).
+ # See https://matrix-org.github.io/synapse/latest/workers.html.
#
# static: static resources under synapse/static (/_matrix/static). (Mostly
# useful for 'fallback authentication'.)
@@ -1050,7 +1051,7 @@ class ServerConfig(Config):
# that unwraps TLS.
#
# If you plan to use a reverse proxy, please see
- # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
+ # https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
#
%(unsecure_http_bindings)s
diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py
index d0311d64..cb7716c8 100644
--- a/synapse/config/spam_checker.py
+++ b/synapse/config/spam_checker.py
@@ -26,7 +26,7 @@ LEGACY_SPAM_CHECKER_WARNING = """
This server is using a spam checker module that is implementing the deprecated spam
checker interface. Please check with the module's maintainer to see if a new version
supporting Synapse's generic modules system is available.
-For more information, please see https://matrix-org.github.io/synapse/develop/modules.html
+For more information, please see https://matrix-org.github.io/synapse/latest/modules.html
---------------------------------------------------------------------------------------"""
diff --git a/synapse/config/stats.py b/synapse/config/stats.py
index 3d44b512..78f61fe9 100644
--- a/synapse/config/stats.py
+++ b/synapse/config/stats.py
@@ -51,7 +51,7 @@ class StatsConfig(Config):
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """
# Settings for local room and user statistics collection. See
- # docs/room_and_user_statistics.md.
+ # https://matrix-org.github.io/synapse/latest/room_and_user_statistics.html.
#
stats:
# Uncomment the following to disable room and user statistics. Note that doing
diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py
index d0ea1726..21b9a883 100644
--- a/synapse/config/tracer.py
+++ b/synapse/config/tracer.py
@@ -81,7 +81,7 @@ class TracerConfig(Config):
#enabled: true
# The list of homeservers we wish to send and receive span contexts and span baggage.
- # See docs/opentracing.rst.
+ # See https://matrix-org.github.io/synapse/latest/opentracing.html.
#
# This is a list of regexes which are matched against the server_name of the
# homeserver.
diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
index 4cbf79ee..b10df8a2 100644
--- a/synapse/config/user_directory.py
+++ b/synapse/config/user_directory.py
@@ -50,7 +50,7 @@ class UserDirectoryConfig(Config):
#
# If you set it true, you'll have to rebuild the user_directory search
# indexes, see:
- # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
+ # https://matrix-org.github.io/synapse/latest/user_directory.html
#
# Uncomment to return search results containing all known users, even if that
# user does not share a room with the requester.
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 33d7c602..89bcf815 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -14,7 +14,7 @@
# limitations under the License.
import logging
-from typing import Any, Dict, List, Optional, Set, Tuple
+from typing import Any, Dict, List, Optional, Set, Tuple, Union
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@@ -29,6 +29,7 @@ from synapse.api.room_versions import (
RoomVersion,
)
from synapse.events import EventBase
+from synapse.events.builder import EventBuilder
from synapse.types import StateMap, UserID, get_domain_from_id
logger = logging.getLogger(__name__)
@@ -724,7 +725,7 @@ def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]:
return public_keys
-def auth_types_for_event(event: EventBase) -> Set[Tuple[str, str]]:
+def auth_types_for_event(event: Union[EventBase, EventBuilder]) -> Set[Tuple[str, str]]:
"""Given an event, return a list of (EventType, StateKey) that may be
needed to auth the event. The returned list may be a superset of what
would actually be required depending on the full state of the room.
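A minimal sketch of why the widened signature type-checks: both EventBase and EventBuilder expose the fields the function reads, so a Union (or, as shown here, a hypothetical Protocol — not a real Synapse class) suffices:

    from typing import Optional, Protocol, Set, Tuple

    class _AuthableEvent(Protocol):
        type: str
        sender: str
        state_key: Optional[str]

    def auth_types_for(event: _AuthableEvent) -> Set[Tuple[str, str]]:
        # Every event needs the create, power-levels and sender-membership
        # state to be authed, whether it is built or still a builder.
        return {
            ("m.room.create", ""),
            ("m.room.power_levels", ""),
            ("m.room.member", event.sender),
        }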
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 0cb9c1cc..6286ad99 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -118,7 +118,7 @@ class _EventInternalMetadata:
proactively_send = DictProperty("proactively_send") # type: bool
redacted = DictProperty("redacted") # type: bool
txn_id = DictProperty("txn_id") # type: str
- token_id = DictProperty("token_id") # type: str
+ token_id = DictProperty("token_id") # type: int
historical = DictProperty("historical") # type: bool
# XXX: These are set by StreamWorkerStore._set_before_and_after.
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 81bf8615..26e39508 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -12,12 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import attr
from nacl.signing import SigningKey
-from synapse.api.auth import Auth
from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import UnsupportedRoomVersionError
from synapse.api.room_versions import (
@@ -34,10 +33,14 @@ from synapse.types import EventID, JsonDict
from synapse.util import Clock
from synapse.util.stringutils import random_string
+if TYPE_CHECKING:
+ from synapse.handlers.event_auth import EventAuthHandler
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
-@attr.s(slots=True, cmp=False, frozen=True)
+@attr.s(slots=True, cmp=False, frozen=True, auto_attribs=True)
class EventBuilder:
"""A format independent event builder used to build up the event content
before signing the event.
@@ -62,31 +65,30 @@ class EventBuilder:
_signing_key: The signing key to use to sign the event as the server
"""
- _state = attr.ib(type=StateHandler)
- _auth = attr.ib(type=Auth)
- _store = attr.ib(type=DataStore)
- _clock = attr.ib(type=Clock)
- _hostname = attr.ib(type=str)
- _signing_key = attr.ib(type=SigningKey)
+ _state: StateHandler
+ _event_auth_handler: "EventAuthHandler"
+ _store: DataStore
+ _clock: Clock
+ _hostname: str
+ _signing_key: SigningKey
- room_version = attr.ib(type=RoomVersion)
+ room_version: RoomVersion
- room_id = attr.ib(type=str)
- type = attr.ib(type=str)
- sender = attr.ib(type=str)
+ room_id: str
+ type: str
+ sender: str
- content = attr.ib(default=attr.Factory(dict), type=JsonDict)
- unsigned = attr.ib(default=attr.Factory(dict), type=JsonDict)
+ content: JsonDict = attr.Factory(dict)
+ unsigned: JsonDict = attr.Factory(dict)
# These only exist on a subset of events, so they raise AttributeError if
# someone tries to get them when they don't exist.
- _state_key = attr.ib(default=None, type=Optional[str])
- _redacts = attr.ib(default=None, type=Optional[str])
- _origin_server_ts = attr.ib(default=None, type=Optional[int])
+ _state_key: Optional[str] = None
+ _redacts: Optional[str] = None
+ _origin_server_ts: Optional[int] = None
- internal_metadata = attr.ib(
- default=attr.Factory(lambda: _EventInternalMetadata({})),
- type=_EventInternalMetadata,
+ internal_metadata: _EventInternalMetadata = attr.Factory(
+ lambda: _EventInternalMetadata({})
)
@property
@@ -123,7 +125,9 @@ class EventBuilder:
state_ids = await self._state.get_current_state_ids(
self.room_id, prev_event_ids
)
- auth_event_ids = self._auth.compute_auth_events(self, state_ids)
+ auth_event_ids = self._event_auth_handler.compute_auth_events(
+ self, state_ids
+ )
format_version = self.room_version.event_format
if format_version == EventFormatVersions.V1:
@@ -184,24 +188,23 @@ class EventBuilder:
class EventBuilderFactory:
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.hostname = hs.hostname
self.signing_key = hs.signing_key
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
- self.auth = hs.get_auth()
+ self._event_auth_handler = hs.get_event_auth_handler()
- def new(self, room_version, key_values):
+ def new(self, room_version: str, key_values: dict) -> EventBuilder:
"""Generate an event builder appropriate for the given room version
Deprecated: use for_room_version with a RoomVersion object instead
Args:
- room_version (str): Version of the room that we're creating an event builder
- for
- key_values (dict): Fields used as the basis of the new event
+ room_version: Version of the room that we're creating an event builder for
+ key_values: Fields used as the basis of the new event
Returns:
EventBuilder
@@ -212,13 +215,15 @@ class EventBuilderFactory:
raise UnsupportedRoomVersionError()
return self.for_room_version(v, key_values)
- def for_room_version(self, room_version, key_values):
+ def for_room_version(
+ self, room_version: RoomVersion, key_values: dict
+ ) -> EventBuilder:
"""Generate an event builder appropriate for the given room version
Args:
- room_version (synapse.api.room_versions.RoomVersion):
+ room_version:
Version of the room that we're creating an event builder for
- key_values (dict): Fields used as the basis of the new event
+ key_values: Fields used as the basis of the new event
Returns:
EventBuilder
@@ -226,7 +231,7 @@ class EventBuilderFactory:
return EventBuilder(
store=self.store,
state=self.state,
- auth=self.auth,
+ event_auth_handler=self._event_auth_handler,
clock=self.clock,
hostname=self.hostname,
signing_key=self.signing_key,
@@ -286,15 +291,15 @@ def create_local_event_from_event_dict(
_event_id_counter = 0
-def _create_event_id(clock, hostname):
+def _create_event_id(clock: Clock, hostname: str) -> str:
"""Create a new event ID
Args:
- clock (Clock)
- hostname (str): The server name for the event ID
+ clock
+ hostname: The server name for the event ID
Returns:
- str
+ The new event ID
"""
global _event_id_counter
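The attrs conversion in this file is mechanical; a self-contained sketch of the before/after equivalence (hypothetical class, not EventBuilder itself):

    import attr

    # Old style: explicit attr.ib(type=...) declarations.
    @attr.s(slots=True, frozen=True)
    class OldStyle:
        name = attr.ib(type=str)
        content = attr.ib(default=attr.Factory(dict), type=dict)

    # New style: plain annotations under auto_attribs=True; attr.Factory
    # is kept so each instance gets its own mutable default.
    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class NewStyle:
        name: str
        content: dict = attr.Factory(dict)

    assert OldStyle("a").content == NewStyle("a").content == {}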
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index c066617b..2bfe6a3d 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -89,12 +89,12 @@ class FederationBase:
result = await self.spam_checker.check_event_for_spam(pdu)
if result:
- logger.warning(
- "Event contains spam, redacting %s: %s",
- pdu.event_id,
- pdu.get_pdu_json(),
- )
- return prune_event(pdu)
+ logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
+ # we redact (to save disk space) as well as soft-fail (to stop
+ # using the event in prev_events).
+ redacted_event = prune_event(pdu)
+ redacted_event.internal_metadata.soft_failed = True
+ return redacted_event
return pdu
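A condensed sketch of the behavioural change (stub classes; prune_event and the internal-metadata flag are Synapse internals, stood in for here):

    class _Metadata:
        soft_failed = False

    class Pdu:
        def __init__(self, event_id: str, content: dict):
            self.event_id = event_id
            self.content = content
            self.internal_metadata = _Metadata()

    def prune_event(pdu: Pdu) -> Pdu:
        # Stand-in for synapse.events.utils.prune_event: strip the content.
        return Pdu(pdu.event_id, {})

    def handle_spammy_pdu(pdu: Pdu) -> Pdu:
        redacted = prune_event(pdu)                    # save disk space
        redacted.internal_metadata.soft_failed = True  # keep out of prev_events
        return redacted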
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 1d050e54..ac0f2ccf 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -34,7 +34,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
-from synapse.api.constants import EduTypes, EventTypes
+from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
@@ -46,6 +46,7 @@ from synapse.api.errors import (
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
@@ -107,9 +108,9 @@ class FederationServer(FederationBase):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
- self.auth = hs.get_auth()
self.handler = hs.get_federation_handler()
self.state = hs.get_state_handler()
+ self._event_auth_handler = hs.get_event_auth_handler()
self.device_handler = hs.get_device_handler()
@@ -147,6 +148,41 @@ class FederationServer(FederationBase):
self._room_prejoin_state_types = hs.config.api.room_prejoin_state
+ # Whether we have started handling old events in the staging area.
+ self._started_handling_of_staged_events = False
+
+ @wrap_as_background_process("_handle_old_staged_events")
+ async def _handle_old_staged_events(self) -> None:
+ """Handle old staged events by fetching all rooms that have staged
+ events and starting the processing of each of those rooms.
+ """
+
+ # Get all the rooms IDs with staged events.
+ room_ids = await self.store.get_all_rooms_with_staged_incoming_events()
+
+ # We then shuffle them so that if there are multiple instances doing
+ # this work they're less likely to collide.
+ random.shuffle(room_ids)
+
+ for room_id in room_ids:
+ room_version = await self.store.get_room_version(room_id)
+
+ # Try to acquire the processing lock for the room; if we get it, start a
+ # background process for handling the events in the room.
+ lock = await self.store.try_acquire_lock(
+ _INBOUND_EVENT_HANDLING_LOCK_NAME, room_id
+ )
+ if lock:
+ logger.info("Handling old staged inbound events in %s", room_id)
+ self._process_incoming_pdus_in_room_inner(
+ room_id,
+ room_version,
+ lock,
+ )
+
+ # We pause a bit so that we don't start handling all rooms at once.
+ await self._clock.sleep(random.uniform(0, 0.1))
+
async def on_backfill_request(
self, origin: str, room_id: str, versions: List[str], limit: int
) -> Tuple[int, Dict[str, Any]]:
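The pick-up pattern above, reduced to an asyncio sketch (asyncio.Lock stands in for Synapse's database-backed Lock; room IDs are made up):

    import asyncio
    import random

    async def process_room(room_id: str, lock: asyncio.Lock) -> None:
        try:
            print("handling staged events in", room_id)
        finally:
            lock.release()

    async def handle_old_staged(room_ids: list, locks: dict) -> None:
        random.shuffle(room_ids)      # spread rooms across instances
        tasks = []
        for room_id in room_ids:
            lock = locks.setdefault(room_id, asyncio.Lock())
            if lock.locked():
                continue              # another instance owns this room
            await lock.acquire()
            tasks.append(asyncio.create_task(process_room(room_id, lock)))
            await asyncio.sleep(random.uniform(0, 0.1))  # stagger start-up
        await asyncio.gather(*tasks)

    asyncio.run(handle_old_staged(["!a:example.org", "!b:example.org"], {}))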
@@ -165,6 +201,12 @@ class FederationServer(FederationBase):
async def on_incoming_transaction(
self, origin: str, transaction_data: JsonDict
) -> Tuple[int, Dict[str, Any]]:
+ # If we receive a transaction we should make sure that we kick off
+ # handling of any old events in the staging area.
+ if not self._started_handling_of_staged_events:
+ self._started_handling_of_staged_events = True
+ self._handle_old_staged_events()
+
# keep this as early as possible to make the calculated origin ts as
# accurate as possible.
request_time = self._clock.time_msec()
@@ -368,22 +410,21 @@ class FederationServer(FederationBase):
async def process_pdu(pdu: EventBase) -> JsonDict:
event_id = pdu.event_id
- with pdu_process_time.time():
- with nested_logging_context(event_id):
- try:
- await self._handle_received_pdu(origin, pdu)
- return {}
- except FederationError as e:
- logger.warning("Error handling PDU %s: %s", event_id, e)
- return {"error": str(e)}
- except Exception as e:
- f = failure.Failure()
- logger.error(
- "Failed to handle PDU %s",
- event_id,
- exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
- )
- return {"error": str(e)}
+ with nested_logging_context(event_id):
+ try:
+ await self._handle_received_pdu(origin, pdu)
+ return {}
+ except FederationError as e:
+ logger.warning("Error handling PDU %s: %s", event_id, e)
+ return {"error": str(e)}
+ except Exception as e:
+ f = failure.Failure()
+ logger.error(
+ "Failed to handle PDU %s",
+ event_id,
+ exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
+ )
+ return {"error": str(e)}
await concurrently_execute(
process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
@@ -420,7 +461,7 @@ class FederationServer(FederationBase):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
- in_room = await self.auth.check_host_in_room(room_id, origin)
+ in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
@@ -453,7 +494,7 @@ class FederationServer(FederationBase):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
- in_room = await self.auth.check_host_in_room(room_id, origin)
+ in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
@@ -544,26 +585,21 @@ class FederationServer(FederationBase):
return {"event": ret_pdu.get_pdu_json(time_now)}
async def on_send_join_request(
- self, origin: str, content: JsonDict
+ self, origin: str, content: JsonDict, room_id: str
) -> Dict[str, Any]:
- logger.debug("on_send_join_request: content: %s", content)
-
- assert_params_in_dict(content, ["room_id"])
- room_version = await self.store.get_room_version(content["room_id"])
- pdu = event_from_pdu_json(content, room_version)
-
- origin_host, _ = parse_server_name(origin)
- await self.check_server_matches_acl(origin_host, pdu.room_id)
-
- logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
+ context = await self._on_send_membership_event(
+ origin, content, Membership.JOIN, room_id
+ )
- pdu = await self._check_sigs_and_hash(room_version, pdu)
+ prev_state_ids = await context.get_prev_state_ids()
+ state_ids = list(prev_state_ids.values())
+ auth_chain = await self.store.get_auth_chain(room_id, state_ids)
+ state = await self.store.get_events(state_ids)
- res_pdus = await self.handler.on_send_join_request(origin, pdu)
time_now = self._clock.time_msec()
return {
- "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
- "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
+ "state": [p.get_pdu_json(time_now) for p in state.values()],
+ "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain],
}
async def on_make_leave_request(
@@ -578,21 +614,11 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
- async def on_send_leave_request(self, origin: str, content: JsonDict) -> dict:
+ async def on_send_leave_request(
+ self, origin: str, content: JsonDict, room_id: str
+ ) -> dict:
logger.debug("on_send_leave_request: content: %s", content)
-
- assert_params_in_dict(content, ["room_id"])
- room_version = await self.store.get_room_version(content["room_id"])
- pdu = event_from_pdu_json(content, room_version)
-
- origin_host, _ = parse_server_name(origin)
- await self.check_server_matches_acl(origin_host, pdu.room_id)
-
- logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
-
- pdu = await self._check_sigs_and_hash(room_version, pdu)
-
- await self.handler.on_send_leave_request(origin, pdu)
+ await self._on_send_membership_event(origin, content, Membership.LEAVE, room_id)
return {}
async def on_make_knock_request(
@@ -658,39 +684,76 @@ class FederationServer(FederationBase):
Returns:
The stripped room state.
"""
- logger.debug("on_send_knock_request: content: %s", content)
+ event_context = await self._on_send_membership_event(
+ origin, content, Membership.KNOCK, room_id
+ )
+
+ # Retrieve stripped state events from the room and send them back to the remote
+ # server. This will allow the remote server's clients to display information
+ # related to the room while the knock request is pending.
+ stripped_room_state = (
+ await self.store.get_stripped_room_state_from_event_context(
+ event_context, self._room_prejoin_state_types
+ )
+ )
+ return {"knock_state_events": stripped_room_state}
+
+ async def _on_send_membership_event(
+ self, origin: str, content: JsonDict, membership_type: str, room_id: str
+ ) -> EventContext:
+ """Handle an on_send_{join,leave,knock} request
+
+ Does some preliminary validation before passing the request on to the
+ federation handler.
+
+ Args:
+ origin: The (authenticated) requesting server
+ content: The body of the send_* request - a complete membership event
+ membership_type: The expected membership type (join, leave or knock,
+ depending on the endpoint)
+ room_id: The room_id from the request, to be validated against the room_id
+ in the event
+
+ Returns:
+ The context of the event after inserting it into the room graph.
+
+ Raises:
+ SynapseError if there is a problem with the request, including things like
+ the room_id not matching or the event not being authorized.
+ """
+ assert_params_in_dict(content, ["room_id"])
+ if content["room_id"] != room_id:
+ raise SynapseError(
+ 400,
+ "Room ID in body does not match that in request path",
+ Codes.BAD_JSON,
+ )
room_version = await self.store.get_room_version(room_id)
- # Check that this room supports knocking as defined by its room version
- if not room_version.msc2403_knocking:
+ if membership_type == Membership.KNOCK and not room_version.msc2403_knocking:
raise SynapseError(
403,
"This room version does not support knocking",
errcode=Codes.FORBIDDEN,
)
- pdu = event_from_pdu_json(content, room_version)
+ event = event_from_pdu_json(content, room_version)
- origin_host, _ = parse_server_name(origin)
- await self.check_server_matches_acl(origin_host, pdu.room_id)
+ if event.type != EventTypes.Member or not event.is_state():
+ raise SynapseError(400, "Not an m.room.member event", Codes.BAD_JSON)
- logger.debug("on_send_knock_request: pdu sigs: %s", pdu.signatures)
+ if event.content.get("membership") != membership_type:
+ raise SynapseError(400, "Not a %s event" % membership_type, Codes.BAD_JSON)
- pdu = await self._check_sigs_and_hash(room_version, pdu)
+ origin_host, _ = parse_server_name(origin)
+ await self.check_server_matches_acl(origin_host, event.room_id)
- # Handle the event, and retrieve the EventContext
- event_context = await self.handler.on_send_knock_request(origin, pdu)
+ logger.debug("_on_send_membership_event: pdu sigs: %s", event.signatures)
- # Retrieve stripped state events from the room and send them back to the remote
- # server. This will allow the remote server's clients to display information
- # related to the room while the knock request is pending.
- stripped_room_state = (
- await self.store.get_stripped_room_state_from_event_context(
- event_context, self._room_prejoin_state_types
- )
- )
- return {"knock_state_events": stripped_room_state}
+ event = await self._check_sigs_and_hash(room_version, event)
+
+ return await self.handler.on_send_membership_event(origin, event)
async def on_event_auth(
self, origin: str, room_id: str, event_id: str
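The preliminary checks the new helper performs, condensed into a standalone sketch (plain dicts and ValueError stand in for Synapse's event objects and SynapseError):

    def validate_membership_body(event: dict, room_id: str, membership: str) -> None:
        if event.get("room_id") != room_id:
            raise ValueError("Room ID in body does not match that in request path")
        if event.get("type") != "m.room.member" or "state_key" not in event:
            raise ValueError("Not an m.room.member event")
        if event.get("content", {}).get("membership") != membership:
            raise ValueError("Not a %s event" % membership)

    validate_membership_body(
        {
            "room_id": "!r:example.org",
            "type": "m.room.member",
            "state_key": "@u:example.org",
            "content": {"membership": "join"},
        },
        "!r:example.org",
        "join",
    )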
@@ -860,32 +923,39 @@ class FederationServer(FederationBase):
room_id: str,
room_version: RoomVersion,
lock: Lock,
- latest_origin: str,
- latest_event: EventBase,
+ latest_origin: Optional[str] = None,
+ latest_event: Optional[EventBase] = None,
) -> None:
"""Process events in the staging area for the given room.
The latest_origin and latest_event args are the latest origin and event
- received.
+ received (or None to simply pull the next event from the database).
"""
# The common path is for the event we just received to be the only event in
# the room, so instead of pulling the event out of the DB and parsing
# the event we just pull out the next event ID and check if that matches.
- next_origin, next_event_id = await self.store.get_next_staged_event_id_for_room(
- room_id
- )
- if next_origin == latest_origin and next_event_id == latest_event.event_id:
- origin = latest_origin
- event = latest_event
- else:
+ if latest_event is not None and latest_origin is not None:
+ (
+ next_origin,
+ next_event_id,
+ ) = await self.store.get_next_staged_event_id_for_room(room_id)
+ if next_origin != latest_origin or next_event_id != latest_event.event_id:
+ latest_origin = None
+ latest_event = None
+
+ if latest_origin is None or latest_event is None:
next = await self.store.get_next_staged_event_for_room(
room_id, room_version
)
if not next:
+ await lock.release()
return
origin, event = next
+ else:
+ origin = latest_origin
+ event = latest_event
# We loop round until there are no more events in the room in the
# staging area, or we fail to get the lock (which means another process
@@ -909,9 +979,13 @@ class FederationServer(FederationBase):
exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
)
- await self.store.remove_received_event_from_staging(
+ received_ts = await self.store.remove_received_event_from_staging(
origin, event.event_id
)
+ if received_ts is not None:
+ pdu_process_time.observe(
+ (self._clock.time_msec() - received_ts) / 1000
+ )
# We need to do this check outside the lock to avoid a race between
# a new event being inserted by another instance and it attempting
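With the timer context manager removed earlier in this file, pdu_process_time now measures time spent in the staging area rather than handler wall-clock time. A sketch using prometheus_client directly (metric name illustrative; the real metric is defined in this module):

    import time

    from prometheus_client import Histogram

    pdu_process_time = Histogram(
        "synapse_federation_server_pdu_process_time",
        "Time taken to process an event (seconds)",
    )

    def on_removed_from_staging(received_ts_ms: int, now_ms: int) -> None:
        # received_ts is recorded when the PDU entered the staging area, so
        # the observation covers queueing delay as well as processing.
        pdu_process_time.observe((now_ms - received_ts_ms) / 1000)

    now = int(time.time() * 1000)
    on_removed_from_staging(received_ts_ms=now - 250, now_ms=now)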
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index bed47f8a..d37d9565 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -15,7 +15,19 @@
import functools
import logging
import re
-from typing import Container, Mapping, Optional, Sequence, Tuple, Type
+from typing import (
+ Container,
+ Dict,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+
+from typing_extensions import Literal
import synapse
from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
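The newly imported Literal underpins a convention used throughout the annotations below: bodyless methods take content: Literal[None]. A minimal sketch (hypothetical handler functions):

    from typing_extensions import Literal

    JsonDict = dict

    def on_get(content: Literal[None]) -> None:
        # GETs (and most DELETEs) have no body, so mypy rejects any caller
        # that passes a parsed JSON body here.
        assert content is None

    def on_put(content: JsonDict) -> None:
        # PUT/POST handlers receive the parsed JSON body.
        assert isinstance(content, dict)

    on_get(None)
    on_put({"pdus": []})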
@@ -56,15 +68,15 @@ logger = logging.getLogger(__name__)
class TransportLayerServer(JsonResource):
"""Handles incoming federation HTTP requests"""
- def __init__(self, hs, servlet_groups=None):
+ def __init__(self, hs: HomeServer, servlet_groups: Optional[List[str]] = None):
"""Initialize the TransportLayerServer
Will by default register all servlets. For custom behaviour, pass in
a list of servlet_groups to register.
Args:
- hs (synapse.server.HomeServer): homeserver
- servlet_groups (list[str], optional): List of servlet groups to register.
+ hs: homeserver
+ servlet_groups: List of servlet groups to register.
Defaults to ``DEFAULT_SERVLET_GROUPS``.
"""
self.hs = hs
@@ -78,7 +90,7 @@ class TransportLayerServer(JsonResource):
self.register_servlets()
- def register_servlets(self):
+ def register_servlets(self) -> None:
register_servlets(
self.hs,
resource=self,
@@ -91,14 +103,10 @@ class TransportLayerServer(JsonResource):
class AuthenticationError(SynapseError):
"""There was a problem authenticating the request"""
- pass
-
class NoAuthenticationError(AuthenticationError):
"""The request had no authentication information"""
- pass
-
class Authenticator:
def __init__(self, hs: HomeServer):
@@ -410,13 +418,18 @@ class FederationSendServlet(BaseFederationServerServlet):
RATELIMIT = False
# This is when someone is trying to send us a bunch of data.
- async def on_PUT(self, origin, content, query, transaction_id):
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ transaction_id: str,
+ ) -> Tuple[int, JsonDict]:
"""Called on PUT /send/<transaction_id>/
Args:
- request (twisted.web.http.Request): The HTTP request.
- transaction_id (str): The transaction_id associated with this
- request. This is *not* None.
+ transaction_id: The transaction_id associated with this request. This
+ is *not* None.
Returns:
Tuple of `(code, response)`, where
@@ -461,7 +474,13 @@ class FederationEventServlet(BaseFederationServerServlet):
PATH = "/event/(?P<event_id>[^/]*)/?"
# This is when someone asks for a data item for a given server data_id pair.
- async def on_GET(self, origin, content, query, event_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ event_id: str,
+ ) -> Tuple[int, Union[JsonDict, str]]:
return await self.handler.on_pdu_request(origin, event_id)
@@ -469,7 +488,13 @@ class FederationStateV1Servlet(BaseFederationServerServlet):
PATH = "/state/(?P<room_id>[^/]*)/?"
# This is when someone asks for all data for a given room.
- async def on_GET(self, origin, content, query, room_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
return await self.handler.on_room_state_request(
origin,
room_id,
@@ -480,7 +505,13 @@ class FederationStateV1Servlet(BaseFederationServerServlet):
class FederationStateIdsServlet(BaseFederationServerServlet):
PATH = "/state_ids/(?P<room_id>[^/]*)/?"
- async def on_GET(self, origin, content, query, room_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
return await self.handler.on_state_ids_request(
origin,
room_id,
@@ -491,7 +522,13 @@ class FederationStateIdsServlet(BaseFederationServerServlet):
class FederationBackfillServlet(BaseFederationServerServlet):
PATH = "/backfill/(?P<room_id>[^/]*)/?"
- async def on_GET(self, origin, content, query, room_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
versions = [x.decode("ascii") for x in query[b"v"]]
limit = parse_integer_from_args(query, "limit", None)
@@ -505,7 +542,13 @@ class FederationQueryServlet(BaseFederationServerServlet):
PATH = "/query/(?P<query_type>[^/]*)"
# This is when we receive a server-server Query
- async def on_GET(self, origin, content, query, query_type):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ query_type: str,
+ ) -> Tuple[int, JsonDict]:
args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}
args["origin"] = origin
return await self.handler.on_query_request(query_type, args)
@@ -514,47 +557,66 @@ class FederationQueryServlet(BaseFederationServerServlet):
class FederationMakeJoinServlet(BaseFederationServerServlet):
PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
- async def on_GET(self, origin, _content, query, room_id, user_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
"""
Args:
- origin (unicode): The authenticated server_name of the calling server
+ origin: The authenticated server_name of the calling server
- _content (None): (GETs don't have bodies)
+ content: (GETs don't have bodies)
- query (dict[bytes, list[bytes]]): Query params from the request.
+ query: Query params from the request.
- **kwargs (dict[unicode, unicode]): the dict mapping keys to path
- components as specified in the path match regexp.
+ **kwargs: the dict mapping keys to path components as specified in
+ the path match regexp.
Returns:
- Tuple[int, object]: (response code, response object)
+ Tuple of (response code, response object)
"""
- versions = query.get(b"ver")
- if versions is not None:
- supported_versions = [v.decode("utf-8") for v in versions]
- else:
+ supported_versions = parse_strings_from_args(query, "ver", encoding="utf-8")
+ if supported_versions is None:
supported_versions = ["1"]
- content = await self.handler.on_make_join_request(
+ result = await self.handler.on_make_join_request(
origin, room_id, user_id, supported_versions=supported_versions
)
- return 200, content
+ return 200, result
class FederationMakeLeaveServlet(BaseFederationServerServlet):
PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
- async def on_GET(self, origin, content, query, room_id, user_id):
- content = await self.handler.on_make_leave_request(origin, room_id, user_id)
- return 200, content
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ result = await self.handler.on_make_leave_request(origin, room_id, user_id)
+ return 200, result
class FederationV1SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
- async def on_PUT(self, origin, content, query, room_id, event_id):
- content = await self.handler.on_send_leave_request(origin, content)
- return 200, (200, content)
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, Tuple[int, JsonDict]]:
+ result = await self.handler.on_send_leave_request(origin, content, room_id)
+ return 200, (200, result)
class FederationV2SendLeaveServlet(BaseFederationServerServlet):
@@ -562,50 +624,84 @@ class FederationV2SendLeaveServlet(BaseFederationServerServlet):
PREFIX = FEDERATION_V2_PREFIX
- async def on_PUT(self, origin, content, query, room_id, event_id):
- content = await self.handler.on_send_leave_request(origin, content)
- return 200, content
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
+ result = await self.handler.on_send_leave_request(origin, content, room_id)
+ return 200, result
class FederationMakeKnockServlet(BaseFederationServerServlet):
PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
- async def on_GET(self, origin, content, query, room_id, user_id):
- try:
- # Retrieve the room versions the remote homeserver claims to support
- supported_versions = parse_strings_from_args(query, "ver", encoding="utf-8")
- except KeyError:
- raise SynapseError(400, "Missing required query parameter 'ver'")
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ # Retrieve the room versions the remote homeserver claims to support
+ supported_versions = parse_strings_from_args(
+ query, "ver", required=True, encoding="utf-8"
+ )
- content = await self.handler.on_make_knock_request(
+ result = await self.handler.on_make_knock_request(
origin, room_id, user_id, supported_versions=supported_versions
)
- return 200, content
+ return 200, result
class FederationV1SendKnockServlet(BaseFederationServerServlet):
PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
- async def on_PUT(self, origin, content, query, room_id, event_id):
- content = await self.handler.on_send_knock_request(origin, content, room_id)
- return 200, content
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
+ result = await self.handler.on_send_knock_request(origin, content, room_id)
+ return 200, result
class FederationEventAuthServlet(BaseFederationServerServlet):
PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
- async def on_GET(self, origin, content, query, room_id, event_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
return await self.handler.on_event_auth(origin, room_id, event_id)
class FederationV1SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
- async def on_PUT(self, origin, content, query, room_id, event_id):
- # TODO(paul): assert that room_id/event_id parsed from path actually
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, Tuple[int, JsonDict]]:
+ # TODO(paul): assert that event_id parsed from path actually
# matches that given in content
- content = await self.handler.on_send_join_request(origin, content)
- return 200, (200, content)
+ result = await self.handler.on_send_join_request(origin, content, room_id)
+ return 200, (200, result)
class FederationV2SendJoinServlet(BaseFederationServerServlet):
@@ -613,28 +709,42 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
PREFIX = FEDERATION_V2_PREFIX
- async def on_PUT(self, origin, content, query, room_id, event_id):
- # TODO(paul): assert that room_id/event_id parsed from path actually
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
+ # TODO(paul): assert that event_id parsed from path actually
# matches that given in content
- content = await self.handler.on_send_join_request(origin, content)
- return 200, content
+ result = await self.handler.on_send_join_request(origin, content, room_id)
+ return 200, result
class FederationV1InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
- async def on_PUT(self, origin, content, query, room_id, event_id):
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, Tuple[int, JsonDict]]:
# We don't get a room version, so we have to assume it's EITHER v1 or
# v2. This is "fine" as the only difference between V1 and V2 is the
# state resolution algorithm, and we don't use that for processing
# invites
- content = await self.handler.on_invite_request(
+ result = await self.handler.on_invite_request(
origin, content, room_version_id=RoomVersions.V1.identifier
)
# V1 federation API is defined to return a content of `[200, {...}]`
# due to a historical bug.
- return 200, (200, content)
+ return 200, (200, result)
class FederationV2InviteServlet(BaseFederationServerServlet):
@@ -642,7 +752,14 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
PREFIX = FEDERATION_V2_PREFIX
- async def on_PUT(self, origin, content, query, room_id, event_id):
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
# TODO(paul): assert that room_id/event_id parsed from path actually
# match those given in content
@@ -655,16 +772,22 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
- content = await self.handler.on_invite_request(
+ result = await self.handler.on_invite_request(
origin, event, room_version_id=room_version
)
- return 200, content
+ return 200, result
class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
- async def on_PUT(self, origin, content, query, room_id):
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
await self.handler.on_exchange_third_party_invite_request(content)
return 200, {}
@@ -672,21 +795,31 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
class FederationClientKeysQueryServlet(BaseFederationServerServlet):
PATH = "/user/keys/query"
- async def on_POST(self, origin, content, query):
+ async def on_POST(
+ self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
return await self.handler.on_query_client_keys(origin, content)
class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
PATH = "/user/devices/(?P<user_id>[^/]*)"
- async def on_GET(self, origin, content, query, user_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
return await self.handler.on_query_user_devices(origin, user_id)
class FederationClientKeysClaimServlet(BaseFederationServerServlet):
PATH = "/user/keys/claim"
- async def on_POST(self, origin, content, query):
+ async def on_POST(
+ self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
response = await self.handler.on_claim_client_keys(origin, content)
return 200, response
@@ -695,12 +828,18 @@ class FederationGetMissingEventsServlet(BaseFederationServerServlet):
# TODO(paul): Why does this path alone end with an optional "/?"?
PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
- async def on_POST(self, origin, content, query, room_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
limit = int(content.get("limit", 10))
earliest_events = content.get("earliest_events", [])
latest_events = content.get("latest_events", [])
- content = await self.handler.on_get_missing_events(
+ result = await self.handler.on_get_missing_events(
origin,
room_id=room_id,
earliest_events=earliest_events,
@@ -708,7 +847,7 @@ class FederationGetMissingEventsServlet(BaseFederationServerServlet):
limit=limit,
)
- return 200, content
+ return 200, result
class On3pidBindServlet(BaseFederationServerServlet):
@@ -716,7 +855,9 @@ class On3pidBindServlet(BaseFederationServerServlet):
REQUIRE_AUTH = False
- async def on_POST(self, origin, content, query):
+ async def on_POST(
+ self, origin: Optional[str], content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
if "invites" in content:
last_exception = None
for invite in content["invites"]:
@@ -762,15 +903,20 @@ class OpenIdUserInfo(BaseFederationServerServlet):
REQUIRE_AUTH = False
- async def on_GET(self, origin, content, query):
- token = query.get(b"access_token", [None])[0]
+ async def on_GET(
+ self,
+ origin: Optional[str],
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ ) -> Tuple[int, JsonDict]:
+ token = parse_string_from_args(query, "access_token")
if token is None:
return (
401,
{"errcode": "M_MISSING_TOKEN", "error": "Access Token required"},
)
- user_id = await self.handler.on_openid_userinfo(token.decode("ascii"))
+ user_id = await self.handler.on_openid_userinfo(token)
if user_id is None:
return (
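The helper replaces ad-hoc `query.get(...)[0].decode(...)` handling; a simplified stand-in showing the behaviour relied on here (the real helper, imported from synapse.http.servlet in this file, also supports required/default/allowed_values):

    from typing import Dict, List, Optional

    def parse_string_from_args(
        query: Dict[bytes, List[bytes]], name: str, encoding: str = "ascii"
    ) -> Optional[str]:
        values = query.get(name.encode("ascii"))
        if not values:
            return None      # absent parameter -> None, not an IndexError
        return values[0].decode(encoding)

    assert parse_string_from_args({b"access_token": [b"t0ken"]}, "access_token") == "t0ken"
    assert parse_string_from_args({}, "access_token") is None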
@@ -829,7 +975,9 @@ class PublicRoomList(BaseFederationServlet):
self.handler = hs.get_room_list_handler()
self.allow_access = allow_access
- async def on_GET(self, origin, content, query):
+ async def on_GET(
+ self, origin: str, content: Literal[None], query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
if not self.allow_access:
raise FederationDeniedError(origin)
@@ -858,7 +1006,9 @@ class PublicRoomList(BaseFederationServlet):
)
return 200, data
- async def on_POST(self, origin, content, query):
+ async def on_POST(
+ self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
# This implements MSC2197 (Search Filtering over Federation)
if not self.allow_access:
raise FederationDeniedError(origin)
@@ -904,7 +1054,12 @@ class FederationVersionServlet(BaseFederationServlet):
REQUIRE_AUTH = False
- async def on_GET(self, origin, content, query):
+ async def on_GET(
+ self,
+ origin: Optional[str],
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ ) -> Tuple[int, JsonDict]:
return (
200,
{"server": {"name": "Synapse", "version": get_version_string(synapse)}},
@@ -933,7 +1088,13 @@ class FederationGroupsProfileServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/profile"
- async def on_GET(self, origin, content, query, group_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -942,7 +1103,13 @@ class FederationGroupsProfileServlet(BaseGroupsServerServlet):
return 200, new_content
- async def on_POST(self, origin, content, query, group_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -957,7 +1124,13 @@ class FederationGroupsProfileServlet(BaseGroupsServerServlet):
class FederationGroupsSummaryServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/summary"
- async def on_GET(self, origin, content, query, group_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -972,7 +1145,13 @@ class FederationGroupsRoomsServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/rooms"
- async def on_GET(self, origin, content, query, group_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -987,7 +1166,14 @@ class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
- async def on_POST(self, origin, content, query, group_id, room_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -998,7 +1184,14 @@ class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
return 200, new_content
- async def on_DELETE(self, origin, content, query, group_id, room_id):
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1018,7 +1211,15 @@ class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet):
"/config/(?P<config_key>[^/]*)"
)
- async def on_POST(self, origin, content, query, group_id, room_id, config_key):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ room_id: str,
+ config_key: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1035,7 +1236,13 @@ class FederationGroupsUsersServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/users"
- async def on_GET(self, origin, content, query, group_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1050,7 +1257,13 @@ class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/invited_users"
- async def on_GET(self, origin, content, query, group_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1067,7 +1280,14 @@ class FederationGroupsInviteServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
- async def on_POST(self, origin, content, query, group_id, user_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1084,7 +1304,14 @@ class FederationGroupsAcceptInviteServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"
- async def on_POST(self, origin, content, query, group_id, user_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
if get_domain_from_id(user_id) != origin:
raise SynapseError(403, "user_id doesn't match origin")
@@ -1098,7 +1325,14 @@ class FederationGroupsJoinServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"
- async def on_POST(self, origin, content, query, group_id, user_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
if get_domain_from_id(user_id) != origin:
raise SynapseError(403, "user_id doesn't match origin")
@@ -1112,7 +1346,14 @@ class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
- async def on_POST(self, origin, content, query, group_id, user_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1146,7 +1387,14 @@ class FederationGroupsLocalInviteServlet(BaseGroupsLocalServlet):
PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
- async def on_POST(self, origin, content, query, group_id, user_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
if get_domain_from_id(group_id) != origin:
raise SynapseError(403, "group_id doesn't match origin")
@@ -1164,7 +1412,14 @@ class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet):
PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
- async def on_POST(self, origin, content, query, group_id, user_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, None]:
if get_domain_from_id(group_id) != origin:
raise SynapseError(403, "user_id doesn't match origin")
@@ -1172,11 +1427,9 @@ class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet):
self.handler, GroupsLocalHandler
), "Workers cannot handle group removals."
- new_content = await self.handler.user_removed_from_group(
- group_id, user_id, content
- )
+ await self.handler.user_removed_from_group(group_id, user_id, content)
- return 200, new_content
+ return 200, None
class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
@@ -1194,7 +1447,14 @@ class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_groups_attestation_renewer()
- async def on_POST(self, origin, content, query, group_id, user_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
# We don't need to check auth here as we check the attestation signatures
new_content = await self.handler.on_renew_attestation(
@@ -1218,7 +1478,15 @@ class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
"/rooms/(?P<room_id>[^/]*)"
)
- async def on_POST(self, origin, content, query, group_id, category_id, room_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1246,7 +1514,15 @@ class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
return 200, resp
- async def on_DELETE(self, origin, content, query, group_id, category_id, room_id):
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1266,7 +1542,13 @@ class FederationGroupsCategoriesServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/categories/?"
- async def on_GET(self, origin, content, query, group_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1281,7 +1563,14 @@ class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"
- async def on_GET(self, origin, content, query, group_id, category_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1292,7 +1581,14 @@ class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
return 200, resp
- async def on_POST(self, origin, content, query, group_id, category_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1314,7 +1610,14 @@ class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
return 200, resp
- async def on_DELETE(self, origin, content, query, group_id, category_id):
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ category_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1334,7 +1637,13 @@ class FederationGroupsRolesServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/roles/?"
- async def on_GET(self, origin, content, query, group_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1349,7 +1658,14 @@ class FederationGroupsRoleServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"
- async def on_GET(self, origin, content, query, group_id, role_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1358,7 +1674,14 @@ class FederationGroupsRoleServlet(BaseGroupsServerServlet):
return 200, resp
- async def on_POST(self, origin, content, query, group_id, role_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1382,7 +1705,14 @@ class FederationGroupsRoleServlet(BaseGroupsServerServlet):
return 200, resp
- async def on_DELETE(self, origin, content, query, group_id, role_id):
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1411,7 +1741,15 @@ class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
"/users/(?P<user_id>[^/]*)"
)
- async def on_POST(self, origin, content, query, group_id, role_id, user_id):
+ async def on_POST(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1437,7 +1775,15 @@ class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
return 200, resp
- async def on_DELETE(self, origin, content, query, group_id, role_id, user_id):
+ async def on_DELETE(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ role_id: str,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1457,7 +1803,9 @@ class FederationGroupsBulkPublicisedServlet(BaseGroupsLocalServlet):
PATH = "/get_groups_publicised"
- async def on_POST(self, origin, content, query):
+ async def on_POST(
+ self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
resp = await self.handler.bulk_get_publicised_groups(
content["user_ids"], proxy=False
)
@@ -1470,7 +1818,13 @@ class FederationGroupsSettingJoinPolicyServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"
- async def on_PUT(self, origin, content, query, group_id):
+ async def on_PUT(
+ self,
+ origin: str,
+ content: JsonDict,
+ query: Dict[bytes, List[bytes]],
+ group_id: str,
+ ) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(query, "requester_user_id")
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
@@ -1499,7 +1853,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
async def on_GET(
self,
origin: str,
- content: JsonDict,
+ content: Literal[None],
query: Mapping[bytes, Sequence[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
@@ -1571,7 +1925,13 @@ class RoomComplexityServlet(BaseFederationServlet):
super().__init__(hs, authenticator, ratelimiter, server_name)
self._store = self.hs.get_datastore()
- async def on_GET(self, origin, content, query, room_id):
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: Dict[bytes, List[bytes]],
+ room_id: str,
+ ) -> Tuple[int, JsonDict]:
is_public = await self._store.is_room_world_readable_or_publicly_joinable(
room_id
)
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index f72ded03..d75a8b15 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -62,9 +62,16 @@ class AdminHandler(BaseHandler):
if ret:
profile = await self.store.get_profileinfo(user.localpart)
threepids = await self.store.user_get_threepids(user.to_string())
+ external_ids = [
+ ({"auth_provider": auth_provider, "external_id": external_id})
+ for auth_provider, external_id in await self.store.get_external_ids_by_user(
+ user.to_string()
+ )
+ ]
ret["displayname"] = profile.display_name
ret["avatar_url"] = profile.avatar_url
ret["threepids"] = threepids
+ ret["external_ids"] = external_ids
return ret
async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> Any:
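Illustratively, the admin user-query response gains a field shaped like this (provider and identifier values are made up for the example):

    example_fragment = {
        "displayname": "Alice",
        "avatar_url": "mxc://example.org/abc123",
        "threepids": [{"medium": "email", "address": "alice@example.org"}],
        "external_ids": [
            {"auth_provider": "oidc-myidp", "external_id": "alice.jones"}
        ],
    }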
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 1971e373..e2ac595a 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -30,6 +30,7 @@ from typing import (
Optional,
Tuple,
Union,
+ cast,
)
import attr
@@ -72,6 +73,7 @@ from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
if TYPE_CHECKING:
+ from synapse.rest.client.v1.login import LoginResponse
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -777,6 +779,108 @@ class AuthHandler(BaseHandler):
"params": params,
}
+ async def refresh_token(
+ self,
+ refresh_token: str,
+ valid_until_ms: Optional[int],
+ ) -> Tuple[str, str]:
+ """
+ Consumes a refresh token and generates both a new access token and a new refresh token from it.
+
+ The consumed refresh token is considered invalid after the first use of the new access token or the new refresh token.
+
+ Args:
+ refresh_token: The token to consume.
+ valid_until_ms: The expiration timestamp of the new access token.
+
+ Returns:
+ A tuple containing the new access token and refresh token
+ """
+
+ # Verify the token signature first before looking up the token
+ if not self._verify_refresh_token(refresh_token):
+ raise SynapseError(401, "invalid refresh token", Codes.UNKNOWN_TOKEN)
+
+ existing_token = await self.store.lookup_refresh_token(refresh_token)
+ if existing_token is None:
+ raise SynapseError(401, "refresh token does not exist", Codes.UNKNOWN_TOKEN)
+
+ if (
+ existing_token.has_next_access_token_been_used
+ or existing_token.has_next_refresh_token_been_refreshed
+ ):
+ raise SynapseError(
+ 403, "refresh token isn't valid anymore", Codes.FORBIDDEN
+ )
+
+ (
+ new_refresh_token,
+ new_refresh_token_id,
+ ) = await self.get_refresh_token_for_user_id(
+ user_id=existing_token.user_id, device_id=existing_token.device_id
+ )
+ access_token = await self.get_access_token_for_user_id(
+ user_id=existing_token.user_id,
+ device_id=existing_token.device_id,
+ valid_until_ms=valid_until_ms,
+ refresh_token_id=new_refresh_token_id,
+ )
+ await self.store.replace_refresh_token(
+ existing_token.token_id, new_refresh_token_id
+ )
+ return access_token, new_refresh_token
+
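As a minimal, self-contained sketch of the one-time-use rotation rule described above (illustrative semantics only; `RefreshStore`, `Record` and the token prefixes are invented for the example and are not Synapse's storage API):

    import secrets
    from dataclasses import dataclass
    from typing import Dict, Optional, Tuple

    @dataclass
    class Record:
        user_id: str
        successor: Optional[str] = None  # refresh token this one was exchanged for
        successor_used: bool = False     # set once the new pair is first used

    class RefreshStore:
        def __init__(self) -> None:
            self.records: Dict[str, Record] = {}

        def issue(self, user_id: str) -> str:
            token = "syr_" + secrets.token_hex(8)
            self.records[token] = Record(user_id)
            return token

        def refresh(self, old: str) -> Tuple[str, str]:
            rec = self.records.get(old)
            if rec is None:
                raise KeyError("refresh token does not exist")
            if rec.successor_used:
                raise PermissionError("refresh token isn't valid anymore")
            new_refresh = self.issue(rec.user_id)
            rec.successor = new_refresh
            new_access = "syt_" + secrets.token_hex(8)
            return new_access, new_refresh

        def mark_pair_used(self, refresh_token: str) -> None:
            # Called the first time the new access or refresh token is used;
            # this is what finally invalidates the consumed predecessor.
            for rec in self.records.values():
                if rec.successor == refresh_token:
                    rec.successor_used = True

    store = RefreshStore()
    r1 = store.issue("@alice:example.org")
    access, r2 = store.refresh(r1)
    store.mark_pair_used(r2)  # first use of the new pair...
    try:
        store.refresh(r1)     # ...after which r1 can no longer be refreshed
    except PermissionError:
        pass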
+ def _verify_refresh_token(self, token: str) -> bool:
+ """
+ Verifies the shape of a refresh token.
+
+ Args:
+ token: The refresh token to verify
+
+ Returns:
+ Whether the token has the right shape
+ """
+ parts = token.split("_", maxsplit=4)
+ if len(parts) != 4:
+ return False
+
+ type, localpart, rand, crc = parts
+
+ # Refresh tokens are prefixed by "syr_", let's check that
+ if type != "syr":
+ return False
+
+ # Check the CRC
+ base = f"{type}_{localpart}_{rand}"
+ expected_crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
+ if crc != expected_crc:
+ return False
+
+ return True
+
+ async def get_refresh_token_for_user_id(
+ self,
+ user_id: str,
+ device_id: str,
+ ) -> Tuple[str, int]:
+ """
+ Creates a new refresh token for the user with the given user ID.
+
+ Args:
+ user_id: canonical user ID
+ device_id: the device ID to associate with the token.
+
+ Returns:
+ The newly created refresh token and its ID in the database
+ """
+ refresh_token = self.generate_refresh_token(UserID.from_string(user_id))
+ refresh_token_id = await self.store.add_refresh_token_to_user(
+ user_id=user_id,
+ token=refresh_token,
+ device_id=device_id,
+ )
+ return refresh_token, refresh_token_id
+
async def get_access_token_for_user_id(
self,
user_id: str,
@@ -784,6 +888,7 @@ class AuthHandler(BaseHandler):
valid_until_ms: Optional[int],
puppets_user_id: Optional[str] = None,
is_appservice_ghost: bool = False,
+ refresh_token_id: Optional[int] = None,
) -> str:
"""
Creates a new access token for the user with the given user ID.
@@ -801,6 +906,8 @@ class AuthHandler(BaseHandler):
valid_until_ms: when the token is valid until. None for
no expiry.
is_appservice_ghost: Whether the user is an application ghost user
+ refresh_token_id: the refresh token ID that will be associated with
+ this access token.
Returns:
The access token for the user's session.
Raises:
@@ -836,6 +943,7 @@ class AuthHandler(BaseHandler):
device_id=device_id,
valid_until_ms=valid_until_ms,
puppets_user_id=puppets_user_id,
+ refresh_token_id=refresh_token_id,
)
# the device *should* have been registered before we got here; however,
@@ -928,7 +1036,7 @@ class AuthHandler(BaseHandler):
self,
login_submission: Dict[str, Any],
ratelimit: bool = False,
- ) -> Tuple[str, Optional[Callable[[Dict[str, str]], Awaitable[None]]]]:
+ ) -> Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]:
"""Authenticates the user for the /login API
Also used by the user-interactive auth flow to validate auth types which don't
@@ -1073,7 +1181,7 @@ class AuthHandler(BaseHandler):
self,
username: str,
login_submission: Dict[str, Any],
- ) -> Tuple[str, Optional[Callable[[Dict[str, str]], Awaitable[None]]]]:
+ ) -> Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]:
"""Helper for validate_login
Handles login, once we've mapped 3pids onto userids
@@ -1151,7 +1259,7 @@ class AuthHandler(BaseHandler):
async def check_password_provider_3pid(
self, medium: str, address: str, password: str
- ) -> Tuple[Optional[str], Optional[Callable[[Dict[str, str]], Awaitable[None]]]]:
+ ) -> Tuple[Optional[str], Optional[Callable[["LoginResponse"], Awaitable[None]]]]:
"""Check if a password provider is able to validate a thirdparty login
Args:
@@ -1215,6 +1323,19 @@ class AuthHandler(BaseHandler):
crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
return f"{base}_{crc}"
+ def generate_refresh_token(self, for_user: UserID) -> str:
+ """Generates an opaque string, for use as a refresh token"""
+
+ # we use the following format for refresh tokens:
+ # syr_<base64 local part>_<random string>_<base62 crc check>
+
+ b64local = unpaddedbase64.encode_base64(for_user.localpart.encode("utf-8"))
+ random_string = stringutils.random_string(20)
+ base = f"syr_{b64local}_{random_string}"
+
+ crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
+ return f"{base}_{crc}"
+
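For illustration, the token scheme above can be reproduced outside Synapse with just `zlib.crc32`, unpadded base64 and a base62 helper. This sketch assumes the conventional base62 alphabet (digits, then lowercase, then uppercase); it is a stand-in for, not a copy of, the `stringutils` implementation:

    import base64
    import secrets
    import string
    from zlib import crc32

    BASE62 = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

    def base62_encode(num: int, minwidth: int = 1) -> str:
        digits = ""
        while num:
            num, rem = divmod(num, 62)
            digits = BASE62[rem] + digits
        return digits.rjust(minwidth, "0")

    def make_refresh_token(localpart: str) -> str:
        b64local = base64.b64encode(localpart.encode("utf-8")).decode("ascii").rstrip("=")
        rand = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20))
        base = f"syr_{b64local}_{rand}"
        return f"{base}_{base62_encode(crc32(base.encode('ascii')), minwidth=6)}"

    def looks_like_refresh_token(token: str) -> bool:
        parts = token.split("_", maxsplit=4)
        if len(parts) != 4 or parts[0] != "syr":
            return False
        base = "_".join(parts[:3])
        return parts[3] == base62_encode(crc32(base.encode("ascii")), minwidth=6)

    assert looks_like_refresh_token(make_refresh_token("alice"))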
async def validate_short_term_login_token(
self, login_token: str
) -> LoginTokenAttributes:
@@ -1563,7 +1684,7 @@ class AuthHandler(BaseHandler):
)
respond_with_html(request, 200, html)
- async def _sso_login_callback(self, login_result: JsonDict) -> None:
+ async def _sso_login_callback(self, login_result: "LoginResponse") -> None:
"""
A login callback which might add additional attributes to the login response.
@@ -1577,7 +1698,8 @@ class AuthHandler(BaseHandler):
extra_attributes = self._extra_attributes.get(login_result["user_id"])
if extra_attributes:
- login_result.update(extra_attributes.extra_attributes)
+ login_result_dict = cast(Dict[str, Any], login_result)
+ login_result_dict.update(extra_attributes.extra_attributes)
def _expire_sso_extra_attributes(self) -> None:
"""
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index 989996b6..41dbdfd0 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -11,8 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import TYPE_CHECKING, Collection, Optional
+from typing import TYPE_CHECKING, Collection, List, Optional, Union
+from synapse import event_auth
from synapse.api.constants import (
EventTypes,
JoinRules,
@@ -20,9 +21,11 @@ from synapse.api.constants import (
RestrictedJoinRuleTypes,
)
from synapse.api.errors import AuthError
-from synapse.api.room_versions import RoomVersion
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
+from synapse.events.builder import EventBuilder
from synapse.types import StateMap
+from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -34,8 +37,63 @@ class EventAuthHandler:
"""
def __init__(self, hs: "HomeServer"):
+ self._clock = hs.get_clock()
self._store = hs.get_datastore()
+ async def check_from_context(
+ self, room_version: str, event, context, do_sig_check=True
+ ) -> None:
+ auth_event_ids = event.auth_event_ids()
+ auth_events_by_id = await self._store.get_events(auth_event_ids)
+ auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}
+
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+ event_auth.check(
+ room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
+ )
+
+ def compute_auth_events(
+ self,
+ event: Union[EventBase, EventBuilder],
+ current_state_ids: StateMap[str],
+ for_verification: bool = False,
+ ) -> List[str]:
+ """Given an event and current state return the list of event IDs used
+ to auth an event.
+
+ If `for_verification` is False then only return auth events that
+ should be added to the event's `auth_events`.
+
+ Returns:
+ List of event IDs.
+ """
+
+ if event.type == EventTypes.Create:
+ return []
+
+ # Currently we ignore the `for_verification` flag even though there are
+ # some situations where we can drop particular auth events when adding
+ # to the event's `auth_events` (e.g. joins pointing to previous joins
+ # when room is publicly joinable). Dropping event IDs has the
+ # advantage that the auth chain for the room grows slower, but we use
+ # the auth chain in state resolution v2 to order events, which means
+ # care must be taken if dropping events to ensure that it doesn't
+ # introduce undesirable "state reset" behaviour.
+ #
+ # All of which sounds a bit tricky so we don't bother for now.
+
+ auth_ids = []
+ for etype, state_key in event_auth.auth_types_for_event(event):
+ auth_ev_id = current_state_ids.get((etype, state_key))
+ if auth_ev_id:
+ auth_ids.append(auth_ev_id)
+
+ return auth_ids
+
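A condensed, illustrative restatement of the selection loop above; the real `auth_types_for_event` also considers join rules, third-party invites and the target's membership for `m.room.member` events, which this sketch omits:

    def compute_auth_event_ids(event_type, sender, current_state_ids):
        # A create event has nothing to auth against.
        if event_type == "m.room.create":
            return []
        # Otherwise pick the subset of current state that auths the event.
        wanted = [
            ("m.room.create", ""),
            ("m.room.power_levels", ""),
            ("m.room.member", sender),
        ]
        return [current_state_ids[key] for key in wanted if key in current_state_ids]

    state = {
        ("m.room.create", ""): "$create",
        ("m.room.power_levels", ""): "$power",
        ("m.room.member", "@alice:example.org"): "$alice",
    }
    assert compute_auth_event_ids("m.room.message", "@alice:example.org", state) == [
        "$create",
        "$power",
        "$alice",
    ]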
+ async def check_host_in_room(self, room_id: str, host: str) -> bool:
+ with Measure(self._clock, "check_host_in_room"):
+ return await self._store.is_host_joined(room_id, host)
+
async def check_restricted_join_rules(
self,
state_ids: StateMap[str],
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 1b566dbf..991ec991 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -250,7 +250,9 @@ class FederationHandler(BaseHandler):
#
# Note that if we were never in the room then we would have already
# dropped the event, since we wouldn't know the room version.
- is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
+ is_in_room = await self._event_auth_handler.check_host_in_room(
+ room_id, self.server_name
+ )
if not is_in_room:
logger.info(
"Ignoring PDU from %s as we're not in the room",
@@ -1674,7 +1676,9 @@ class FederationHandler(BaseHandler):
room_version = await self.store.get_room_version_id(room_id)
# now check that we are *still* in the room
- is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
+ is_in_room = await self._event_auth_handler.check_host_in_room(
+ room_id, self.server_name
+ )
if not is_in_room:
logger.info(
"Got /make_join request for room %s we are no longer in",
@@ -1705,86 +1709,12 @@ class FederationHandler(BaseHandler):
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
- await self.auth.check_from_context(
+ await self._event_auth_handler.check_from_context(
room_version, event, context, do_sig_check=False
)
return event
- async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict:
- """We have received a join event for a room. Fully process it and
- respond with the current state and auth chains.
- """
- event = pdu
-
- logger.debug(
- "on_send_join_request from %s: Got event: %s, signatures: %s",
- origin,
- event.event_id,
- event.signatures,
- )
-
- if get_domain_from_id(event.sender) != origin:
- logger.info(
- "Got /send_join request for user %r from different origin %s",
- event.sender,
- origin,
- )
- raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
-
- event.internal_metadata.outlier = False
- # Send this event on behalf of the origin server.
- #
- # The reasons we have the destination server rather than the origin
- # server send it are slightly mysterious: the origin server should have
- # all the necessary state once it gets the response to the send_join,
- # so it could send the event itself if it wanted to. It may be that
- # doing it this way reduces failure modes, or avoids certain attacks
- # where a new server selectively tells a subset of the federation that
- # it has joined.
- #
- # The fact is that, as of the current writing, Synapse doesn't send out
- # the join event over federation after joining, and changing it now
- # would introduce the danger of backwards-compatibility problems.
- event.internal_metadata.send_on_behalf_of = origin
-
- # Calculate the event context.
- context = await self.state_handler.compute_event_context(event)
-
- # Get the state before the new event.
- prev_state_ids = await context.get_prev_state_ids()
-
- # Check if the user is already in the room or invited to the room.
- user_id = event.state_key
- prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
- prev_member_event = None
- if prev_member_event_id:
- prev_member_event = await self.store.get_event(prev_member_event_id)
-
- # Check if the member should be allowed access via membership in a space.
- await self._event_auth_handler.check_restricted_join_rules(
- prev_state_ids,
- event.room_version,
- user_id,
- prev_member_event,
- )
-
- # Persist the event.
- await self._auth_and_persist_event(origin, event, context)
-
- logger.debug(
- "on_send_join_request: After _auth_and_persist_event: %s, sigs: %s",
- event.event_id,
- event.signatures,
- )
-
- state_ids = list(prev_state_ids.values())
- auth_chain = await self.store.get_auth_chain(event.room_id, state_ids)
-
- state = await self.store.get_events(list(prev_state_ids.values()))
-
- return {"state": list(state.values()), "auth_chain": auth_chain}
-
async def on_invite_request(
self, origin: str, event: EventBase, room_version: RoomVersion
) -> EventBase:
@@ -1951,7 +1881,7 @@ class FederationHandler(BaseHandler):
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
- await self.auth.check_from_context(
+ await self._event_auth_handler.check_from_context(
room_version, event, context, do_sig_check=False
)
except AuthError as e:
@@ -1960,37 +1890,6 @@ class FederationHandler(BaseHandler):
return event
- async def on_send_leave_request(self, origin: str, pdu: EventBase) -> None:
- """We have received a leave event for a room. Fully process it."""
- event = pdu
-
- logger.debug(
- "on_send_leave_request: Got event: %s, signatures: %s",
- event.event_id,
- event.signatures,
- )
-
- if get_domain_from_id(event.sender) != origin:
- logger.info(
- "Got /send_leave request for user %r from different origin %s",
- event.sender,
- origin,
- )
- raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
-
- event.internal_metadata.outlier = False
-
- context = await self.state_handler.compute_event_context(event)
- await self._auth_and_persist_event(origin, event, context)
-
- logger.debug(
- "on_send_leave_request: After _auth_and_persist_event: %s, sigs: %s",
- event.event_id,
- event.signatures,
- )
-
- return None
-
@log_function
async def on_make_knock_request(
self, origin: str, room_id: str, user_id: str
@@ -2044,7 +1943,7 @@ class FederationHandler(BaseHandler):
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_knock_request`
- await self.auth.check_from_context(
+ await self._event_auth_handler.check_from_context(
room_version, event, context, do_sig_check=False
)
except AuthError as e:
@@ -2054,51 +1953,115 @@ class FederationHandler(BaseHandler):
return event
@log_function
- async def on_send_knock_request(
+ async def on_send_membership_event(
self, origin: str, event: EventBase
) -> EventContext:
"""
- We have received a knock event for a room. Verify that event and send it into the room
- on the knocking homeserver's behalf.
+ We have received a join/leave/knock event for a room via send_join/leave/knock.
+
+ Verify that event and send it into the room on the remote homeserver's behalf.
+
+ This is quite similar to on_receive_pdu, with the following principal
+ differences:
+ * only membership events are permitted (and only events with
+ sender==state_key -- ie, no kicks or bans)
+ * *We* send out the event on behalf of the remote server.
+ * We enforce the membership restrictions of restricted rooms.
+ * Rejected events result in an exception rather than being stored.
+
+ There are also other differences; however, it is not clear whether these are by
+ design or omission. In particular, we do not attempt to backfill any missing
+ prev_events.
Args:
- origin: The remote homeserver of the knocking user.
- event: The knocking member event that has been signed by the remote homeserver.
+ origin: The homeserver of the remote (joining/invited/knocking) user.
+ event: The member event that has been signed by the remote homeserver.
Returns:
The context of the event after inserting it into the room graph.
+
+ Raises:
+ SynapseError if the event is not accepted into the room
"""
logger.debug(
- "on_send_knock_request: Got event: %s, signatures: %s",
+ "on_send_membership_event: Got event: %s, signatures: %s",
event.event_id,
event.signatures,
)
if get_domain_from_id(event.sender) != origin:
logger.info(
- "Got /send_knock request for user %r from different origin %s",
+ "Got send_membership request for user %r from different origin %s",
event.sender,
origin,
)
raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
- event.internal_metadata.outlier = False
+ if event.sender != event.state_key:
+ raise SynapseError(400, "state_key and sender must match", Codes.BAD_JSON)
- context = await self.state_handler.compute_event_context(event)
+ assert not event.internal_metadata.outlier
- event_allowed = await self.third_party_event_rules.check_event_allowed(
- event, context
- )
- if not event_allowed:
- logger.info("Sending of knock %s forbidden by third-party rules", event)
+ # Send this event on behalf of the other server.
+ #
+ # The remote server isn't a full participant in the room at this point, so
+ # may not have an up-to-date list of the other homeservers participating in
+ # the room, so we send it on their behalf.
+ event.internal_metadata.send_on_behalf_of = origin
+
+ context = await self.state_handler.compute_event_context(event)
+ context = await self._check_event_auth(origin, event, context)
+ if context.rejected:
raise SynapseError(
- 403, "This event is not allowed in this context", Codes.FORBIDDEN
+ 403, f"{event.membership} event was rejected", Codes.FORBIDDEN
)
- await self._auth_and_persist_event(origin, event, context)
+ # for joins, we need to check the restrictions of restricted rooms
+ if event.membership == Membership.JOIN:
+ await self._check_join_restrictions(context, event)
+ # for knock events, we run the third-party event rules. It's not entirely clear
+ # why we don't do this for other sorts of membership events.
+ if event.membership == Membership.KNOCK:
+ event_allowed = await self.third_party_event_rules.check_event_allowed(
+ event, context
+ )
+ if not event_allowed:
+ logger.info("Sending of knock %s forbidden by third-party rules", event)
+ raise SynapseError(
+ 403, "This event is not allowed in this context", Codes.FORBIDDEN
+ )
+
+ # all looks good, we can persist the event.
+ await self._run_push_actions_and_persist_event(event, context)
return context
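Read in isolation, the origin and sender gates above amount to the following sketch (`get_domain_from_id` effectively takes everything after the first colon; the names here are illustrative):

    def check_membership_sender(origin: str, sender: str, state_key: str) -> None:
        # The sender must belong to the server that sent us the event...
        if sender.split(":", 1)[1] != origin:
            raise PermissionError("User not from origin")
        # ...and only self-membership is allowed here (no kicks or bans).
        if sender != state_key:
            raise ValueError("state_key and sender must match")

    check_membership_sender("example.org", "@bob:example.org", "@bob:example.org")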
+ async def _check_join_restrictions(
+ self, context: EventContext, event: EventBase
+ ) -> None:
+ """Check that restrictions in restricted join rules are matched
+
+ Called when we receive a join event via send_join.
+
+ Raises an auth error if the restrictions are not matched.
+ """
+ prev_state_ids = await context.get_prev_state_ids()
+
+ # Check if the user is already in the room or invited to the room.
+ user_id = event.state_key
+ prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
+ prev_member_event = None
+ if prev_member_event_id:
+ prev_member_event = await self.store.get_event(prev_member_event_id)
+
+ # Check if the member should be allowed access via membership in a space.
+ await self._event_auth_handler.check_restricted_join_rules(
+ prev_state_ids,
+ event.room_version,
+ user_id,
+ prev_member_event,
+ )
+
async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]:
"""Returns the state at the event. i.e. not including said event."""
@@ -2152,7 +2115,7 @@ class FederationHandler(BaseHandler):
async def on_backfill_request(
self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]:
- in_room = await self.auth.check_host_in_room(room_id, origin)
+ in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
@@ -2187,7 +2150,9 @@ class FederationHandler(BaseHandler):
)
if event:
- in_room = await self.auth.check_host_in_room(event.room_id, origin)
+ in_room = await self._event_auth_handler.check_host_in_room(
+ event.room_id, origin
+ )
if not in_room:
raise AuthError(403, "Host not in room.")
@@ -2240,6 +2205,18 @@ class FederationHandler(BaseHandler):
backfilled=backfilled,
)
+ await self._run_push_actions_and_persist_event(event, context, backfilled)
+
+ async def _run_push_actions_and_persist_event(
+ self, event: EventBase, context: EventContext, backfilled: bool = False
+ ):
+ """Run the push actions for a received event, and persist it.
+
+ Args:
+ event: The event itself.
+ context: The event context.
+ backfilled: True if the event was backfilled.
+ """
try:
if (
not event.internal_metadata.is_outlier()
@@ -2528,7 +2505,7 @@ class FederationHandler(BaseHandler):
latest_events: List[str],
limit: int,
) -> List[EventBase]:
- in_room = await self.auth.check_host_in_room(room_id, origin)
+ in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
@@ -2553,9 +2530,9 @@ class FederationHandler(BaseHandler):
origin: str,
event: EventBase,
context: EventContext,
- state: Optional[Iterable[EventBase]],
- auth_events: Optional[MutableStateMap[EventBase]],
- backfilled: bool,
+ state: Optional[Iterable[EventBase]] = None,
+ auth_events: Optional[MutableStateMap[EventBase]] = None,
+ backfilled: bool = False,
) -> EventContext:
"""
Checks whether an event should be rejected (for failing auth checks).
@@ -2591,7 +2568,7 @@ class FederationHandler(BaseHandler):
if not auth_events:
prev_state_ids = await context.get_prev_state_ids()
- auth_events_ids = self.auth.compute_auth_events(
+ auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=True
)
auth_events_x = await self.store.get_events(auth_events_ids)
@@ -3020,7 +2997,7 @@ class FederationHandler(BaseHandler):
"state_key": target_user_id,
}
- if await self.auth.check_host_in_room(room_id, self.hs.hostname):
+ if await self._event_auth_handler.check_host_in_room(room_id, self.hs.hostname):
room_version = await self.store.get_room_version_id(room_id)
builder = self.event_builder_factory.new(room_version, event_dict)
@@ -3040,7 +3017,9 @@ class FederationHandler(BaseHandler):
event.internal_metadata.send_on_behalf_of = self.hs.hostname
try:
- await self.auth.check_from_context(room_version, event, context)
+ await self._event_auth_handler.check_from_context(
+ room_version, event, context
+ )
except AuthError as e:
logger.warning("Denying new third party invite %r because %s", event, e)
raise e
@@ -3083,7 +3062,9 @@ class FederationHandler(BaseHandler):
)
try:
- await self.auth.check_from_context(room_version, event, context)
+ await self._event_auth_handler.check_from_context(
+ room_version, event, context
+ )
except AuthError as e:
logger.warning("Denying third party invite %r because %s", event, e)
raise e
@@ -3171,7 +3152,7 @@ class FederationHandler(BaseHandler):
last_exception = None # type: Optional[Exception]
# for each public key in the 3pid invite event
- for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
+ for public_key_object in event_auth.get_public_keys(invite_event):
try:
# for each sig on the third_party_invite block of the actual invite
for server, signature_block in signed["signatures"].items():
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index db12abd5..66e40a91 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -385,6 +385,7 @@ class EventCreationHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
+ self._event_auth_handler = hs.get_event_auth_handler()
self.store = hs.get_datastore()
self.storage = hs.get_storage()
self.state = hs.get_state_handler()
@@ -509,6 +510,8 @@ class EventCreationHandler:
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
+ If non-None, prev_event_ids must also be provided.
+
require_consent: Whether to check if the requester has
consented to the privacy policy.
@@ -581,6 +584,9 @@ class EventCreationHandler:
# Strip down the auth_event_ids to only what we need to auth the event.
# For example, we don't need extra m.room.member that don't match event.sender
if auth_event_ids is not None:
+ # If auth events are provided, prev events must be provided as well.
+ assert prev_event_ids is not None
+
temp_event = await builder.build(
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
@@ -592,7 +598,7 @@ class EventCreationHandler:
(e.type, e.state_key): e.event_id for e in auth_events
}
# Actually strip down and use the necessary auth events
- auth_event_ids = self.auth.compute_auth_events(
+ auth_event_ids = self._event_auth_handler.compute_auth_events(
event=temp_event,
current_state_ids=auth_event_state_map,
for_verification=False,
@@ -784,6 +790,8 @@ class EventCreationHandler:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
+
+ If non-None, prev_event_ids must also be provided.
ratelimit: Whether to rate limit this send.
txn_id: The transaction ID.
ignore_shadow_ban: True if shadow-banned users should be allowed to
@@ -1049,7 +1057,9 @@ class EventCreationHandler:
assert event.content["membership"] == Membership.LEAVE
else:
try:
- await self.auth.check_from_context(room_version, event, context)
+ await self._event_auth_handler.check_from_context(
+ room_version, event, context
+ )
except AuthError as err:
logger.warning("Denying new event %r because %s", event, err)
raise err
@@ -1374,7 +1384,7 @@ class EventCreationHandler:
raise AuthError(403, "Redacting server ACL events is not permitted")
prev_state_ids = await context.get_prev_state_ids()
- auth_events_ids = self.auth.compute_auth_events(
+ auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=True
)
auth_events_map = await self.store.get_events(auth_events_ids)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index ca1ed6a5..26ef0161 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -15,9 +15,10 @@
"""Contains functions for registering clients."""
import logging
-from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
from prometheus_client import Counter
+from typing_extensions import TypedDict
from synapse import types
from synapse.api.constants import MAX_USERID_LENGTH, EventTypes, JoinRules, LoginType
@@ -54,6 +55,16 @@ login_counter = Counter(
["guest", "auth_provider"],
)
+LoginDict = TypedDict(
+ "LoginDict",
+ {
+ "device_id": str,
+ "access_token": str,
+ "valid_until_ms": Optional[int],
+ "refresh_token": Optional[str],
+ },
+)
+
class RegistrationHandler(BaseHandler):
def __init__(self, hs: "HomeServer"):
@@ -85,6 +96,7 @@ class RegistrationHandler(BaseHandler):
self.pusher_pool = hs.get_pusherpool()
self.session_lifetime = hs.config.session_lifetime
+ self.access_token_lifetime = hs.config.access_token_lifetime
async def check_username(
self,
@@ -386,11 +398,32 @@ class RegistrationHandler(BaseHandler):
room_alias = RoomAlias.from_string(r)
if self.hs.hostname != room_alias.domain:
- logger.warning(
- "Cannot create room alias %s, "
- "it does not match server domain",
+ # If the alias is remote, try to join the room. This might fail
+ # because the room might be invite-only, but we have no local
+ # user in the room who could send an invite, so at this point
+ # that's the best we can do.
+ logger.info(
+ "Cannot automatically create room with alias %s as it isn't"
+ " local, trying to join the room instead",
r,
)
+
+ (
+ room,
+ remote_room_hosts,
+ ) = await room_member_handler.lookup_room_alias(room_alias)
+ room_id = room.to_string()
+
+ await room_member_handler.update_membership(
+ requester=create_requester(
+ user_id, authenticated_entity=self._server_name
+ ),
+ target=UserID.from_string(user_id),
+ room_id=room_id,
+ remote_room_hosts=remote_room_hosts,
+ action="join",
+ ratelimit=False,
+ )
else:
# A shallow copy is OK here since the only key that is
# modified is room_alias_name.
@@ -448,22 +481,32 @@ class RegistrationHandler(BaseHandler):
)
# Calculate whether the room requires an invite or can be
- # joined directly. Note that unless a join rule of public exists,
- # it is treated as requiring an invite.
- requires_invite = True
-
- state = await self.store.get_filtered_current_state_ids(
- room_id, StateFilter.from_types([(EventTypes.JoinRules, "")])
+ # joined directly. By default, we consider the room as requiring an
+ # invite if the homeserver is in the room (unless told otherwise by the
+ # join rules). Otherwise we consider it as being joinable, at the risk of
+ # failing to join, but in this case there's little more we can do since
+ # we don't have a local user in the room to craft an invite with.
+ requires_invite = await self.store.is_host_joined(
+ room_id,
+ self.server_name,
)
- event_id = state.get((EventTypes.JoinRules, ""))
- if event_id:
- join_rules_event = await self.store.get_event(
- event_id, allow_none=True
+ if requires_invite:
+ # If the server is in the room, check if the room is public.
+ state = await self.store.get_filtered_current_state_ids(
+ room_id, StateFilter.from_types([(EventTypes.JoinRules, "")])
)
- if join_rules_event:
- join_rule = join_rules_event.content.get("join_rule", None)
- requires_invite = join_rule and join_rule != JoinRules.PUBLIC
+
+ event_id = state.get((EventTypes.JoinRules, ""))
+ if event_id:
+ join_rules_event = await self.store.get_event(
+ event_id, allow_none=True
+ )
+ if join_rules_event:
+ join_rule = join_rules_event.content.get("join_rule", None)
+ requires_invite = (
+ join_rule and join_rule != JoinRules.PUBLIC
+ )
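Condensed, the invite decision above reads roughly as follows (a sketch: `get_join_rule` is a hypothetical helper standing in for the join-rules state lookup done inline in the real code):

    async def room_requires_invite(store, room_id: str, server_name: str) -> bool:
        # Not in the room: we could not send an invite anyway, so
        # optimistically attempt a direct join and accept that it may fail.
        if not await store.is_host_joined(room_id, server_name):
            return False
        # In the room: only a known-public join rule lets us skip the invite
        # (simplified; the real code also handles a missing join-rules event).
        join_rule = await store.get_join_rule(room_id)  # hypothetical helper
        return join_rule != "public"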
# Send the invite, if necessary.
if requires_invite:
@@ -665,7 +708,8 @@ class RegistrationHandler(BaseHandler):
is_guest: bool = False,
is_appservice_ghost: bool = False,
auth_provider_id: Optional[str] = None,
- ) -> Tuple[str, str]:
+ should_issue_refresh_token: bool = False,
+ ) -> Tuple[str, str, Optional[int], Optional[str]]:
"""Register a device for a user and generate an access token.
The access token will be limited by the homeserver's session_lifetime config.
@@ -677,8 +721,9 @@ class RegistrationHandler(BaseHandler):
is_guest: Whether this is a guest account
auth_provider_id: The SSO IdP the user used, if any (just used for the
prometheus metrics).
+ should_issue_refresh_token: Whether it should also issue a refresh token
Returns:
- Tuple of device ID and access token
+ Tuple of device ID, access token, access token expiration time, and refresh token
"""
res = await self._register_device_client(
user_id=user_id,
@@ -686,6 +731,7 @@ class RegistrationHandler(BaseHandler):
initial_display_name=initial_display_name,
is_guest=is_guest,
is_appservice_ghost=is_appservice_ghost,
+ should_issue_refresh_token=should_issue_refresh_token,
)
login_counter.labels(
@@ -693,7 +739,12 @@ class RegistrationHandler(BaseHandler):
auth_provider=(auth_provider_id or ""),
).inc()
- return res["device_id"], res["access_token"]
+ return (
+ res["device_id"],
+ res["access_token"],
+ res["valid_until_ms"],
+ res["refresh_token"],
+ )
async def register_device_inner(
self,
@@ -702,7 +753,8 @@ class RegistrationHandler(BaseHandler):
initial_display_name: Optional[str],
is_guest: bool = False,
is_appservice_ghost: bool = False,
- ) -> Dict[str, str]:
+ should_issue_refresh_token: bool = False,
+ ) -> LoginDict:
"""Helper for register_device
Does the bits that need doing on the main process. Not for use outside this
@@ -717,6 +769,9 @@ class RegistrationHandler(BaseHandler):
)
valid_until_ms = self.clock.time_msec() + self.session_lifetime
+ refresh_token = None
+ refresh_token_id = None
+
registered_device_id = await self.device_handler.check_device_registered(
user_id, device_id, initial_display_name
)
@@ -724,14 +779,30 @@ class RegistrationHandler(BaseHandler):
assert valid_until_ms is None
access_token = self.macaroon_gen.generate_guest_access_token(user_id)
else:
+ if should_issue_refresh_token:
+ (
+ refresh_token,
+ refresh_token_id,
+ ) = await self._auth_handler.get_refresh_token_for_user_id(
+ user_id,
+ device_id=registered_device_id,
+ )
+ valid_until_ms = self.clock.time_msec() + self.access_token_lifetime
+
access_token = await self._auth_handler.get_access_token_for_user_id(
user_id,
device_id=registered_device_id,
valid_until_ms=valid_until_ms,
is_appservice_ghost=is_appservice_ghost,
+ refresh_token_id=refresh_token_id,
)
- return {"device_id": registered_device_id, "access_token": access_token}
+ return {
+ "device_id": registered_device_id,
+ "access_token": access_token,
+ "valid_until_ms": valid_until_ms,
+ "refresh_token": refresh_token,
+ }
async def post_registration_actions(
self, user_id: str, auth_result: dict, access_token: Optional[str]
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 835d874c..579b1b93 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -83,6 +83,7 @@ class RoomCreationHandler(BaseHandler):
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
+ self._event_auth_handler = hs.get_event_auth_handler()
self.config = hs.config
# Room state based off defined presets
@@ -226,7 +227,7 @@ class RoomCreationHandler(BaseHandler):
},
)
old_room_version = await self.store.get_room_version_id(old_room_id)
- await self.auth.check_from_context(
+ await self._event_auth_handler.check_from_context(
old_room_version, tombstone_event, tombstone_context
)
diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py
index 17fc47ce..b585057e 100644
--- a/synapse/handlers/space_summary.py
+++ b/synapse/handlers/space_summary.py
@@ -25,6 +25,7 @@ from synapse.api.constants import (
EventTypes,
HistoryVisibility,
Membership,
+ RoomTypes,
)
from synapse.events import EventBase
from synapse.events.utils import format_event_for_client_v2
@@ -318,7 +319,8 @@ class SpaceSummaryHandler:
Returns:
A tuple of:
- An iterable of a single value of the room.
+ The room information, if the room should be returned to the
+ user; None otherwise.
An iterable of the sorted children events. This may be limited
to a maximum size or may include all children.
@@ -328,7 +330,11 @@ class SpaceSummaryHandler:
room_entry = await self._build_room_entry(room_id)
- # look for child rooms/spaces.
+ # If the room is not a space, return just the room information.
+ if room_entry.get("room_type") != RoomTypes.SPACE:
+ return room_entry, ()
+
+ # Otherwise, look for child rooms/spaces.
child_events = await self._get_child_events(room_id)
if suggested_only:
@@ -348,6 +354,7 @@ class SpaceSummaryHandler:
event_format=format_event_for_client_v2,
)
)
+
return room_entry, events_result
async def _summarize_remote_room(
@@ -465,7 +472,7 @@ class SpaceSummaryHandler:
# If this is a request over federation, check if the host is in the room or
# is in one of the spaces specified via the join rules.
elif origin:
- if await self._auth.check_host_in_room(room_id, origin):
+ if await self._event_auth_handler.check_host_in_room(room_id, origin):
return True
# Alternately, if the host has a user in any of the spaces specified
@@ -478,7 +485,9 @@ class SpaceSummaryHandler:
await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
)
for space_id in allowed_rooms:
- if await self._auth.check_host_in_room(space_id, origin):
+ if await self._event_auth_handler.check_host_in_room(
+ space_id, origin
+ ):
return True
# otherwise, check if the room is peekable
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 845651e6..efbc6d5b 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -728,7 +728,7 @@ def set_cors_headers(request: Request):
)
request.setHeader(
b"Access-Control-Allow-Headers",
- b"Origin, X-Requested-With, Content-Type, Accept, Authorization, Date",
+ b"X-Requested-With, Content-Type, Authorization, Date",
)
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index fda8da21..6ba2ce1e 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -113,8 +113,18 @@ def parse_boolean_from_args(args, name, default=None, required=False):
def parse_bytes_from_args(
args: Dict[bytes, List[bytes]],
name: str,
+ default: Optional[bytes] = None,
+) -> Optional[bytes]:
+ ...
+
+
+@overload
+def parse_bytes_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
default: Literal[None] = None,
- required: Literal[True] = True,
+ *,
+ required: Literal[True],
) -> bytes:
...
@@ -197,7 +207,12 @@ def parse_string(
"""
args = request.args # type: Dict[bytes, List[bytes]] # type: ignore
return parse_string_from_args(
- args, name, default, required, allowed_values, encoding
+ args,
+ name,
+ default,
+ required=required,
+ allowed_values=allowed_values,
+ encoding=encoding,
)
@@ -227,7 +242,20 @@ def parse_strings_from_args(
args: Dict[bytes, List[bytes]],
name: str,
default: Optional[List[str]] = None,
- required: Literal[True] = True,
+ *,
+ allowed_values: Optional[Iterable[str]] = None,
+ encoding: str = "ascii",
+) -> Optional[List[str]]:
+ ...
+
+
+@overload
+def parse_strings_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
+ default: Optional[List[str]] = None,
+ *,
+ required: Literal[True],
allowed_values: Optional[Iterable[str]] = None,
encoding: str = "ascii",
) -> List[str]:
@@ -239,6 +267,7 @@ def parse_strings_from_args(
args: Dict[bytes, List[bytes]],
name: str,
default: Optional[List[str]] = None,
+ *,
required: bool = False,
allowed_values: Optional[Iterable[str]] = None,
encoding: str = "ascii",
@@ -299,7 +328,20 @@ def parse_string_from_args(
args: Dict[bytes, List[bytes]],
name: str,
default: Optional[str] = None,
- required: Literal[True] = True,
+ *,
+ allowed_values: Optional[Iterable[str]] = None,
+ encoding: str = "ascii",
+) -> Optional[str]:
+ ...
+
+
+@overload
+def parse_string_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
+ default: Optional[str] = None,
+ *,
+ required: Literal[True],
allowed_values: Optional[Iterable[str]] = None,
encoding: str = "ascii",
) -> str:
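The typing pattern used by these overloads, in miniature: making `required` keyword-only and giving it type `Literal[True]` in one overload lets a checker narrow the return type from `Optional[str]` to `str` at call sites that pass `required=True`. A self-contained sketch, not Synapse code (`typing_extensions` follows the codebase's import convention):

    from typing import Dict, Optional, overload

    from typing_extensions import Literal

    @overload
    def get_arg(args: Dict[str, str], name: str) -> Optional[str]:
        ...

    @overload
    def get_arg(args: Dict[str, str], name: str, *, required: Literal[True]) -> str:
        ...

    def get_arg(args, name, *, required=False):
        value = args.get(name)
        if value is None and required:
            raise KeyError(name)
        return value

    maybe: Optional[str] = get_arg({}, "q")                   # checker sees Optional[str]
    surely: str = get_arg({"q": "x"}, "q", required=True)     # checker sees str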
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 58b255eb..721c45ab 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -168,7 +168,7 @@ class ModuleApi:
"Using deprecated ModuleApi.register which creates a dummy user device."
)
user_id = yield self.register_user(localpart, displayname, emails or [])
- _, access_token = yield self.register_device(user_id)
+ _, access_token, _, _ = yield self.register_device(user_id)
return user_id, access_token
def register_user(
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 350646f4..669ea462 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -104,7 +104,7 @@ class BulkPushRuleEvaluator:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastore()
- self.auth = hs.get_auth()
+ self._event_auth_handler = hs.get_event_auth_handler()
# Used by `RulesForRoom` to ensure only one thing mutates the cache at a
# time. Keyed off room_id.
@@ -172,7 +172,7 @@ class BulkPushRuleEvaluator:
# not having a power level event is an extreme edge case
auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
else:
- auth_events_ids = self.auth.compute_auth_events(
+ auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=False
)
auth_events_dict = await self.store.get_events(auth_events_ids)
diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py
index c2e8c002..550bd5c9 100644
--- a/synapse/replication/http/login.py
+++ b/synapse/replication/http/login.py
@@ -36,20 +36,29 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
@staticmethod
async def _serialize_payload(
- user_id, device_id, initial_display_name, is_guest, is_appservice_ghost
+ user_id,
+ device_id,
+ initial_display_name,
+ is_guest,
+ is_appservice_ghost,
+ should_issue_refresh_token,
):
"""
Args:
+ user_id (str)
device_id (str|None): Device ID to use, if None a new one is
generated.
initial_display_name (str|None)
is_guest (bool)
+ is_appservice_ghost (bool)
+ should_issue_refresh_token (bool)
"""
return {
"device_id": device_id,
"initial_display_name": initial_display_name,
"is_guest": is_guest,
"is_appservice_ghost": is_appservice_ghost,
+ "should_issue_refresh_token": should_issue_refresh_token,
}
async def _handle_request(self, request, user_id):
@@ -59,6 +68,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
initial_display_name = content["initial_display_name"]
is_guest = content["is_guest"]
is_appservice_ghost = content["is_appservice_ghost"]
+ should_issue_refresh_token = content["should_issue_refresh_token"]
res = await self.registration_handler.register_device_inner(
user_id,
@@ -66,6 +76,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
initial_display_name,
is_guest,
is_appservice_ghost=is_appservice_ghost,
+ should_issue_refresh_token=should_issue_refresh_token,
)
return 200, res
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index f6be5f10..cbcb60fe 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -14,7 +14,9 @@
import logging
import re
-from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional
+
+from typing_extensions import TypedDict
from synapse.api.errors import Codes, LoginError, SynapseError
from synapse.api.ratelimiting import Ratelimiter
@@ -25,6 +27,8 @@ from synapse.http import get_request_uri
from synapse.http.server import HttpServer, finish_request
from synapse.http.servlet import (
RestServlet,
+ assert_params_in_dict,
+ parse_boolean,
parse_bytes_from_args,
parse_json_object_from_request,
parse_string,
@@ -40,6 +44,21 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+LoginResponse = TypedDict(
+ "LoginResponse",
+ {
+ "user_id": str,
+ "access_token": str,
+ "home_server": str,
+ "expires_in_ms": Optional[int],
+ "refresh_token": Optional[str],
+ "device_id": str,
+ "well_known": Optional[Dict[str, Any]],
+ },
+ total=False,
+)
+
+
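Since the TypedDict is declared with `total=False`, every key is optional at construction, so fields such as `expires_in_ms` and `refresh_token` are set only when they apply. An illustrative use of the `LoginResponse` defined above (all values made up):

    resp = LoginResponse(
        user_id="@alice:example.org",
        access_token="syt_abc123",
        home_server="example.org",
        device_id="ABCDEFGH",
    )
    # Optional fields are added only when present:
    resp["expires_in_ms"] = 60_000
    resp["refresh_token"] = "syr_def456"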
class LoginRestServlet(RestServlet):
PATTERNS = client_patterns("/login$", v1=True)
CAS_TYPE = "m.login.cas"
@@ -48,6 +67,7 @@ class LoginRestServlet(RestServlet):
JWT_TYPE = "org.matrix.login.jwt"
JWT_TYPE_DEPRECATED = "m.login.jwt"
APPSERVICE_TYPE = "uk.half-shot.msc2778.login.application_service"
+ REFRESH_TOKEN_PARAM = "org.matrix.msc2918.refresh_token"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -65,9 +85,12 @@ class LoginRestServlet(RestServlet):
self.cas_enabled = hs.config.cas_enabled
self.oidc_enabled = hs.config.oidc_enabled
self._msc2858_enabled = hs.config.experimental.msc2858_enabled
+ self._msc2918_enabled = hs.config.access_token_lifetime is not None
self.auth = hs.get_auth()
+ self.clock = hs.get_clock()
+
self.auth_handler = self.hs.get_auth_handler()
self.registration_handler = hs.get_registration_handler()
self._sso_handler = hs.get_sso_handler()
@@ -138,6 +161,15 @@ class LoginRestServlet(RestServlet):
async def on_POST(self, request: SynapseRequest):
login_submission = parse_json_object_from_request(request)
+ if self._msc2918_enabled:
+ # Check if this login should also issue a refresh token, as per
+ # MSC2918
+ should_issue_refresh_token = parse_boolean(
+ request, name=LoginRestServlet.REFRESH_TOKEN_PARAM, default=False
+ )
+ else:
+ should_issue_refresh_token = False
+
try:
if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE:
appservice = self.auth.get_appservice_by_req(request)
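For context, a hypothetical client request exercising this flag; the query parameter is the unstable MSC2918 name used above, and `requests` is an arbitrary HTTP client chosen for the example:

    import requests

    resp = requests.post(
        "https://example.org/_matrix/client/r0/login"
        "?org.matrix.msc2918.refresh_token=true",
        json={
            "type": "m.login.password",
            "identifier": {"type": "m.user", "user": "alice"},
            "password": "correct horse battery staple",
        },
    )
    body = resp.json()
    # On success, expect refresh_token and expires_in_ms alongside access_token.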
@@ -147,19 +179,32 @@ class LoginRestServlet(RestServlet):
None, request.getClientIP()
)
- result = await self._do_appservice_login(login_submission, appservice)
+ result = await self._do_appservice_login(
+ login_submission,
+ appservice,
+ should_issue_refresh_token=should_issue_refresh_token,
+ )
elif self.jwt_enabled and (
login_submission["type"] == LoginRestServlet.JWT_TYPE
or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED
):
await self._address_ratelimiter.ratelimit(None, request.getClientIP())
- result = await self._do_jwt_login(login_submission)
+ result = await self._do_jwt_login(
+ login_submission,
+ should_issue_refresh_token=should_issue_refresh_token,
+ )
elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
await self._address_ratelimiter.ratelimit(None, request.getClientIP())
- result = await self._do_token_login(login_submission)
+ result = await self._do_token_login(
+ login_submission,
+ should_issue_refresh_token=should_issue_refresh_token,
+ )
else:
await self._address_ratelimiter.ratelimit(None, request.getClientIP())
- result = await self._do_other_login(login_submission)
+ result = await self._do_other_login(
+ login_submission,
+ should_issue_refresh_token=should_issue_refresh_token,
+ )
except KeyError:
raise SynapseError(400, "Missing JSON keys.")
@@ -169,7 +214,10 @@ class LoginRestServlet(RestServlet):
return 200, result
async def _do_appservice_login(
- self, login_submission: JsonDict, appservice: ApplicationService
+ self,
+ login_submission: JsonDict,
+ appservice: ApplicationService,
+ should_issue_refresh_token: bool = False,
):
identifier = login_submission.get("identifier")
logger.info("Got appservice login request with identifier: %r", identifier)
@@ -198,14 +246,21 @@ class LoginRestServlet(RestServlet):
raise LoginError(403, "Invalid access_token", errcode=Codes.FORBIDDEN)
return await self._complete_login(
- qualified_user_id, login_submission, ratelimit=appservice.is_rate_limited()
+ qualified_user_id,
+ login_submission,
+ ratelimit=appservice.is_rate_limited(),
+ should_issue_refresh_token=should_issue_refresh_token,
)
- async def _do_other_login(self, login_submission: JsonDict) -> Dict[str, str]:
+ async def _do_other_login(
+ self, login_submission: JsonDict, should_issue_refresh_token: bool = False
+ ) -> LoginResponse:
"""Handle non-token/saml/jwt logins
Args:
login_submission:
+ should_issue_refresh_token: True if this login should issue
+ a refresh token alongside the access token.
Returns:
HTTP response
@@ -224,7 +279,10 @@ class LoginRestServlet(RestServlet):
login_submission, ratelimit=True
)
result = await self._complete_login(
- canonical_user_id, login_submission, callback
+ canonical_user_id,
+ login_submission,
+ callback,
+ should_issue_refresh_token=should_issue_refresh_token,
)
return result
@@ -232,11 +290,12 @@ class LoginRestServlet(RestServlet):
self,
user_id: str,
login_submission: JsonDict,
- callback: Optional[Callable[[Dict[str, str]], Awaitable[None]]] = None,
+ callback: Optional[Callable[[LoginResponse], Awaitable[None]]] = None,
create_non_existent_users: bool = False,
ratelimit: bool = True,
auth_provider_id: Optional[str] = None,
- ) -> Dict[str, str]:
+ should_issue_refresh_token: bool = False,
+ ) -> LoginResponse:
"""Called when we've successfully authed the user and now need to
actually log them in (e.g. create devices). This gets called on
all successful logins.
@@ -253,6 +312,8 @@ class LoginRestServlet(RestServlet):
ratelimit: Whether to ratelimit the login request.
auth_provider_id: The SSO IdP the user used, if any (just used for the
prometheus metrics).
+ should_issue_refresh_token: True if this login should issue
+ a refresh token alongside the access token.
Returns:
result: Dictionary of account information after successful login.
@@ -274,28 +335,48 @@ class LoginRestServlet(RestServlet):
device_id = login_submission.get("device_id")
initial_display_name = login_submission.get("initial_device_display_name")
- device_id, access_token = await self.registration_handler.register_device(
- user_id, device_id, initial_display_name, auth_provider_id=auth_provider_id
+ (
+ device_id,
+ access_token,
+ valid_until_ms,
+ refresh_token,
+ ) = await self.registration_handler.register_device(
+ user_id,
+ device_id,
+ initial_display_name,
+ auth_provider_id=auth_provider_id,
+ should_issue_refresh_token=should_issue_refresh_token,
)
- result = {
- "user_id": user_id,
- "access_token": access_token,
- "home_server": self.hs.hostname,
- "device_id": device_id,
- }
+ result = LoginResponse(
+ user_id=user_id,
+ access_token=access_token,
+ home_server=self.hs.hostname,
+ device_id=device_id,
+ )
+
+ if valid_until_ms is not None:
+ expires_in_ms = valid_until_ms - self.clock.time_msec()
+ result["expires_in_ms"] = expires_in_ms
+
+ if refresh_token is not None:
+ result["refresh_token"] = refresh_token
if callback is not None:
await callback(result)
return result
- async def _do_token_login(self, login_submission: JsonDict) -> Dict[str, str]:
+ async def _do_token_login(
+ self, login_submission: JsonDict, should_issue_refresh_token: bool = False
+ ) -> LoginResponse:
"""
Handle the final stage of SSO login.
Args:
- login_submission: The JSON request body.
+ login_submission: The JSON request body.
+ should_issue_refresh_token: True if this login should issue
+ a refresh token alongside the access token.
Returns:
The body of the JSON response.
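For orientation, a successful login under this scheme returns a body shaped
roughly like the following (a hypothetical sketch; all values are invented):

    {
        "user_id": "@alice:example.com",
        "access_token": "<opaque access token>",
        "home_server": "example.com",
        "device_id": "ABCDEFGH",
        "expires_in_ms": 60000,
        "refresh_token": "<opaque refresh token>"
    }

The last two fields are only present when the access token has an expiry and a
refresh token was issued, respectively, as `_complete_login` above shows.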
@@ -309,9 +390,12 @@ class LoginRestServlet(RestServlet):
login_submission,
self.auth_handler._sso_login_callback,
auth_provider_id=res.auth_provider_id,
+ should_issue_refresh_token=should_issue_refresh_token,
)
- async def _do_jwt_login(self, login_submission: JsonDict) -> Dict[str, str]:
+ async def _do_jwt_login(
+ self, login_submission: JsonDict, should_issue_refresh_token: bool = False
+ ) -> LoginResponse:
token = login_submission.get("token", None)
if token is None:
raise LoginError(
@@ -342,7 +426,10 @@ class LoginRestServlet(RestServlet):
user_id = UserID(user, self.hs.hostname).to_string()
result = await self._complete_login(
- user_id, login_submission, create_non_existent_users=True
+ user_id,
+ login_submission,
+ create_non_existent_users=True,
+ should_issue_refresh_token=should_issue_refresh_token,
)
return result
@@ -371,6 +458,42 @@ def _get_auth_flow_dict_for_idp(
return e
+class RefreshTokenServlet(RestServlet):
+ PATTERNS = client_patterns(
+ "/org.matrix.msc2918.refresh_token/refresh$", releases=(), unstable=True
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ self._auth_handler = hs.get_auth_handler()
+ self._clock = hs.get_clock()
+ self.access_token_lifetime = hs.config.access_token_lifetime
+
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ ):
+ refresh_submission = parse_json_object_from_request(request)
+
+ assert_params_in_dict(refresh_submission, ["refresh_token"])
+ token = refresh_submission["refresh_token"]
+ if not isinstance(token, str):
+ raise SynapseError(400, "Invalid param: refresh_token", Codes.INVALID_PARAM)
+
+ valid_until_ms = self._clock.time_msec() + self.access_token_lifetime
+ access_token, refresh_token = await self._auth_handler.refresh_token(
+ token, valid_until_ms
+ )
+ expires_in_ms = valid_until_ms - self._clock.time_msec()
+ return (
+ 200,
+ {
+ "access_token": access_token,
+ "refresh_token": refresh_token,
+ "expires_in_ms": expires_in_ms,
+ },
+ )
+
+
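A minimal client-side sketch of the refresh flow this servlet implements (the
endpoint path follows the unstable pattern above; the homeserver URL and the
use of `requests` are assumptions, not part of this change):

    import requests

    def refresh_access_token(homeserver: str, refresh_token: str) -> dict:
        # Exchange the current refresh token for a fresh access/refresh pair;
        # the old refresh token is invalidated once the new tokens are used.
        resp = requests.post(
            f"{homeserver}/_matrix/client/unstable"
            "/org.matrix.msc2918.refresh_token/refresh",
            json={"refresh_token": refresh_token},
        )
        resp.raise_for_status()
        # {"access_token": ..., "refresh_token": ..., "expires_in_ms": ...}
        return resp.json()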
class SsoRedirectServlet(RestServlet):
PATTERNS = list(client_patterns("/login/(cas|sso)/redirect$", v1=True)) + [
re.compile(
@@ -477,6 +600,8 @@ class CasTicketServlet(RestServlet):
def register_servlets(hs, http_server):
LoginRestServlet(hs).register(http_server)
+ if hs.config.access_token_lifetime is not None:
+ RefreshTokenServlet(hs).register(http_server)
SsoRedirectServlet(hs).register(http_server)
if hs.config.cas_enabled:
CasTicketServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index a30a5df1..4d31584a 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -41,11 +41,13 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
+ parse_boolean,
parse_json_object_from_request,
parse_string,
)
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
+from synapse.types import JsonDict
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.stringutils import assert_valid_client_secret, random_string
@@ -399,6 +401,7 @@ class RegisterRestServlet(RestServlet):
self.password_policy_handler = hs.get_password_policy_handler()
self.clock = hs.get_clock()
self._registration_enabled = self.hs.config.enable_registration
+ self._msc2918_enabled = hs.config.access_token_lifetime is not None
self._registration_flows = _calculate_registration_flows(
hs.config, self.auth_handler
@@ -424,6 +427,15 @@ class RegisterRestServlet(RestServlet):
"Do not understand membership kind: %s" % (kind.decode("utf8"),)
)
+ if self._msc2918_enabled:
+ # Check if this registration should also issue a refresh token, as
+ # per MSC2918
+ should_issue_refresh_token = parse_boolean(
+ request, name="org.matrix.msc2918.refresh_token", default=False
+ )
+ else:
+ should_issue_refresh_token = False
+
# Pull out the provided username and do basic sanity checks early since
# the auth layer will store these in sessions.
desired_username = None
@@ -462,7 +474,10 @@ class RegisterRestServlet(RestServlet):
raise SynapseError(400, "Desired Username is missing or not a string")
result = await self._do_appservice_registration(
- desired_username, access_token, body
+ desired_username,
+ access_token,
+ body,
+ should_issue_refresh_token=should_issue_refresh_token,
)
return 200, result
@@ -665,7 +680,9 @@ class RegisterRestServlet(RestServlet):
registered = True
return_dict = await self._create_registration_details(
- registered_user_id, params
+ registered_user_id,
+ params,
+ should_issue_refresh_token=should_issue_refresh_token,
)
if registered:
@@ -677,7 +694,9 @@ class RegisterRestServlet(RestServlet):
return 200, return_dict
- async def _do_appservice_registration(self, username, as_token, body):
+ async def _do_appservice_registration(
+ self, username, as_token, body, should_issue_refresh_token: bool = False
+ ):
user_id = await self.registration_handler.appservice_register(
username, as_token
)
@@ -685,19 +704,27 @@ class RegisterRestServlet(RestServlet):
user_id,
body,
is_appservice_ghost=True,
+ should_issue_refresh_token=should_issue_refresh_token,
)
async def _create_registration_details(
- self, user_id, params, is_appservice_ghost=False
+ self,
+ user_id: str,
+ params: JsonDict,
+ is_appservice_ghost: bool = False,
+ should_issue_refresh_token: bool = False,
):
"""Complete registration of newly-registered user
Allocates device_id if one was not given; also creates access_token.
Args:
- (str) user_id: full canonical @user:id
- (object) params: registration parameters, from which we pull
- device_id, initial_device_name and inhibit_login
+ user_id: full canonical @user:id
+ params: registration parameters, from which we pull device_id,
+ initial_device_name and inhibit_login
+            is_appservice_ghost: True if this registration was performed by an
+                appservice on behalf of one of its ghost users.
+ should_issue_refresh_token: True if this registration should issue
+ a refresh token alongside the access token.
Returns:
dictionary for response from /register
"""
@@ -705,15 +732,29 @@ class RegisterRestServlet(RestServlet):
if not params.get("inhibit_login", False):
device_id = params.get("device_id")
initial_display_name = params.get("initial_device_display_name")
- device_id, access_token = await self.registration_handler.register_device(
+ (
+ device_id,
+ access_token,
+ valid_until_ms,
+ refresh_token,
+ ) = await self.registration_handler.register_device(
user_id,
device_id,
initial_display_name,
is_guest=False,
is_appservice_ghost=is_appservice_ghost,
+ should_issue_refresh_token=should_issue_refresh_token,
)
result.update({"access_token": access_token, "device_id": device_id})
+
+ if valid_until_ms is not None:
+ expires_in_ms = valid_until_ms - self.clock.time_msec()
+ result["expires_in_ms"] = expires_in_ms
+
+ if refresh_token is not None:
+ result["refresh_token"] = refresh_token
+
return result
async def _do_guest_registration(self, params, address=None):
@@ -727,19 +768,30 @@ class RegisterRestServlet(RestServlet):
# we have nowhere to store it.
device_id = synapse.api.auth.GUEST_DEVICE_ID
initial_display_name = params.get("initial_device_display_name")
- device_id, access_token = await self.registration_handler.register_device(
+ (
+ device_id,
+ access_token,
+ valid_until_ms,
+ refresh_token,
+ ) = await self.registration_handler.register_device(
user_id, device_id, initial_display_name, is_guest=True
)
- return (
- 200,
- {
- "user_id": user_id,
- "device_id": device_id,
- "access_token": access_token,
- "home_server": self.hs.hostname,
- },
- )
+ result = {
+ "user_id": user_id,
+ "device_id": device_id,
+ "access_token": access_token,
+ "home_server": self.hs.hostname,
+ }
+
+ if valid_until_ms is not None:
+ expires_in_ms = valid_until_ms - self.clock.time_msec()
+ result["expires_in_ms"] = expires_in_ms
+
+ if refresh_token is not None:
+ result["refresh_token"] = refresh_token
+
+ return 200, result
def _calculate_registration_flows(
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 042e1788..ecbbcf38 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -13,6 +13,7 @@
# limitations under the License.
import itertools
import logging
+from collections import defaultdict
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple
from synapse.api.constants import Membership, PresenceState
@@ -232,29 +233,51 @@ class SyncRestServlet(RestServlet):
)
logger.debug("building sync response dict")
- return {
- "account_data": {"events": sync_result.account_data},
- "to_device": {"events": sync_result.to_device},
- "device_lists": {
- "changed": list(sync_result.device_lists.changed),
- "left": list(sync_result.device_lists.left),
- },
- "presence": SyncRestServlet.encode_presence(sync_result.presence, time_now),
- "rooms": {
- Membership.JOIN: joined,
- Membership.INVITE: invited,
- Membership.KNOCK: knocked,
- Membership.LEAVE: archived,
- },
- "groups": {
- Membership.JOIN: sync_result.groups.join,
- Membership.INVITE: sync_result.groups.invite,
- Membership.LEAVE: sync_result.groups.leave,
- },
- "device_one_time_keys_count": sync_result.device_one_time_keys_count,
- "org.matrix.msc2732.device_unused_fallback_key_types": sync_result.device_unused_fallback_key_types,
- "next_batch": await sync_result.next_batch.to_string(self.store),
- }
+
+ response: dict = defaultdict(dict)
+ response["next_batch"] = await sync_result.next_batch.to_string(self.store)
+
+ if sync_result.account_data:
+ response["account_data"] = {"events": sync_result.account_data}
+ if sync_result.presence:
+ response["presence"] = SyncRestServlet.encode_presence(
+ sync_result.presence, time_now
+ )
+
+ if sync_result.to_device:
+ response["to_device"] = {"events": sync_result.to_device}
+
+ if sync_result.device_lists.changed:
+ response["device_lists"]["changed"] = list(sync_result.device_lists.changed)
+ if sync_result.device_lists.left:
+ response["device_lists"]["left"] = list(sync_result.device_lists.left)
+
+ if sync_result.device_one_time_keys_count:
+ response[
+ "device_one_time_keys_count"
+ ] = sync_result.device_one_time_keys_count
+ if sync_result.device_unused_fallback_key_types:
+ response[
+ "org.matrix.msc2732.device_unused_fallback_key_types"
+ ] = sync_result.device_unused_fallback_key_types
+
+ if joined:
+ response["rooms"][Membership.JOIN] = joined
+ if invited:
+ response["rooms"][Membership.INVITE] = invited
+ if knocked:
+ response["rooms"][Membership.KNOCK] = knocked
+ if archived:
+ response["rooms"][Membership.LEAVE] = archived
+
+ if sync_result.groups.join:
+ response["groups"][Membership.JOIN] = sync_result.groups.join
+ if sync_result.groups.invite:
+ response["groups"][Membership.INVITE] = sync_result.groups.invite
+ if sync_result.groups.leave:
+ response["groups"][Membership.LEAVE] = sync_result.groups.leave
+
+ return response
@staticmethod
def encode_presence(events, time_now):
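The move to `defaultdict(dict)` is what lets empty sections be omitted from
the sync response: a nested key such as "rooms" is only materialised when
something truthy is assigned under it. A standalone sketch of that behaviour:

    from collections import defaultdict

    response: dict = defaultdict(dict)
    response["next_batch"] = "s72594_4483_1934"  # invented batch token

    joined = {}  # suppose this sync had no joined-room updates
    if joined:
        response["rooms"]["join"] = joined

    assert "rooms" not in response  # never touched, so never created
    assert dict(response) == {"next_batch": "s72594_4483_1934"}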
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index d470cdac..33c42cf9 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -111,7 +111,7 @@ def make_conn(
db_config: DatabaseConnectionConfig,
engine: BaseDatabaseEngine,
default_txn_name: str,
-) -> Connection:
+) -> "LoggingDatabaseConnection":
"""Make a new connection to the database and return it.
Returns:
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index f23f8c6e..c4474df9 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -16,6 +16,8 @@ import logging
from queue import Empty, PriorityQueue
from typing import Collection, Dict, Iterable, List, Optional, Set, Tuple
+from prometheus_client import Gauge
+
from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import StoreError
from synapse.api.room_versions import RoomVersion
@@ -32,6 +34,16 @@ from synapse.util.caches.descriptors import cached
from synapse.util.caches.lrucache import LruCache
from synapse.util.iterutils import batch_iter
+oldest_pdu_in_federation_staging = Gauge(
+ "synapse_federation_server_oldest_inbound_pdu_in_staging",
+ "The age in seconds since we received the oldest pdu in the federation staging area",
+)
+
+number_pdus_in_federation_queue = Gauge(
+ "synapse_federation_server_number_inbound_pdu_in_staging",
+ "The total number of events in the inbound federation staging",
+)
+
logger = logging.getLogger(__name__)
@@ -54,6 +66,8 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
500000, "_event_auth_cache", size_callback=len
) # type: LruCache[str, List[Tuple[str, int]]]
+ self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)
+
async def get_auth_chain(
self, room_id: str, event_ids: Collection[str], include_given: bool = False
) -> List[EventBase]:
@@ -1075,16 +1089,62 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
self,
origin: str,
event_id: str,
- ) -> None:
- """Remove the given event from the staging area"""
- await self.db_pool.simple_delete(
- table="federation_inbound_events_staging",
- keyvalues={
- "origin": origin,
- "event_id": event_id,
- },
- desc="remove_received_event_from_staging",
- )
+ ) -> Optional[int]:
+ """Remove the given event from the staging area.
+
+ Returns:
+ The received_ts of the row that was deleted, if any.
+ """
+ if self.db_pool.engine.supports_returning:
+
+ def _remove_received_event_from_staging_txn(txn):
+ sql = """
+ DELETE FROM federation_inbound_events_staging
+ WHERE origin = ? AND event_id = ?
+ RETURNING received_ts
+ """
+
+ txn.execute(sql, (origin, event_id))
+ return txn.fetchone()
+
+ row = await self.db_pool.runInteraction(
+ "remove_received_event_from_staging",
+ _remove_received_event_from_staging_txn,
+ db_autocommit=True,
+ )
+ if row is None:
+ return None
+
+ return row[0]
+
+ else:
+
+ def _remove_received_event_from_staging_txn(txn):
+ received_ts = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ table="federation_inbound_events_staging",
+ keyvalues={
+ "origin": origin,
+ "event_id": event_id,
+ },
+ retcol="received_ts",
+ allow_none=True,
+ )
+ self.db_pool.simple_delete_txn(
+ txn,
+ table="federation_inbound_events_staging",
+ keyvalues={
+ "origin": origin,
+ "event_id": event_id,
+ },
+ )
+
+ return received_ts
+
+ return await self.db_pool.runInteraction(
+ "remove_received_event_from_staging",
+ _remove_received_event_from_staging_txn,
+ )
async def get_next_staged_event_id_for_room(
self,
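A standalone check (not Synapse code) of the two deletion paths above: SQLite
only understands `DELETE ... RETURNING` from 3.35.0, which is exactly what the
`supports_returning` property added later in this diff gates on.

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE staging (origin TEXT, event_id TEXT, received_ts BIGINT)"
    )
    conn.execute("INSERT INTO staging VALUES ('example.org', '$ev1', 1626000000000)")

    if sqlite3.sqlite_version_info >= (3, 35, 0):
        # One round trip: delete and read back the timestamp atomically.
        row = conn.execute(
            "DELETE FROM staging WHERE origin = ? AND event_id = ?"
            " RETURNING received_ts",
            ("example.org", "$ev1"),
        ).fetchone()
    else:
        # Fallback: select first, then delete, within the same transaction.
        args = ("example.org", "$ev1")
        row = conn.execute(
            "SELECT received_ts FROM staging WHERE origin = ? AND event_id = ?", args
        ).fetchone()
        conn.execute("DELETE FROM staging WHERE origin = ? AND event_id = ?", args)

    print(row)  # (1626000000000,)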
@@ -1147,6 +1207,40 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
return origin, event
+ async def get_all_rooms_with_staged_incoming_events(self) -> List[str]:
+ """Get the room IDs of all events currently staged."""
+ return await self.db_pool.simple_select_onecol(
+ table="federation_inbound_events_staging",
+ keyvalues={},
+ retcol="DISTINCT room_id",
+ desc="get_all_rooms_with_staged_incoming_events",
+ )
+
+ @wrap_as_background_process("_get_stats_for_federation_staging")
+ async def _get_stats_for_federation_staging(self):
+ """Update the prometheus metrics for the inbound federation staging area."""
+
+ def _get_stats_for_federation_staging_txn(txn):
+ txn.execute(
+ "SELECT coalesce(count(*), 0) FROM federation_inbound_events_staging"
+ )
+ (count,) = txn.fetchone()
+
+            txn.execute(
+                "SELECT min(received_ts) FROM federation_inbound_events_staging"
+            )
+            (received_ts,) = txn.fetchone()
+
+            # `received_ts` is an epoch timestamp in milliseconds (or None for
+            # an empty table), not an age; convert it before reporting.
+            age = 0
+            if received_ts is not None:
+                age = (self._clock.time_msec() - received_ts) / 1000
+
+            return count, age
+
+ count, age = await self.db_pool.runInteraction(
+ "_get_stats_for_federation_staging", _get_stats_for_federation_staging_txn
+ )
+
+ number_pdus_in_federation_queue.set(count)
+ oldest_pdu_in_federation_staging.set(age)
+
class EventFederationStore(EventFederationWorkerStore):
"""Responsible for storing and serving up the various graphs associated
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index cbe4be14..29f33bac 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -29,6 +29,34 @@ from synapse.types import JsonDict
logger = logging.getLogger(__name__)
+_REPLACE_STREAM_ORDERING_SQL_COMMANDS = (
+ # there should be no leftover rows without a stream_ordering2, but just in case...
+ "UPDATE events SET stream_ordering2 = stream_ordering WHERE stream_ordering2 IS NULL",
+ # now we can drop the rule and switch the columns
+ "DROP RULE populate_stream_ordering2 ON events",
+ "ALTER TABLE events DROP COLUMN stream_ordering",
+ "ALTER TABLE events RENAME COLUMN stream_ordering2 TO stream_ordering",
+ # ... and finally, rename the indexes into place for consistency with sqlite
+ "ALTER INDEX event_contains_url_index2 RENAME TO event_contains_url_index",
+ "ALTER INDEX events_order_room2 RENAME TO events_order_room",
+ "ALTER INDEX events_room_stream2 RENAME TO events_room_stream",
+ "ALTER INDEX events_ts2 RENAME TO events_ts",
+)
+
+
+class _BackgroundUpdates:
+ EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
+ EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
+ DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"
+ POPULATE_STREAM_ORDERING2 = "populate_stream_ordering2"
+ INDEX_STREAM_ORDERING2 = "index_stream_ordering2"
+ INDEX_STREAM_ORDERING2_CONTAINS_URL = "index_stream_ordering2_contains_url"
+ INDEX_STREAM_ORDERING2_ROOM_ORDER = "index_stream_ordering2_room_order"
+ INDEX_STREAM_ORDERING2_ROOM_STREAM = "index_stream_ordering2_room_stream"
+ INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts"
+ REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column"
+
+
@attr.s(slots=True, frozen=True)
class _CalculateChainCover:
"""Return value for _calculate_chain_cover_txn."""
@@ -48,19 +76,15 @@ class _CalculateChainCover:
class EventsBackgroundUpdatesStore(SQLBaseStore):
-
- EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
- EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
- DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"
-
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
self.db_pool.updates.register_background_update_handler(
- self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
+ _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME,
+ self._background_reindex_origin_server_ts,
)
self.db_pool.updates.register_background_update_handler(
- self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
+ _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
self._background_reindex_fields_sender,
)
@@ -85,7 +109,8 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
)
self.db_pool.updates.register_background_update_handler(
- self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update
+ _BackgroundUpdates.DELETE_SOFT_FAILED_EXTREMITIES,
+ self._cleanup_extremities_bg_update,
)
self.db_pool.updates.register_background_update_handler(
@@ -139,6 +164,59 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
self._purged_chain_cover_index,
)
+ ################################################################################
+
+ # bg updates for replacing stream_ordering with a BIGINT
+ # (these only run on postgres.)
+
+ self.db_pool.updates.register_background_update_handler(
+ _BackgroundUpdates.POPULATE_STREAM_ORDERING2,
+ self._background_populate_stream_ordering2,
+ )
+ # CREATE UNIQUE INDEX events_stream_ordering ON events(stream_ordering2);
+ self.db_pool.updates.register_background_index_update(
+ _BackgroundUpdates.INDEX_STREAM_ORDERING2,
+ index_name="events_stream_ordering",
+ table="events",
+ columns=["stream_ordering2"],
+ unique=True,
+ )
+ # CREATE INDEX event_contains_url_index ON events(room_id, topological_ordering, stream_ordering) WHERE contains_url = true AND outlier = false;
+ self.db_pool.updates.register_background_index_update(
+ _BackgroundUpdates.INDEX_STREAM_ORDERING2_CONTAINS_URL,
+ index_name="event_contains_url_index2",
+ table="events",
+ columns=["room_id", "topological_ordering", "stream_ordering2"],
+ where_clause="contains_url = true AND outlier = false",
+ )
+ # CREATE INDEX events_order_room ON events(room_id, topological_ordering, stream_ordering);
+ self.db_pool.updates.register_background_index_update(
+ _BackgroundUpdates.INDEX_STREAM_ORDERING2_ROOM_ORDER,
+ index_name="events_order_room2",
+ table="events",
+ columns=["room_id", "topological_ordering", "stream_ordering2"],
+ )
+ # CREATE INDEX events_room_stream ON events(room_id, stream_ordering);
+ self.db_pool.updates.register_background_index_update(
+ _BackgroundUpdates.INDEX_STREAM_ORDERING2_ROOM_STREAM,
+ index_name="events_room_stream2",
+ table="events",
+ columns=["room_id", "stream_ordering2"],
+ )
+ # CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);
+ self.db_pool.updates.register_background_index_update(
+ _BackgroundUpdates.INDEX_STREAM_ORDERING2_TS,
+ index_name="events_ts2",
+ table="events",
+ columns=["origin_server_ts", "stream_ordering2"],
+ )
+ self.db_pool.updates.register_background_update_handler(
+ _BackgroundUpdates.REPLACE_STREAM_ORDERING_COLUMN,
+ self._background_replace_stream_ordering_column,
+ )
+
+ ################################################################################
+
async def _background_reindex_fields_sender(self, progress, batch_size):
target_min_stream_id = progress["target_min_stream_id_inclusive"]
max_stream_id = progress["max_stream_id_exclusive"]
@@ -190,18 +268,18 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
}
self.db_pool.updates._background_update_progress_txn(
- txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
+ txn, _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
)
return len(rows)
result = await self.db_pool.runInteraction(
- self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
+ _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
)
if not result:
await self.db_pool.updates._end_background_update(
- self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME
+ _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME
)
return result
@@ -264,18 +342,18 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
}
self.db_pool.updates._background_update_progress_txn(
- txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
+ txn, _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, progress
)
return len(rows_to_update)
result = await self.db_pool.runInteraction(
- self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
+ _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
)
if not result:
await self.db_pool.updates._end_background_update(
- self.EVENT_ORIGIN_SERVER_TS_NAME
+ _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME
)
return result
@@ -454,7 +532,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
if not num_handled:
await self.db_pool.updates._end_background_update(
- self.DELETE_SOFT_FAILED_EXTREMITIES
+ _BackgroundUpdates.DELETE_SOFT_FAILED_EXTREMITIES
)
def _drop_table_txn(txn):
@@ -1009,3 +1087,81 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
await self.db_pool.updates._end_background_update("purged_chain_cover")
return result
+
+ async def _background_populate_stream_ordering2(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """Populate events.stream_ordering2, then replace stream_ordering
+
+ This is to deal with the fact that stream_ordering was initially created as a
+ 32-bit integer field.
+ """
+ batch_size = max(batch_size, 1)
+
+ def process(txn: Cursor) -> int:
+ last_stream = progress.get("last_stream", -(1 << 31))
+ txn.execute(
+ """
+ UPDATE events SET stream_ordering2=stream_ordering
+ WHERE stream_ordering IN (
+ SELECT stream_ordering FROM events WHERE stream_ordering > ?
+ ORDER BY stream_ordering LIMIT ?
+ )
+ RETURNING stream_ordering;
+ """,
+ (last_stream, batch_size),
+ )
+ row_count = txn.rowcount
+ if row_count == 0:
+ return 0
+ last_stream = max(row[0] for row in txn)
+ logger.info("populated stream_ordering2 up to %i", last_stream)
+
+ self.db_pool.updates._background_update_progress_txn(
+ txn,
+ _BackgroundUpdates.POPULATE_STREAM_ORDERING2,
+ {"last_stream": last_stream},
+ )
+ return row_count
+
+ result = await self.db_pool.runInteraction(
+ "_background_populate_stream_ordering2", process
+ )
+
+ if result != 0:
+ return result
+
+ await self.db_pool.updates._end_background_update(
+ _BackgroundUpdates.POPULATE_STREAM_ORDERING2
+ )
+ return 0
+
+ async def _background_replace_stream_ordering_column(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """Drop the old 'stream_ordering' column and rename 'stream_ordering2' into its place."""
+
+ def process(txn: Cursor) -> None:
+ for sql in _REPLACE_STREAM_ORDERING_SQL_COMMANDS:
+ logger.info("completing stream_ordering migration: %s", sql)
+ txn.execute(sql)
+
+ # ANALYZE the new column to build stats on it, to encourage PostgreSQL to use the
+ # indexes on it.
+        # We need to pass `execute` a dummy result-handling function, otherwise
+        # it tries to call fetchall() on the txn and fails because there is no
+        # result to fetch.
+ await self.db_pool.execute(
+ "background_analyze_new_stream_ordering_column",
+ lambda txn: None,
+ "ANALYZE events(stream_ordering2)",
+ )
+
+ await self.db_pool.runInteraction(
+ "_background_replace_stream_ordering_column", process
+ )
+
+ await self.db_pool.updates._end_background_update(
+ _BackgroundUpdates.REPLACE_STREAM_ORDERING_COLUMN
+ )
+
+ return 0
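Both `_background_populate_stream_ordering2` and its siblings follow the usual
background-update contract: handle at most `batch_size` rows, persist progress,
and return the number handled; returning 0 tells the updater to deregister the
job. A runnable toy model of that contract (illustrative names, not Synapse
APIs):

    import asyncio

    EVENTS = list(range(1, 10))  # stand-in for rows still to migrate
    PROGRESS: dict = {}          # stand-in for the persisted progress JSON

    async def run_one_batch(batch_size: int) -> int:
        last = PROGRESS.get("last_stream", -(1 << 31))
        rows = [e for e in EVENTS if e > last][: max(batch_size, 1)]
        if not rows:
            return 0  # signals completion to the updater
        PROGRESS["last_stream"] = max(rows)
        return len(rows)

    async def main() -> None:
        # The real updater re-schedules between batches; here we just loop.
        while await run_one_batch(batch_size=3):
            pass

    asyncio.run(main())
    print(PROGRESS)  # {'last_stream': 9}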
diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py
index e7618832..77486107 100644
--- a/synapse/storage/databases/main/lock.py
+++ b/synapse/storage/databases/main/lock.py
@@ -310,14 +310,25 @@ class Lock:
_excinst: Optional[BaseException],
_exctb: Optional[TracebackType],
) -> bool:
+ await self.release()
+
+ return False
+
+ async def release(self) -> None:
+ """Release the lock.
+
+ This is automatically called when using the lock as a context manager.
+ """
+
+ if self._dropped:
+ return
+
if self._looping_call.running:
self._looping_call.stop()
await self._store._drop_lock(self._lock_name, self._lock_key, self._token)
self._dropped = True
- return False
-
def __del__(self) -> None:
if not self._dropped:
# We should not be dropped without the lock being released (unless
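With `release()` split out of `__aexit__`, the lock can be used either as an
async context manager or released explicitly; a usage sketch (the acquisition
helper named here is an assumption, not shown in this diff):

    async def with_context_manager(store):
        lock = await store.try_acquire_lock("my_lock", "some_key")
        if lock is None:
            return  # another worker holds the lock
        async with lock:
            ...  # release() runs for us on exit

    async def with_explicit_release(store):
        lock = await store.try_acquire_lock("my_lock", "some_key")
        if lock is not None:
            try:
                ...
            finally:
                await lock.release()  # safe to call twice: _dropped short-circuits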
diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py
index 9b4e95e1..ba7075ca 100644
--- a/synapse/storage/databases/main/profile.py
+++ b/synapse/storage/databases/main/profile.py
@@ -73,20 +73,20 @@ class ProfileWorkerStore(SQLBaseStore):
async def set_profile_displayname(
self, user_localpart: str, new_displayname: Optional[str]
) -> None:
- await self.db_pool.simple_update_one(
+ await self.db_pool.simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
- updatevalues={"displayname": new_displayname},
+ values={"displayname": new_displayname},
desc="set_profile_displayname",
)
async def set_profile_avatar_url(
self, user_localpart: str, new_avatar_url: Optional[str]
) -> None:
- await self.db_pool.simple_update_one(
+ await self.db_pool.simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
- updatevalues={"avatar_url": new_avatar_url},
+ values={"avatar_url": new_avatar_url},
desc="set_profile_avatar_url",
)
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index e5c5cf8f..e31c5864 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -53,6 +53,9 @@ class TokenLookupResult:
valid_until_ms: The timestamp the token expires, if any.
token_owner: The "owner" of the token. This is either the same as the
user, or a server admin who is logged in as the user.
+ token_used: True if this token was used at least once in a request.
+ This field can be out of date since `get_user_by_access_token` is
+ cached.
"""
user_id = attr.ib(type=str)
@@ -62,6 +65,7 @@ class TokenLookupResult:
device_id = attr.ib(type=Optional[str], default=None)
valid_until_ms = attr.ib(type=Optional[int], default=None)
token_owner = attr.ib(type=str)
+ token_used = attr.ib(type=bool, default=False)
# Make the token owner default to the user ID, which is the common case.
@token_owner.default
@@ -69,6 +73,29 @@ class TokenLookupResult:
return self.user_id
+@attr.s(frozen=True, slots=True)
+class RefreshTokenLookupResult:
+ """Result of looking up a refresh token."""
+
+ user_id = attr.ib(type=str)
+ """The user this token belongs to."""
+
+ device_id = attr.ib(type=str)
+ """The device associated with this refresh token."""
+
+ token_id = attr.ib(type=int)
+ """The ID of this refresh token."""
+
+ next_token_id = attr.ib(type=Optional[int])
+ """The ID of the refresh token which replaced this one."""
+
+ has_next_refresh_token_been_refreshed = attr.ib(type=bool)
+ """True if the next refresh token was used for another refresh."""
+
+ has_next_access_token_been_used = attr.ib(type=bool)
+ """True if the next access token was already used at least once."""
+
+
class RegistrationWorkerStore(CacheInvalidationWorkerStore):
def __init__(
self,
@@ -441,7 +468,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
access_tokens.id as token_id,
access_tokens.device_id,
access_tokens.valid_until_ms,
- access_tokens.user_id as token_owner
+ access_tokens.user_id as token_owner,
+ access_tokens.used as token_used
FROM users
INNER JOIN access_tokens on users.name = COALESCE(puppets_user_id, access_tokens.user_id)
WHERE token = ?
@@ -449,8 +477,15 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
txn.execute(sql, (token,))
rows = self.db_pool.cursor_to_dict(txn)
+
if rows:
- return TokenLookupResult(**rows[0])
+ row = rows[0]
+
+ # This field is nullable, ensure it comes out as a boolean
+ if row["token_used"] is None:
+ row["token_used"] = False
+
+ return TokenLookupResult(**row)
return None
@@ -1072,6 +1107,111 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
desc="update_access_token_last_validated",
)
+ @cached()
+ async def mark_access_token_as_used(self, token_id: int) -> None:
+ """
+ Mark the access token as used, which invalidates the refresh token used
+ to obtain it.
+
+ Because get_user_by_access_token is cached, this function might be
+ called multiple times for the same token, effectively doing unnecessary
+ SQL updates. Because updating the `used` field only goes one way (from
+ False to True) it is safe to cache this function as well to avoid this
+ issue.
+
+ Args:
+ token_id: The ID of the access token to update.
+ Raises:
+ StoreError if there was a problem updating this.
+ """
+ await self.db_pool.simple_update_one(
+ "access_tokens",
+ {"id": token_id},
+ {"used": True},
+ desc="mark_access_token_as_used",
+ )
+
+ async def lookup_refresh_token(
+ self, token: str
+ ) -> Optional[RefreshTokenLookupResult]:
+ """Lookup a refresh token with hints about its validity."""
+
+ def _lookup_refresh_token_txn(txn) -> Optional[RefreshTokenLookupResult]:
+ txn.execute(
+ """
+ SELECT
+ rt.id token_id,
+ rt.user_id,
+ rt.device_id,
+ rt.next_token_id,
+ (nrt.next_token_id IS NOT NULL) has_next_refresh_token_been_refreshed,
+ at.used has_next_access_token_been_used
+ FROM refresh_tokens rt
+ LEFT JOIN refresh_tokens nrt ON rt.next_token_id = nrt.id
+ LEFT JOIN access_tokens at ON at.refresh_token_id = nrt.id
+ WHERE rt.token = ?
+ """,
+ (token,),
+ )
+ row = txn.fetchone()
+
+ if row is None:
+ return None
+
+ return RefreshTokenLookupResult(
+ token_id=row[0],
+ user_id=row[1],
+ device_id=row[2],
+ next_token_id=row[3],
+ has_next_refresh_token_been_refreshed=row[4],
+ # This column is nullable, ensure it's a boolean
+ has_next_access_token_been_used=(row[5] or False),
+ )
+
+ return await self.db_pool.runInteraction(
+ "lookup_refresh_token", _lookup_refresh_token_txn
+ )
+
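The two `has_next_*` hints exist so that the auth handler can distinguish an
honest retry (the client never received, or never used, the replacement
tokens) from refresh-token reuse. A sketch of the kind of policy they enable,
given a `RefreshTokenLookupResult` (illustrative, not the actual handler):

    def may_refresh(res) -> bool:
        if res.next_token_id is None:
            return True  # never exchanged before: the normal case
        # Already exchanged once: only tolerate a replay if the successor
        # tokens were never put to use, i.e. the response was likely lost.
        return not (
            res.has_next_refresh_token_been_refreshed
            or res.has_next_access_token_been_used
        )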
+ async def replace_refresh_token(self, token_id: int, next_token_id: int) -> None:
+ """
+ Set the successor of a refresh token, removing the existing successor
+ if any.
+
+ Args:
+ token_id: ID of the refresh token to update.
+ next_token_id: ID of its successor.
+ """
+
+ def _replace_refresh_token_txn(txn) -> None:
+ # First check if there was an existing refresh token
+ old_next_token_id = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ "refresh_tokens",
+ {"id": token_id},
+ "next_token_id",
+ allow_none=True,
+ )
+
+ self.db_pool.simple_update_one_txn(
+ txn,
+ "refresh_tokens",
+ {"id": token_id},
+ {"next_token_id": next_token_id},
+ )
+
+ # Delete the old "next" token if it exists. This should cascade and
+ # delete the associated access_token
+ if old_next_token_id is not None:
+ self.db_pool.simple_delete_one_txn(
+ txn,
+ "refresh_tokens",
+ {"id": old_next_token_id},
+ )
+
+ await self.db_pool.runInteraction(
+ "replace_refresh_token", _replace_refresh_token_txn
+ )
+
class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
def __init__(
@@ -1263,6 +1403,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
self._ignore_unknown_session_error = hs.config.request_token_inhibit_3pid_errors
self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
+ self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id")
async def add_access_token_to_user(
self,
@@ -1271,14 +1412,18 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
device_id: Optional[str],
valid_until_ms: Optional[int],
puppets_user_id: Optional[str] = None,
+ refresh_token_id: Optional[int] = None,
) -> int:
"""Adds an access token for the given user.
Args:
user_id: The user ID.
token: The new access token to add.
- device_id: ID of the device to associate with the access token
+ device_id: ID of the device to associate with the access token.
valid_until_ms: when the token is valid until. None for no expiry.
+            puppets_user_id: The user this token puppets (acts on behalf of),
+                if any.
+ refresh_token_id: ID of the refresh token generated alongside this
+ access token.
Raises:
StoreError if there was a problem adding this.
Returns:
@@ -1297,12 +1442,47 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
"valid_until_ms": valid_until_ms,
"puppets_user_id": puppets_user_id,
"last_validated": now,
+ "refresh_token_id": refresh_token_id,
+ "used": False,
},
desc="add_access_token_to_user",
)
return next_id
+ async def add_refresh_token_to_user(
+ self,
+ user_id: str,
+ token: str,
+ device_id: Optional[str],
+ ) -> int:
+ """Adds a refresh token for the given user.
+
+ Args:
+ user_id: The user ID.
+            token: The new refresh token to add.
+ device_id: ID of the device to associate with the refresh token.
+ Raises:
+ StoreError if there was a problem adding this.
+ Returns:
+ The token ID
+ """
+ next_id = self._refresh_tokens_id_gen.get_next()
+
+ await self.db_pool.simple_insert(
+ "refresh_tokens",
+ {
+ "id": next_id,
+ "user_id": user_id,
+ "device_id": device_id,
+ "token": token,
+ "next_token_id": None,
+ },
+ desc="add_refresh_token_to_user",
+ )
+
+ return next_id
+
def _set_device_for_access_token_txn(self, txn, token: str, device_id: str) -> str:
old_device_id = self.db_pool.simple_select_one_onecol_txn(
txn, "access_tokens", {"token": token}, "device_id"
@@ -1545,7 +1725,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
device_id: Optional[str] = None,
) -> List[Tuple[str, int, Optional[str]]]:
"""
- Invalidate access tokens belonging to a user
+ Invalidate access and refresh tokens belonging to a user
Args:
user_id: ID of user the tokens belong to
@@ -1565,7 +1745,13 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
items = keyvalues.items()
where_clause = " AND ".join(k + " = ?" for k, _ in items)
values = [v for _, v in items] # type: List[Union[str, int]]
+            # Conveniently, refresh_tokens and access_tokens both use the user_id and
+            # device_id fields. The only caveat is the `except_token_id` param, which
+            # is tricky to get right, so for now we just reuse the same where clause
+            # and values until we handle that. It seems to be used only in the "set
+            # password" handler.
+ refresh_where_clause = where_clause
+ refresh_values = values.copy()
if except_token_id:
+ # TODO: support that for refresh tokens
where_clause += " AND id != ?"
values.append(except_token_id)
@@ -1583,6 +1769,11 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
txn.execute("DELETE FROM access_tokens WHERE %s" % where_clause, values)
+ txn.execute(
+ "DELETE FROM refresh_tokens WHERE %s" % refresh_where_clause,
+ refresh_values,
+ )
+
return tokens_and_devices
return await self.db_pool.runInteraction("user_delete_access_tokens", f)
@@ -1599,6 +1790,14 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
await self.db_pool.runInteraction("delete_access_token", f)
+ async def delete_refresh_token(self, refresh_token: str) -> None:
+ def f(txn):
+ self.db_pool.simple_delete_one_txn(
+ txn, table="refresh_tokens", keyvalues={"token": refresh_token}
+ )
+
+ await self.db_pool.runInteraction("delete_refresh_token", f)
+
async def add_user_pending_deactivation(self, user_id: str) -> None:
"""
Adds a user to the table of users who need to be parted from all the rooms they're
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index 1882bfd9..20cd63c3 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -49,6 +49,12 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
"""
...
+ @property
+ @abc.abstractmethod
+ def supports_returning(self) -> bool:
+ """Do we support the `RETURNING` clause in insert/update/delete?"""
+ ...
+
@abc.abstractmethod
def check_database(
self, db_conn: ConnectionType, allow_outdated_version: bool = False
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 21411c5f..30f948a0 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -133,6 +133,11 @@ class PostgresEngine(BaseDatabaseEngine):
"""Do we support using `a = ANY(?)` and passing a list"""
return True
+ @property
+ def supports_returning(self) -> bool:
+ """Do we support the `RETURNING` clause in insert/update/delete?"""
+ return True
+
def is_deadlock(self, error):
if isinstance(error, self.module.DatabaseError):
# https://www.postgresql.org/docs/current/static/errcodes-appendix.html
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 5fe1b205..70d17d4f 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -60,6 +60,11 @@ class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]):
"""Do we support using `a = ANY(?)` and passing a list"""
return False
+ @property
+ def supports_returning(self) -> bool:
+ """Do we support the `RETURNING` clause in insert/update/delete?"""
+ return self.module.sqlite_version_info >= (3, 35, 0)
+
def check_database(self, db_conn, allow_outdated_version: bool = False):
if not allow_outdated_version:
version = self.module.sqlite_version_info
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index d36ba1d7..0a53b73c 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-SCHEMA_VERSION = 59
+SCHEMA_VERSION = 60
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
diff --git a/synapse/storage/schema/main/delta/59/14refresh_tokens.sql b/synapse/storage/schema/main/delta/59/14refresh_tokens.sql
new file mode 100644
index 00000000..9a6bce1e
--- /dev/null
+++ b/synapse/storage/schema/main/delta/59/14refresh_tokens.sql
@@ -0,0 +1,34 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Holds MSC2918 refresh tokens
+CREATE TABLE refresh_tokens (
+ id BIGINT PRIMARY KEY,
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ token TEXT NOT NULL,
+ -- When consumed, a new refresh token is generated, which is tracked by
+ -- this foreign key
+ next_token_id BIGINT REFERENCES refresh_tokens (id) ON DELETE CASCADE,
+ UNIQUE(token)
+);
+
+-- Add a reference to the refresh token generated alongside each access token
+ALTER TABLE "access_tokens"
+ ADD COLUMN refresh_token_id BIGINT REFERENCES refresh_tokens (id) ON DELETE CASCADE;
+
+-- Add a flag whether the token was already used or not
+ALTER TABLE "access_tokens"
+ ADD COLUMN used BOOLEAN;
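A toy check (not Synapse code) of the cascade declared above: deleting a
refresh token also removes any access token minted alongside it.

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("PRAGMA foreign_keys = ON")  # off by default in SQLite
    conn.execute(
        "CREATE TABLE refresh_tokens (id BIGINT PRIMARY KEY, token TEXT UNIQUE)"
    )
    conn.execute(
        "CREATE TABLE access_tokens (id BIGINT PRIMARY KEY, token TEXT,"
        " refresh_token_id BIGINT REFERENCES refresh_tokens (id) ON DELETE CASCADE)"
    )
    conn.execute("INSERT INTO refresh_tokens VALUES (1, 'refresh-1')")
    conn.execute("INSERT INTO access_tokens VALUES (10, 'access-1', 1)")
    conn.execute("DELETE FROM refresh_tokens WHERE id = 1")
    assert conn.execute("SELECT count(*) FROM access_tokens").fetchone() == (0,)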
diff --git a/synapse/storage/schema/main/delta/60/01recreate_stream_ordering.sql.postgres b/synapse/storage/schema/main/delta/60/01recreate_stream_ordering.sql.postgres
new file mode 100644
index 00000000..0edc9fe7
--- /dev/null
+++ b/synapse/storage/schema/main/delta/60/01recreate_stream_ordering.sql.postgres
@@ -0,0 +1,45 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- This migration handles the process of changing the type of `stream_ordering` to
+-- a BIGINT.
+--
+-- Note that this is only a problem on postgres as sqlite only has one "integer" type
+-- which can cope with values up to 2^63.
+
+-- First add a new column to contain the bigger stream_ordering
+ALTER TABLE events ADD COLUMN stream_ordering2 BIGINT;
+
+-- Create a rule which will populate it for new rows.
+CREATE OR REPLACE RULE "populate_stream_ordering2" AS
+ ON INSERT TO events
+ DO UPDATE events SET stream_ordering2=NEW.stream_ordering WHERE stream_ordering=NEW.stream_ordering;
+
+-- Start a bg process to populate it for old events
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (6001, 'populate_stream_ordering2', '{}');
+
+-- ... and some more to build indexes on it. These aren't really interdependent
+-- but the background_updates manager can only handle a single dependency per update.
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+ (6001, 'index_stream_ordering2', '{}', 'populate_stream_ordering2'),
+ (6001, 'index_stream_ordering2_room_order', '{}', 'index_stream_ordering2'),
+ (6001, 'index_stream_ordering2_contains_url', '{}', 'index_stream_ordering2_room_order'),
+ (6001, 'index_stream_ordering2_room_stream', '{}', 'index_stream_ordering2_contains_url'),
+ (6001, 'index_stream_ordering2_ts', '{}', 'index_stream_ordering2_room_stream');
+
+-- ... and another to do the switcheroo
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+ (6001, 'replace_stream_ordering_column', '{}', 'index_stream_ordering2_ts');
diff --git a/synapse/storage/schema/main/delta/60/02change_stream_ordering_columns.sql.postgres b/synapse/storage/schema/main/delta/60/02change_stream_ordering_columns.sql.postgres
new file mode 100644
index 00000000..630c24fd
--- /dev/null
+++ b/synapse/storage/schema/main/delta/60/02change_stream_ordering_columns.sql.postgres
@@ -0,0 +1,30 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- This migration is closely related to '01recreate_stream_ordering.sql.postgres'.
+--
+-- It updates the other tables which use an INTEGER to refer to a stream ordering.
+-- These tables are all small enough that a re-create is tractable.
+ALTER TABLE pushers ALTER COLUMN last_stream_ordering SET DATA TYPE BIGINT;
+ALTER TABLE federation_stream_position ALTER COLUMN stream_id SET DATA TYPE BIGINT;
+
+-- These aren't actually event stream orderings, but they are numbers where 2
+-- billion is a bit limiting; application_services_state is tiny, and we don't
+-- want to ever have to do this again.
+ALTER TABLE application_services_state ALTER COLUMN last_txn SET DATA TYPE BIGINT;
+ALTER TABLE application_services_state ALTER COLUMN read_receipt_stream_id SET DATA TYPE BIGINT;
+ALTER TABLE application_services_state ALTER COLUMN presence_stream_id SET DATA TYPE BIGINT;
+
+
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index d89e9d9b..4b9d0433 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -12,9 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
import threading
+import weakref
from functools import wraps
from typing import (
+ TYPE_CHECKING,
Any,
Callable,
Collection,
@@ -31,10 +34,19 @@ from typing import (
from typing_extensions import Literal
+from twisted.internet import reactor
+
from synapse.config import cache as cache_config
-from synapse.util import caches
+from synapse.metrics.background_process_metrics import wrap_as_background_process
+from synapse.util import Clock, caches
from synapse.util.caches import CacheMetric, register_cache
from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
+from synapse.util.linked_list import ListNode
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
try:
from pympler.asizeof import Asizer
@@ -82,19 +94,126 @@ def enumerate_leaves(node, depth):
yield m
+P = TypeVar("P")
+
+
+class _TimedListNode(ListNode[P]):
+ """A `ListNode` that tracks last access time."""
+
+ __slots__ = ["last_access_ts_secs"]
+
+ def update_last_access(self, clock: Clock):
+ self.last_access_ts_secs = int(clock.time())
+
+
+# Whether to insert new cache entries into the global list. We only add to it
+# if time-based eviction is enabled.
+USE_GLOBAL_LIST = False
+
+# A linked list of all cache entries, allowing efficient time based eviction.
+GLOBAL_ROOT = ListNode["_Node"].create_root_node()
+
+
+@wrap_as_background_process("LruCache._expire_old_entries")
+async def _expire_old_entries(clock: Clock, expiry_seconds: int):
+ """Walks the global cache list to find cache entries that haven't been
+ accessed in the given number of seconds.
+ """
+
+ now = int(clock.time())
+ node = GLOBAL_ROOT.prev_node
+ assert node is not None
+
+ i = 0
+
+ logger.debug("Searching for stale caches")
+
+ while node is not GLOBAL_ROOT:
+ # Only the root node isn't a `_TimedListNode`.
+ assert isinstance(node, _TimedListNode)
+
+ if node.last_access_ts_secs > now - expiry_seconds:
+ break
+
+ cache_entry = node.get_cache_entry()
+ next_node = node.prev_node
+
+ # The node should always have a reference to a cache entry and a valid
+ # `prev_node`, as we only drop them when we remove the node from the
+ # list.
+ assert next_node is not None
+ assert cache_entry is not None
+ cache_entry.drop_from_cache()
+
+ # If we do lots of work at once we yield to allow other stuff to happen.
+ if (i + 1) % 10000 == 0:
+ logger.debug("Waiting during drop")
+ await clock.sleep(0)
+ logger.debug("Waking during drop")
+
+ node = next_node
+
+ # If we've yielded then our current node may have been evicted, so we
+        # need to check that it's still valid.
+ if node.prev_node is None:
+ break
+
+ i += 1
+
+ logger.info("Dropped %d items from caches", i)
+
+
+def setup_expire_lru_cache_entries(hs: "HomeServer"):
+ """Start a background job that expires all cache entries if they have not
+ been accessed for the given number of seconds.
+ """
+ if not hs.config.caches.expiry_time_msec:
+ return
+
+ logger.info(
+ "Expiring LRU caches after %d seconds", hs.config.caches.expiry_time_msec / 1000
+ )
+
+ global USE_GLOBAL_LIST
+ USE_GLOBAL_LIST = True
+
+ clock = hs.get_clock()
+ clock.looping_call(
+ _expire_old_entries, 30 * 1000, clock, hs.config.caches.expiry_time_msec / 1000
+ )
+
+
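The global list gives the background job an oldest-first walk order, so it can
stop at the first entry that is still fresh. A toy model of the same idea (an
OrderedDict standing in for the linked lists; not the Synapse classes):

    import time
    from collections import OrderedDict

    cache: "OrderedDict[str, tuple]" = OrderedDict()  # key -> (last_access, value)

    def touch(key: str, value) -> None:
        cache[key] = (time.time(), value)
        cache.move_to_end(key)  # most recently used entries live at the hot end

    def expire(expiry_seconds: float) -> None:
        now = time.time()
        while cache:
            key, (last_access, _) = next(iter(cache.items()))  # coldest entry
            if last_access > now - expiry_seconds:
                break  # everything after this is fresher still
            cache.popitem(last=False)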
class _Node:
- __slots__ = ["prev_node", "next_node", "key", "value", "callbacks", "memory"]
+ __slots__ = [
+ "_list_node",
+ "_global_list_node",
+ "_cache",
+ "key",
+ "value",
+ "callbacks",
+ "memory",
+ ]
def __init__(
self,
- prev_node,
- next_node,
+ root: "ListNode[_Node]",
key,
value,
+ cache: "weakref.ReferenceType[LruCache]",
+ clock: Clock,
callbacks: Collection[Callable[[], None]] = (),
):
- self.prev_node = prev_node
- self.next_node = next_node
+ self._list_node = ListNode.insert_after(self, root)
+ self._global_list_node = None
+ if USE_GLOBAL_LIST:
+ self._global_list_node = _TimedListNode.insert_after(self, GLOBAL_ROOT)
+ self._global_list_node.update_last_access(clock)
+
+ # We store a weak reference to the cache object so that this _Node can
+ # remove itself from the cache. If the cache is dropped we ensure we
+ # remove our entries in the lists.
+ self._cache = cache
+
self.key = key
self.value = value
@@ -116,11 +235,16 @@ class _Node:
self.memory = (
_get_size_of(key)
+ _get_size_of(value)
+ + _get_size_of(self._list_node, recurse=False)
+ _get_size_of(self.callbacks, recurse=False)
+ _get_size_of(self, recurse=False)
)
self.memory += _get_size_of(self.memory, recurse=False)
+ if self._global_list_node:
+ self.memory += _get_size_of(self._global_list_node, recurse=False)
+ self.memory += _get_size_of(self._global_list_node.last_access_ts_secs)
+
def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None:
"""Add to stored list of callbacks, removing duplicates."""
@@ -147,6 +271,32 @@ class _Node:
self.callbacks = None
+ def drop_from_cache(self) -> None:
+ """Drop this node from the cache.
+
+ Ensures that the entry gets removed from the cache and that we get
+ removed from all lists.
+ """
+ cache = self._cache()
+ if not cache or not cache.pop(self.key, None):
+ # `cache.pop` should call `drop_from_lists()`, unless this Node had
+ # already been removed from the cache.
+ self.drop_from_lists()
+
+ def drop_from_lists(self) -> None:
+ """Remove this node from the cache lists."""
+ self._list_node.remove_from_list()
+
+ if self._global_list_node:
+ self._global_list_node.remove_from_list()
+
+ def move_to_front(self, clock: Clock, cache_list_root: ListNode) -> None:
+ """Moves this node to the front of all the lists its in."""
+ self._list_node.move_after(cache_list_root)
+ if self._global_list_node:
+ self._global_list_node.move_after(GLOBAL_ROOT)
+ self._global_list_node.update_last_access(clock)
+
class LruCache(Generic[KT, VT]):
"""
@@ -163,6 +313,7 @@ class LruCache(Generic[KT, VT]):
size_callback: Optional[Callable] = None,
metrics_collection_callback: Optional[Callable[[], None]] = None,
apply_cache_factor_from_config: bool = True,
+ clock: Optional[Clock] = None,
):
"""
Args:
@@ -188,6 +339,13 @@ class LruCache(Generic[KT, VT]):
apply_cache_factor_from_config (bool): If true, `max_size` will be
multiplied by a cache factor derived from the homeserver config
"""
+ # Default `clock` to something sensible. Note that we rename it to
+        # `real_clock` so that mypy doesn't think it's still `Optional`.
+ if clock is None:
+ real_clock = Clock(reactor)
+ else:
+ real_clock = clock
+
cache = cache_type()
self.cache = cache # Used for introspection.
self.apply_cache_factor_from_config = apply_cache_factor_from_config
@@ -219,17 +377,31 @@ class LruCache(Generic[KT, VT]):
# this is exposed for access from outside this class
self.metrics = metrics
- list_root = _Node(None, None, None, None)
- list_root.next_node = list_root
- list_root.prev_node = list_root
+ # We create a single weakref to self here so that we don't need to keep
+ # creating more each time we create a `_Node`.
+ weak_ref_to_self = weakref.ref(self)
+
+ list_root = ListNode[_Node].create_root_node()
lock = threading.Lock()
def evict():
while cache_len() > self.max_size:
+ # Get the last node in the list (i.e. the oldest node).
todelete = list_root.prev_node
- evicted_len = delete_node(todelete)
- cache.pop(todelete.key, None)
+
+ # The list root should always have a valid `prev_node` if the
+ # cache is not empty.
+ assert todelete is not None
+
+ # The node should always have a reference to a cache entry, as
+ # we only drop the cache entry when we remove the node from the
+ # list.
+ node = todelete.get_cache_entry()
+ assert node is not None
+
+ evicted_len = delete_node(node)
+ cache.pop(node.key, None)
if metrics:
metrics.inc_evictions(evicted_len)
@@ -255,11 +427,7 @@ class LruCache(Generic[KT, VT]):
self.len = synchronized(cache_len)
def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()):
- prev_node = list_root
- next_node = prev_node.next_node
- node = _Node(prev_node, next_node, key, value, callbacks)
- prev_node.next_node = node
- next_node.prev_node = node
+ node = _Node(list_root, key, value, weak_ref_to_self, real_clock, callbacks)
cache[key] = node
if size_callback:
@@ -268,23 +436,11 @@ class LruCache(Generic[KT, VT]):
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.inc_memory_usage(node.memory)
- def move_node_to_front(node):
- prev_node = node.prev_node
- next_node = node.next_node
- prev_node.next_node = next_node
- next_node.prev_node = prev_node
- prev_node = list_root
- next_node = prev_node.next_node
- node.prev_node = prev_node
- node.next_node = next_node
- prev_node.next_node = node
- next_node.prev_node = node
-
- def delete_node(node):
- prev_node = node.prev_node
- next_node = node.next_node
- prev_node.next_node = next_node
- next_node.prev_node = prev_node
+ def move_node_to_front(node: _Node):
+ node.move_to_front(real_clock, list_root)
+
+ def delete_node(node: _Node) -> int:
+ node.drop_from_lists()
deleted_len = 1
if size_callback:
@@ -411,10 +567,13 @@ class LruCache(Generic[KT, VT]):
@synchronized
def cache_clear() -> None:
- list_root.next_node = list_root
- list_root.prev_node = list_root
for node in cache.values():
node.run_and_clear_callbacks()
+ node.drop_from_lists()
+
+ assert list_root.next_node == list_root
+ assert list_root.prev_node == list_root
+
cache.clear()
if size_callback:
cached_cache_len[0] = 0
@@ -484,3 +643,11 @@ class LruCache(Generic[KT, VT]):
self._on_resize()
return True
return False
+
+ def __del__(self) -> None:
+ # We're about to be deleted, so we make sure to clear up all the nodes
+ # and run callbacks, etc.
+ #
+ # This happens e.g. in the sync code where we have an expiring cache of
+ # lru caches.
+ self.clear()
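
A minimal standalone sketch of the idea behind the LruCache changes above: every access re-stamps the entry and moves it to the fresh end of a structure ordered by last access, so an expiry sweep only ever has to inspect the stale end. The names here (`TimedCache`, `sweep`) are illustrative, not part of the patch; the real code uses the `ListNode`/`GLOBAL_ROOT` machinery added below rather than an `OrderedDict`.

    import time
    from collections import OrderedDict

    class TimedCache:
        def __init__(self):
            # OrderedDict keeps insertion order; we re-insert on access so
            # the oldest entry is always first, mirroring the tail of the
            # real per-cache and global linked lists.
            self._entries = OrderedDict()  # key -> (value, last_access)

        def set(self, key, value):
            self._entries[key] = (value, time.monotonic())

        def get(self, key):
            value, _ = self._entries.pop(key)
            # Move to the fresh end and update the access time.
            self._entries[key] = (value, time.monotonic())
            return value

        def sweep(self, expiry_seconds):
            now = time.monotonic()
            # Entries are ordered oldest-first, so we can stop at the first
            # entry that is still fresh.
            while self._entries:
                key, (_, last_access) = next(iter(self._entries.items()))
                if now - last_access <= expiry_seconds:
                    break
                del self._entries[key]
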
diff --git a/synapse/util/linked_list.py b/synapse/util/linked_list.py
new file mode 100644
index 00000000..a456b136
--- /dev/null
+++ b/synapse/util/linked_list.py
@@ -0,0 +1,150 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A circular doubly linked list implementation.
+"""
+
+import threading
+from typing import Generic, Optional, Type, TypeVar
+
+P = TypeVar("P")
+LN = TypeVar("LN", bound="ListNode")
+
+
+class ListNode(Generic[P]):
+ """A node in a circular doubly linked list, with an (optional) reference to
+ a cache entry.
+
+ The reference should only be `None` for the root node or if the node has
+ been removed from the list.
+ """
+
+ # A lock to protect mutating the list prev/next pointers.
+ _LOCK = threading.Lock()
+
+ # We don't use attrs here, as on py3.6 a class can't both use
+ # `attr.s(slots=True)` and inherit from `Generic`, for some reason
+ __slots__ = [
+ "cache_entry",
+ "prev_node",
+ "next_node",
+ ]
+
+ def __init__(self, cache_entry: Optional[P] = None) -> None:
+ self.cache_entry = cache_entry
+ self.prev_node: Optional[ListNode[P]] = None
+ self.next_node: Optional[ListNode[P]] = None
+
+ @classmethod
+ def create_root_node(cls: Type["ListNode[P]"]) -> "ListNode[P]":
+ """Create a new linked list by creating a "root" node, which is a node
+ that has prev_node/next_node pointing to itself and no associated cache
+ entry.
+ """
+ root = cls()
+ root.prev_node = root
+ root.next_node = root
+ return root
+
+ @classmethod
+ def insert_after(
+ cls: Type[LN],
+ cache_entry: P,
+ node: "ListNode[P]",
+ ) -> LN:
+ """Create a new list node that is placed after the given node.
+
+ Args:
+ cache_entry: The associated cache entry.
+ node: The existing node in the list to insert the new entry after.
+ """
+ new_node = cls(cache_entry)
+ with cls._LOCK:
+ new_node._refs_insert_after(node)
+ return new_node
+
+ def remove_from_list(self):
+ """Remove this node from the list."""
+ with self._LOCK:
+ self._refs_remove_node_from_list()
+
+ # We drop the reference to the cache entry to break the reference cycle
+ # between the list node and cache entry, allowing the two to be dropped
+ # immediately rather than at the next GC.
+ self.cache_entry = None
+
+ def move_after(self, node: "ListNode"):
+ """Move this node from its current location in the list to after the
+ given node.
+ """
+ with self._LOCK:
+ # We assert that both this node and the target node are still "alive".
+ assert self.prev_node
+ assert self.next_node
+ assert node.prev_node
+ assert node.next_node
+
+ assert self is not node
+
+ # Remove self from the list
+ self._refs_remove_node_from_list()
+
+ # Insert self back into the list, after target node
+ self._refs_insert_after(node)
+
+ def _refs_remove_node_from_list(self):
+ """Internal method to *just* remove the node from the list, without
+ e.g. clearing out the cache entry.
+ """
+ if self.prev_node is None or self.next_node is None:
+ # We've already been removed from the list.
+ return
+
+ prev_node = self.prev_node
+ next_node = self.next_node
+
+ prev_node.next_node = next_node
+ next_node.prev_node = prev_node
+
+ # We set these to None so that we don't get circular references,
+ # allowing us to be dropped without having to go via the GC.
+ self.prev_node = None
+ self.next_node = None
+
+ def _refs_insert_after(self, node: "ListNode"):
+ """Internal method to insert the node after the given node."""
+
+ # This method should only be called when we're not already in the list.
+ assert self.prev_node is None
+ assert self.next_node is None
+
+ # We expect the given node to be in the list and thus have valid
+ # prev/next refs.
+ assert node.next_node
+ assert node.prev_node
+
+ prev_node = node
+ next_node = node.next_node
+
+ self.prev_node = prev_node
+ self.next_node = next_node
+
+ prev_node.next_node = self
+ next_node.prev_node = self
+
+ def get_cache_entry(self) -> Optional[P]:
+ """Get the cache entry, returns None if this is the root node (i.e.
+ cache_entry is None) or if the entry has been dropped.
+ """
+ return self.cache_entry
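
A brief usage sketch of the new ListNode API added above (illustrative only; the entries here are plain strings, whereas the cache stores `_Node` objects):

    from synapse.util.linked_list import ListNode

    root = ListNode.create_root_node()          # empty circular list
    a = ListNode.insert_after("entry-a", root)  # list: root <-> a
    b = ListNode.insert_after("entry-b", root)  # list: root <-> b <-> a

    a.move_after(root)                          # a is now freshest: root <-> a <-> b
    assert root.next_node is a
    assert root.prev_node.get_cache_entry() == "entry-b"  # oldest at the tail

    b.remove_from_list()                        # unlinks b and clears its entry
    assert b.get_cache_entry() is None

Note that `remove_from_list` nulls out both the prev/next pointers and the cache entry, which is what lets nodes be freed promptly instead of waiting for the cycle collector.
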
diff --git a/sytest-blacklist b/sytest-blacklist
index 89c4e828..566ef967 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -45,5 +45,4 @@ Peeked rooms only turn up in the sync for the device who peeked them
# Blacklisted due to changes made in #10272
Outbound federation will ignore a missing event with bad JSON for room version 6
-Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination
Federation rejects inbound events where the prev_events cannot be found
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 1b0a8157..f76fea4f 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -58,6 +58,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
user_id=self.test_user, token_id=5, device_id="device"
)
self.store.get_user_by_access_token = simple_async_mock(user_info)
+ self.store.mark_access_token_as_used = simple_async_mock(None)
request = Mock(args={})
request.args[b"access_token"] = [self.test_token]
diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py
index 8c215d50..aab44bce 100644
--- a/tests/federation/transport/test_knocking.py
+++ b/tests/federation/transport/test_knocking.py
@@ -205,9 +205,7 @@ class FederationKnockingTestCase(
# Have this homeserver skip event auth checks. This is necessary due to
# event auth checks ensuring that events were signed by the sender's homeserver.
- async def _check_event_auth(
- origin, event, context, state, auth_events, backfilled
- ):
+ async def _check_event_auth(origin, event, context, *args, **kwargs):
return context
homeserver.get_federation_handler()._check_event_auth = _check_event_auth
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 84c38b29..3ac48e5e 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -257,7 +257,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase):
self.assertEqual(device_data, {"device_data": {"foo": "bar"}})
# Create a new login for the user and dehydrated the device
- device_id, access_token = self.get_success(
+ device_id, access_token, _expiration_time, _refresh_token = self.get_success(
self.registration.register_device(
user_id=user_id,
device_id=None,
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 8796af45..ba8cf44f 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -251,7 +251,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
join_event.signatures[other_server] = {"x": "y"}
with LoggingContext("send_join"):
d = run_in_background(
- self.handler.on_send_join_request, other_server, join_event
+ self.handler.on_send_membership_event, other_server, join_event
)
self.get_success(d)
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index dfb9b3a0..18e92e90 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -734,7 +734,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
- self.auth = hs.get_auth()
+ self._event_auth_handler = hs.get_event_auth_handler()
# We don't actually check signatures in tests, so lets just create a
# random key to use.
@@ -846,7 +846,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
builder = EventBuilder(
state=self.state,
- auth=self.auth,
+ event_auth_handler=self._event_auth_handler,
store=self.store,
clock=self.clock,
hostname=hostname,
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index c5f6bc3c..d3efb67e 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -19,7 +19,7 @@ from synapse.api.constants import UserTypes
from synapse.api.errors import Codes, ResourceLimitError, SynapseError
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.spam_checker_api import RegistrationBehaviour
-from synapse.types import RoomAlias, UserID, create_requester
+from synapse.types import RoomAlias, RoomID, UserID, create_requester
from tests.test_utils import make_awaitable
from tests.unittest import override_config
@@ -719,3 +719,50 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
)
return user_id, token
+
+
+class RemoteAutoJoinTestCase(unittest.HomeserverTestCase):
+ """Tests auto-join on remote rooms."""
+
+ def make_homeserver(self, reactor, clock):
+ self.room_id = "!roomid:remotetest"
+
+ async def update_membership(*args, **kwargs):
+ pass
+
+ async def lookup_room_alias(*args, **kwargs):
+ return RoomID.from_string(self.room_id), ["remotetest"]
+
+ self.room_member_handler = Mock(spec=["update_membership", "lookup_room_alias"])
+ self.room_member_handler.update_membership.side_effect = update_membership
+ self.room_member_handler.lookup_room_alias.side_effect = lookup_room_alias
+
+ hs = self.setup_test_homeserver(room_member_handler=self.room_member_handler)
+ return hs
+
+ def prepare(self, reactor, clock, hs):
+ self.handler = self.hs.get_registration_handler()
+ self.store = self.hs.get_datastore()
+
+ @override_config({"auto_join_rooms": ["#room:remotetest"]})
+ def test_auto_create_auto_join_remote_room(self):
+ """Tests that we don't attempt to create remote rooms, and that we don't attempt
+ to invite ourselves to rooms we're not in."""
+
+ # Register a first user; this should call _create_and_join_rooms
+ self.get_success(self.handler.register_user(localpart="jeff"))
+
+ _, kwargs = self.room_member_handler.update_membership.call_args
+
+ self.assertEqual(kwargs["room_id"], self.room_id)
+ self.assertEqual(kwargs["action"], "join")
+ self.assertEqual(kwargs["remote_room_hosts"], ["remotetest"])
+
+ # Register a second user; this should call _join_rooms
+ self.get_success(self.handler.register_user(localpart="jeff2"))
+
+ _, kwargs = self.room_member_handler.update_membership.call_args
+
+ self.assertEqual(kwargs["room_id"], self.room_id)
+ self.assertEqual(kwargs["action"], "join")
+ self.assertEqual(kwargs["remote_room_hosts"], ["remotetest"])
diff --git a/tests/handlers/test_space_summary.py b/tests/handlers/test_space_summary.py
index 131d362c..9771d3fb 100644
--- a/tests/handlers/test_space_summary.py
+++ b/tests/handlers/test_space_summary.py
@@ -14,6 +14,7 @@
from typing import Any, Iterable, Optional, Tuple
from unittest import mock
+from synapse.api.constants import EventContentFields, RoomTypes
from synapse.api.errors import AuthError
from synapse.handlers.space_summary import _child_events_comparison_key
from synapse.rest import admin
@@ -97,9 +98,21 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
self.hs = hs
self.handler = self.hs.get_space_summary_handler()
+ # Create a user.
self.user = self.register_user("user", "pass")
self.token = self.login("user", "pass")
+ # Create a space and a child room.
+ self.space = self.helper.create_room_as(
+ self.user,
+ tok=self.token,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+ },
+ )
+ self.room = self.helper.create_room_as(self.user, tok=self.token)
+ self._add_child(self.space, self.room, self.token)
+
def _add_child(self, space_id: str, room_id: str, token: str) -> None:
"""Add a child room to a space."""
self.helper.send_state(
@@ -128,43 +141,32 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
def test_simple_space(self):
"""Test a simple space with a single room."""
- space = self.helper.create_room_as(self.user, tok=self.token)
- room = self.helper.create_room_as(self.user, tok=self.token)
- self._add_child(space, room, self.token)
-
- result = self.get_success(self.handler.get_space_summary(self.user, space))
+ result = self.get_success(self.handler.get_space_summary(self.user, self.space))
# The result should have the space and the room in it, along with a link
# from space -> room.
- self._assert_rooms(result, [space, room])
- self._assert_events(result, [(space, room)])
+ self._assert_rooms(result, [self.space, self.room])
+ self._assert_events(result, [(self.space, self.room)])
def test_visibility(self):
"""A user not in a space cannot inspect it."""
- space = self.helper.create_room_as(self.user, tok=self.token)
- room = self.helper.create_room_as(self.user, tok=self.token)
- self._add_child(space, room, self.token)
-
user2 = self.register_user("user2", "pass")
token2 = self.login("user2", "pass")
# The user cannot see the space.
- self.get_failure(self.handler.get_space_summary(user2, space), AuthError)
+ self.get_failure(self.handler.get_space_summary(user2, self.space), AuthError)
# Joining the room causes it to be visible.
- self.helper.join(space, user2, tok=token2)
- result = self.get_success(self.handler.get_space_summary(user2, space))
+ self.helper.join(self.space, user2, tok=token2)
+ result = self.get_success(self.handler.get_space_summary(user2, self.space))
# The result should only have the space, but includes the link to the room.
- self._assert_rooms(result, [space])
- self._assert_events(result, [(space, room)])
+ self._assert_rooms(result, [self.space])
+ self._assert_events(result, [(self.space, self.room)])
def test_world_readable(self):
"""A world-readable room is visible to everyone."""
- space = self.helper.create_room_as(self.user, tok=self.token)
- room = self.helper.create_room_as(self.user, tok=self.token)
- self._add_child(space, room, self.token)
self.helper.send_state(
- space,
+ self.space,
event_type="m.room.history_visibility",
body={"history_visibility": "world_readable"},
tok=self.token,
@@ -173,6 +175,6 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
user2 = self.register_user("user2", "pass")
# The space should be visible, as well as the link to the room.
- result = self.get_success(self.handler.get_space_summary(user2, space))
- self._assert_rooms(result, [space])
- self._assert_events(result, [(space, room)])
+ result = self.get_success(self.handler.get_space_summary(user2, self.space))
+ self._assert_rooms(result, [self.space])
+ self._assert_events(result, [(self.space, self.room)])
diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 584da583..a0c710f8 100644
--- a/tests/replication/test_federation_sender_shard.py
+++ b/tests/replication/test_federation_sender_shard.py
@@ -228,7 +228,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
builder.build(prev_event_ids=prev_event_ids, auth_event_ids=None)
)
- self.get_success(federation.on_send_join_request(remote_server, join_event))
+ self.get_success(federation.on_send_membership_event(remote_server, join_event))
self.replicate()
return room
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index d599a4c9..4fccce34 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -939,7 +939,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
"""
channel = self.make_request("POST", self.url, b"{}")
- self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_not_admin(self):
@@ -950,7 +950,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", url, access_token=self.other_user_token)
- self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual("You are not a server admin", channel.json_body["error"])
channel = self.make_request(
@@ -960,7 +960,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
content=b"{}",
)
- self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual("You are not a server admin", channel.json_body["error"])
def test_user_does_not_exist(self):
@@ -990,7 +990,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
def test_user_is_not_local(self):
@@ -1006,7 +1006,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
def test_deactivate_user_erase_true(self):
"""
- Test deactivating an user and set `erase` to `true`
+ Test deactivating a user and setting `erase` to `true`
"""
# Get user
@@ -1016,24 +1016,22 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(False, channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User1", channel.json_body["displayname"])
- # Deactivate user
- body = json.dumps({"erase": True})
-
+ # Deactivate and erase user
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content={"erase": True},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Get user
channel = self.make_request(
@@ -1042,7 +1040,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(True, channel.json_body["deactivated"])
self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -1053,7 +1051,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
def test_deactivate_user_erase_false(self):
"""
- Test deactivating an user and set `erase` to `false`
+ Test deactivating a user and setting `erase` to `false`
"""
# Get user
@@ -1063,7 +1061,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(False, channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
@@ -1071,13 +1069,11 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
self.assertEqual("User1", channel.json_body["displayname"])
# Deactivate user
- body = json.dumps({"erase": False})
-
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content={"erase": False},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
@@ -1089,7 +1085,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(True, channel.json_body["deactivated"])
self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -1098,6 +1094,60 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
self._is_erased("@user:test", False)
+ def test_deactivate_user_erase_true_no_profile(self):
+ """
+ Test deactivating a user and setting `erase` to `true`
+ when the user has no profile information (stored in the database table `profiles`).
+ """
+
+ # Users normally have an entry in `profiles`, but occasionally they are created without one.
+ # To test deactivation for users without a profile, we delete the profile information for our user.
+ self.get_success(
+ self.store.db_pool.simple_delete_one(
+ table="profiles", keyvalues={"user_id": "user"}
+ )
+ )
+
+ # Get user
+ channel = self.make_request(
+ "GET",
+ self.url_other_user,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual("@user:test", channel.json_body["name"])
+ self.assertEqual(False, channel.json_body["deactivated"])
+ self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
+ self.assertIsNone(channel.json_body["avatar_url"])
+ self.assertIsNone(channel.json_body["displayname"])
+
+ # Deactivate and erase user
+ channel = self.make_request(
+ "POST",
+ self.url,
+ access_token=self.admin_user_tok,
+ content={"erase": True},
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ # Get user
+ channel = self.make_request(
+ "GET",
+ self.url_other_user,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual("@user:test", channel.json_body["name"])
+ self.assertEqual(True, channel.json_body["deactivated"])
+ self.assertEqual(0, len(channel.json_body["threepids"]))
+ self.assertIsNone(channel.json_body["avatar_url"])
+ self.assertIsNone(channel.json_body["displayname"])
+
+ self._is_erased("@user:test", True)
+
def _is_erased(self, user_id: str, expect: bool) -> None:
"""Assert that the user is erased or not"""
d = self.store.is_user_erased(user_id)
@@ -1150,7 +1200,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_token,
)
- self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual("You are not a server admin", channel.json_body["error"])
channel = self.make_request(
@@ -1160,7 +1210,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content=b"{}",
)
- self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual("You are not a server admin", channel.json_body["error"])
def test_user_does_not_exist(self):
@@ -1177,6 +1227,58 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"])
+ def test_get_user(self):
+ """
+ Test a simple get of a user.
+ """
+ channel = self.make_request(
+ "GET",
+ self.url_other_user,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual("@user:test", channel.json_body["name"])
+ self.assertEqual("User", channel.json_body["displayname"])
+ self._check_fields(channel.json_body)
+
+ def test_get_user_with_sso(self):
+ """
+ Test getting a user with SSO details.
+ """
+ self.get_success(
+ self.store.record_user_external_id(
+ "auth_provider1", "external_id1", self.other_user
+ )
+ )
+ self.get_success(
+ self.store.record_user_external_id(
+ "auth_provider2", "external_id2", self.other_user
+ )
+ )
+
+ channel = self.make_request(
+ "GET",
+ self.url_other_user,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual("@user:test", channel.json_body["name"])
+ self.assertEqual(
+ "external_id1", channel.json_body["external_ids"][0]["external_id"]
+ )
+ self.assertEqual(
+ "auth_provider1", channel.json_body["external_ids"][0]["auth_provider"]
+ )
+ self.assertEqual(
+ "external_id2", channel.json_body["external_ids"][1]["external_id"]
+ )
+ self.assertEqual(
+ "auth_provider2", channel.json_body["external_ids"][1]["auth_provider"]
+ )
+ self._check_fields(channel.json_body)
+
def test_create_server_admin(self):
"""
Check that a new admin user is created successfully.
@@ -1184,30 +1286,29 @@ class UserRestTestCase(unittest.HomeserverTestCase):
url = "/_synapse/admin/v2/users/@bob:test"
# Create user (server admin)
- body = json.dumps(
- {
- "password": "abc123",
- "admin": True,
- "displayname": "Bob's name",
- "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
- "avatar_url": "mxc://fibble/wibble",
- }
- )
+ body = {
+ "password": "abc123",
+ "admin": True,
+ "displayname": "Bob's name",
+ "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+ "avatar_url": "mxc://fibble/wibble",
+ }
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content=body,
)
- self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
self.assertTrue(channel.json_body["admin"])
self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"])
+ self._check_fields(channel.json_body)
# Get user
channel = self.make_request(
@@ -1216,7 +1317,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1225,6 +1326,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertFalse(channel.json_body["is_guest"])
self.assertFalse(channel.json_body["deactivated"])
self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"])
+ self._check_fields(channel.json_body)
def test_create_user(self):
"""
@@ -1233,30 +1335,29 @@ class UserRestTestCase(unittest.HomeserverTestCase):
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
- body = json.dumps(
- {
- "password": "abc123",
- "admin": False,
- "displayname": "Bob's name",
- "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
- "avatar_url": "mxc://fibble/wibble",
- }
- )
+ body = {
+ "password": "abc123",
+ "admin": False,
+ "displayname": "Bob's name",
+ "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+ "avatar_url": "mxc://fibble/wibble",
+ }
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content=body,
)
- self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
self.assertFalse(channel.json_body["admin"])
self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"])
+ self._check_fields(channel.json_body)
# Get user
channel = self.make_request(
@@ -1265,7 +1366,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1275,6 +1376,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertFalse(channel.json_body["deactivated"])
self.assertFalse(channel.json_body["shadow_banned"])
self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"])
+ self._check_fields(channel.json_body)
@override_config(
{"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0}
@@ -1311,16 +1413,14 @@ class UserRestTestCase(unittest.HomeserverTestCase):
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
- body = json.dumps({"password": "abc123", "admin": False})
-
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content={"password": "abc123", "admin": False},
)
- self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertFalse(channel.json_body["admin"])
@@ -1350,17 +1450,15 @@ class UserRestTestCase(unittest.HomeserverTestCase):
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
- body = json.dumps({"password": "abc123", "admin": False})
-
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content={"password": "abc123", "admin": False},
)
# Admin user is not blocked by mau anymore
- self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertFalse(channel.json_body["admin"])
@@ -1382,21 +1480,19 @@ class UserRestTestCase(unittest.HomeserverTestCase):
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
- body = json.dumps(
- {
- "password": "abc123",
- "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
- }
- )
+ body = {
+ "password": "abc123",
+ "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+ }
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content=body,
)
- self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
@@ -1426,21 +1522,19 @@ class UserRestTestCase(unittest.HomeserverTestCase):
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
- body = json.dumps(
- {
- "password": "abc123",
- "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
- }
- )
+ body = {
+ "password": "abc123",
+ "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+ }
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content=body,
)
- self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
@@ -1457,16 +1551,15 @@ class UserRestTestCase(unittest.HomeserverTestCase):
"""
# Change password
- body = json.dumps({"password": "hahaha"})
-
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content={"password": "hahaha"},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self._check_fields(channel.json_body)
def test_set_displayname(self):
"""
@@ -1474,16 +1567,14 @@ class UserRestTestCase(unittest.HomeserverTestCase):
"""
# Modify user
- body = json.dumps({"displayname": "foobar"})
-
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content={"displayname": "foobar"},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
@@ -1494,7 +1585,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
@@ -1504,18 +1595,14 @@ class UserRestTestCase(unittest.HomeserverTestCase):
"""
# Delete old and add new threepid to user
- body = json.dumps(
- {"threepids": [{"medium": "email", "address": "bob3@bob.bob"}]}
- )
-
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
- content=body.encode(encoding="utf_8"),
+ content={"threepids": [{"medium": "email", "address": "bob3@bob.bob"}]},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"])
@@ -1527,7 +1614,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"])
@@ -1552,7 +1639,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
@@ -1567,7 +1654,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"deactivated": True},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
@@ -1583,7 +1670,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
@@ -1610,7 +1697,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"deactivated": True},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
@@ -1626,7 +1713,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"displayname": "Foobar"},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
self.assertEqual("Foobar", channel.json_body["displayname"])
@@ -1650,7 +1737,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False},
)
- self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(400, channel.code, msg=channel.json_body)
# Reactivate the user.
channel = self.make_request(
@@ -1659,7 +1746,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False, "password": "foo"},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self.assertIsNotNone(channel.json_body["password_hash"])
@@ -1681,7 +1768,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False, "password": "foo"},
)
- self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
# Reactivate the user without a password.
@@ -1691,7 +1778,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
@@ -1713,7 +1800,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False, "password": "foo"},
)
- self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
# Reactivate the user without a password.
@@ -1723,7 +1810,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
@@ -1742,7 +1829,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"admin": True},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["admin"])
@@ -1753,7 +1840,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["admin"])
@@ -1772,7 +1859,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"password": "abc123"},
)
- self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
@@ -1783,7 +1870,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
self.assertEqual(0, channel.json_body["deactivated"])
@@ -1796,7 +1883,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"password": "abc123", "deactivated": "false"},
)
- self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(400, channel.code, msg=channel.json_body)
# Check user is not deactivated
channel = self.make_request(
@@ -1805,7 +1892,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
@@ -1830,7 +1917,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": True},
)
- self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertTrue(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
self._is_erased(user_id, False)
@@ -1838,6 +1925,25 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertIsNone(self.get_success(d))
self._is_erased(user_id, True)
+ def _check_fields(self, content: JsonDict):
+ """Checks that the expected user attributes are present in content
+
+ Args:
+ content: Content dictionary to check
+ """
+ self.assertIn("displayname", content)
+ self.assertIn("threepids", content)
+ self.assertIn("avatar_url", content)
+ self.assertIn("admin", content)
+ self.assertIn("deactivated", content)
+ self.assertIn("shadow_banned", content)
+ self.assertIn("password_hash", content)
+ self.assertIn("creation_ts", content)
+ self.assertIn("appservice_id", content)
+ self.assertIn("consent_server_notice_sent", content)
+ self.assertIn("consent_version", content)
+ self.assertIn("external_ids", content)
+
class UserMembershipRestTestCase(unittest.HomeserverTestCase):
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index ed55a640..69798e95 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -52,6 +52,7 @@ class RestHelper:
room_version: str = None,
tok: str = None,
expect_code: int = 200,
+ extra_content: Optional[Dict] = None,
) -> str:
"""
Create a room.
@@ -72,7 +73,7 @@ class RestHelper:
temp_id = self.auth_user_id
self.auth_user_id = room_creator
path = "/_matrix/client/r0/createRoom"
- content = {}
+ content = extra_content or {}
if not is_public:
content["visibility"] = "private"
if room_version:
diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py
index 485e3650..6b90f838 100644
--- a/tests/rest/client/v2_alpha/test_auth.py
+++ b/tests/rest/client/v2_alpha/test_auth.py
@@ -20,7 +20,7 @@ import synapse.rest.admin
from synapse.api.constants import LoginType
from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker
from synapse.rest.client.v1 import login
-from synapse.rest.client.v2_alpha import auth, devices, register
+from synapse.rest.client.v2_alpha import account, auth, devices, register
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.types import JsonDict, UserID
@@ -498,3 +498,221 @@ class UIAuthTests(unittest.HomeserverTestCase):
self.delete_device(
self.user_tok, self.device_id, 403, body={"auth": {"session": session_id}}
)
+
+
+class RefreshAuthTests(unittest.HomeserverTestCase):
+ servlets = [
+ auth.register_servlets,
+ account.register_servlets,
+ login.register_servlets,
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ register.register_servlets,
+ ]
+ hijack_auth = False
+
+ def prepare(self, reactor, clock, hs):
+ self.user_pass = "pass"
+ self.user = self.register_user("test", self.user_pass)
+
+ def test_login_issue_refresh_token(self):
+ """
+ A login response should include a refresh_token only if asked.
+ """
+ # Test login
+ body = {"type": "m.login.password", "user": "test", "password": self.user_pass}
+
+ login_without_refresh = self.make_request(
+ "POST", "/_matrix/client/r0/login", body
+ )
+ self.assertEqual(login_without_refresh.code, 200, login_without_refresh.result)
+ self.assertNotIn("refresh_token", login_without_refresh.json_body)
+
+ login_with_refresh = self.make_request(
+ "POST",
+ "/_matrix/client/r0/login?org.matrix.msc2918.refresh_token=true",
+ body,
+ )
+ self.assertEqual(login_with_refresh.code, 200, login_with_refresh.result)
+ self.assertIn("refresh_token", login_with_refresh.json_body)
+ self.assertIn("expires_in_ms", login_with_refresh.json_body)
+
+ def test_register_issue_refresh_token(self):
+ """
+ A register response should include a refresh_token only if asked.
+ """
+ register_without_refresh = self.make_request(
+ "POST",
+ "/_matrix/client/r0/register",
+ {
+ "username": "test2",
+ "password": self.user_pass,
+ "auth": {"type": LoginType.DUMMY},
+ },
+ )
+ self.assertEqual(
+ register_without_refresh.code, 200, register_without_refresh.result
+ )
+ self.assertNotIn("refresh_token", register_without_refresh.json_body)
+
+ register_with_refresh = self.make_request(
+ "POST",
+ "/_matrix/client/r0/register?org.matrix.msc2918.refresh_token=true",
+ {
+ "username": "test3",
+ "password": self.user_pass,
+ "auth": {"type": LoginType.DUMMY},
+ },
+ )
+ self.assertEqual(register_with_refresh.code, 200, register_with_refresh.result)
+ self.assertIn("refresh_token", register_with_refresh.json_body)
+ self.assertIn("expires_in_ms", register_with_refresh.json_body)
+
+ def test_token_refresh(self):
+ """
+ A refresh token can be used to issue a new access token.
+ """
+ body = {"type": "m.login.password", "user": "test", "password": self.user_pass}
+ login_response = self.make_request(
+ "POST",
+ "/_matrix/client/r0/login?org.matrix.msc2918.refresh_token=true",
+ body,
+ )
+ self.assertEqual(login_response.code, 200, login_response.result)
+
+ refresh_response = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc2918.refresh_token/refresh",
+ {"refresh_token": login_response.json_body["refresh_token"]},
+ )
+ self.assertEqual(refresh_response.code, 200, refresh_response.result)
+ self.assertIn("access_token", refresh_response.json_body)
+ self.assertIn("refresh_token", refresh_response.json_body)
+ self.assertIn("expires_in_ms", refresh_response.json_body)
+
+ # The access and refresh tokens should be different from the original ones after refresh
+ self.assertNotEqual(
+ login_response.json_body["access_token"],
+ refresh_response.json_body["access_token"],
+ )
+ self.assertNotEqual(
+ login_response.json_body["refresh_token"],
+ refresh_response.json_body["refresh_token"],
+ )
+
+ @override_config({"access_token_lifetime": "1m"})
+ def test_refresh_token_expiration(self):
+ """
+ The access token should expire after the lifetime specified in the config.
+ """
+ body = {"type": "m.login.password", "user": "test", "password": self.user_pass}
+ login_response = self.make_request(
+ "POST",
+ "/_matrix/client/r0/login?org.matrix.msc2918.refresh_token=true",
+ body,
+ )
+ self.assertEqual(login_response.code, 200, login_response.result)
+ self.assertApproximates(
+ login_response.json_body["expires_in_ms"], 60 * 1000, 100
+ )
+
+ refresh_response = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc2918.refresh_token/refresh",
+ {"refresh_token": login_response.json_body["refresh_token"]},
+ )
+ self.assertEqual(refresh_response.code, 200, refresh_response.result)
+ self.assertApproximates(
+ refresh_response.json_body["expires_in_ms"], 60 * 1000, 100
+ )
+
+ def test_refresh_token_invalidation(self):
+ """Refresh tokens are invalidated after first use of the next token.
+
+ A refresh token is considered invalid if:
+ - it was already used at least once
+ - and either
+ - the next access token was used
+ - the next refresh token was used
+
+ The chain of tokens goes like this:
+
+ login -|-> first_refresh -> third_refresh (fails)
+ |-> second_refresh -> fifth_refresh
+ |-> fourth_refresh (fails)
+ """
+
+ body = {"type": "m.login.password", "user": "test", "password": self.user_pass}
+ login_response = self.make_request(
+ "POST",
+ "/_matrix/client/r0/login?org.matrix.msc2918.refresh_token=true",
+ body,
+ )
+ self.assertEqual(login_response.code, 200, login_response.result)
+
+ # This first refresh should work properly
+ first_refresh_response = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc2918.refresh_token/refresh",
+ {"refresh_token": login_response.json_body["refresh_token"]},
+ )
+ self.assertEqual(
+ first_refresh_response.code, 200, first_refresh_response.result
+ )
+
+ # This one as well, since the token in the first one was never used
+ second_refresh_response = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc2918.refresh_token/refresh",
+ {"refresh_token": login_response.json_body["refresh_token"]},
+ )
+ self.assertEqual(
+ second_refresh_response.code, 200, second_refresh_response.result
+ )
+
+ # This one should not, since the token from the first refresh is not valid anymore
+ third_refresh_response = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc2918.refresh_token/refresh",
+ {"refresh_token": first_refresh_response.json_body["refresh_token"]},
+ )
+ self.assertEqual(
+ third_refresh_response.code, 401, third_refresh_response.result
+ )
+
+ # The associated access token should also be invalid
+ whoami_response = self.make_request(
+ "GET",
+ "/_matrix/client/r0/account/whoami",
+ access_token=first_refresh_response.json_body["access_token"],
+ )
+ self.assertEqual(whoami_response.code, 401, whoami_response.result)
+
+ # But all other tokens should work (they will expire after some time)
+ for access_token in [
+ second_refresh_response.json_body["access_token"],
+ login_response.json_body["access_token"],
+ ]:
+ whoami_response = self.make_request(
+ "GET", "/_matrix/client/r0/account/whoami", access_token=access_token
+ )
+ self.assertEqual(whoami_response.code, 200, whoami_response.result)
+
+ # Now that the access token from the last valid refresh was used once, refreshing with the N-1 token should fail
+ fourth_refresh_response = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc2918.refresh_token/refresh",
+ {"refresh_token": login_response.json_body["refresh_token"]},
+ )
+ self.assertEqual(
+ fourth_refresh_response.code, 403, fourth_refresh_response.result
+ )
+
+ # But refreshing from the last valid refresh token still works
+ fifth_refresh_response = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc2918.refresh_token/refresh",
+ {"refresh_token": second_refresh_response.json_body["refresh_token"]},
+ )
+ self.assertEqual(
+ fifth_refresh_response.code, 200, fifth_refresh_response.result
+ )
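
A minimal client-side sketch of the MSC2918 flow exercised by the tests above (illustrative; the homeserver URL is an assumption, while the endpoints and response keys are the ones the tests use):

    import requests

    HS = "https://homeserver.example"  # hypothetical homeserver

    # Ask for a refresh token at login time via the unstable query parameter.
    resp = requests.post(
        HS + "/_matrix/client/r0/login?org.matrix.msc2918.refresh_token=true",
        json={"type": "m.login.password", "user": "test", "password": "pass"},
    ).json()
    access_token = resp["access_token"]
    refresh_token = resp["refresh_token"]  # only present because we asked
    expires_in_ms = resp["expires_in_ms"]

    # When the access token nears expiry, trade the refresh token for a new
    # pair. Both tokens rotate: once the new pair has been used, reusing the
    # old refresh token fails, as test_refresh_token_invalidation shows.
    resp = requests.post(
        HS + "/_matrix/client/unstable/org.matrix.msc2918.refresh_token/refresh",
        json={"refresh_token": refresh_token},
    ).json()
    access_token = resp["access_token"]
    refresh_token = resp["refresh_token"]
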
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index 012910f1..cdca3a3e 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -41,35 +41,7 @@ class FilterTestCase(unittest.HomeserverTestCase):
channel = self.make_request("GET", "/sync")
self.assertEqual(channel.code, 200)
- self.assertTrue(
- {
- "next_batch",
- "rooms",
- "presence",
- "account_data",
- "to_device",
- "device_lists",
- }.issubset(set(channel.json_body.keys()))
- )
-
- def test_sync_presence_disabled(self):
- """
- When presence is disabled, the key does not appear in /sync.
- """
- self.hs.config.use_presence = False
-
- channel = self.make_request("GET", "/sync")
-
- self.assertEqual(channel.code, 200)
- self.assertTrue(
- {
- "next_batch",
- "rooms",
- "account_data",
- "to_device",
- "device_lists",
- }.issubset(set(channel.json_body.keys()))
- )
+ self.assertIn("next_batch", channel.json_body)
class SyncFilterTestCase(unittest.HomeserverTestCase):
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index d46521cc..3245aa91 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -306,8 +306,9 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
channel = self.make_request("GET", "/sync?timeout=0", access_token=tok)
- invites = channel.json_body["rooms"]["invite"]
- self.assertEqual(len(invites), 0, invites)
+ self.assertNotIn(
+ "rooms", channel.json_body, "Got invites without server notice"
+ )
def test_invite_with_notice(self):
"""Tests that, if the MAU limit is hit, the server notices user invites each user
@@ -364,7 +365,8 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
# We could also pick another user and sync with it, which would return an
# invite to a system notices room, but it doesn't matter which user we're
# using so we use the last one because it saves us an extra sync.
- invites = channel.json_body["rooms"]["invite"]
+ if "rooms" in channel.json_body:
+ invites = channel.json_body["rooms"]["invite"]
# Make sure we have an invite to process.
self.assertEqual(len(invites), 1, invites)
diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py
index 377904e7..6578f341 100644
--- a/tests/util/test_lrucache.py
+++ b/tests/util/test_lrucache.py
@@ -15,7 +15,7 @@
from unittest.mock import Mock
-from synapse.util.caches.lrucache import LruCache
+from synapse.util.caches.lrucache import LruCache, setup_expire_lru_cache_entries
from synapse.util.caches.treecache import TreeCache
from tests import unittest
@@ -260,3 +260,47 @@ class LruCacheSizedTestCase(unittest.HomeserverTestCase):
self.assertEquals(cache["key3"], [3])
self.assertEquals(cache["key4"], [4])
self.assertEquals(cache["key5"], [5, 6])
+
+
+class TimeEvictionTestCase(unittest.HomeserverTestCase):
+ """Test that time based eviction works correctly."""
+
+ def default_config(self):
+ config = super().default_config()
+
+ config.setdefault("caches", {})["expiry_time"] = "30m"
+
+ return config
+
+ def test_evict(self):
+ setup_expire_lru_cache_entries(self.hs)
+
+ cache = LruCache(5, clock=self.hs.get_clock())
+
+ # Check that we evict entries we haven't accessed for 30 minutes.
+ cache["key1"] = 1
+ cache["key2"] = 2
+
+ self.reactor.advance(20 * 60)
+
+ self.assertEqual(cache.get("key1"), 1)
+
+ self.reactor.advance(20 * 60)
+
+ # We have only touched `key1` in the last 30m, so we expect that to
+ # still be in the cache while `key2` should have been evicted.
+ self.assertEqual(cache.get("key1"), 1)
+ self.assertEqual(cache.get("key2"), None)
+
+ # Check that re-adding an expired key works correctly.
+ cache["key2"] = 3
+ self.assertEqual(cache.get("key2"), 3)
+
+ self.reactor.advance(20 * 60)
+
+ self.assertEqual(cache.get("key2"), 3)
+
+ self.reactor.advance(20 * 60)
+
+ self.assertEqual(cache.get("key1"), None)
+ self.assertEqual(cache.get("key2"), 3)