author     Erik Johnston <erikj@matrix.org>  2015-11-19 13:21:10 +0000
committer  Erik Johnston <erikj@matrix.org>  2015-11-19 13:21:10 +0000
commit     eb3677f58ffeddc534bc8ceb2adb060d0794b817 (patch)
tree       4377eb0dc5e221862489bdcc802e50e2f1f41cb1
Imported Upstream version 0.11.0
-rw-r--r--  .gitignore  48
-rw-r--r--  AUTHORS.rst  50
-rw-r--r--  CHANGES.rst  781
-rw-r--r--  CONTRIBUTING.rst  118
-rw-r--r--  LICENSE  177
-rw-r--r--  MANIFEST.in  25
-rw-r--r--  MAP.rst  35
-rw-r--r--  README.rst  519
-rw-r--r--  UPGRADE.rst  255
-rwxr-xr-x  contrib/cmdclient/console.py  747
-rw-r--r--  contrib/cmdclient/http.py  217
-rw-r--r--  contrib/experiments/cursesio.py  168
-rw-r--r--  contrib/experiments/test_messaging.py  394
-rw-r--r--  contrib/graph/graph.py  151
-rw-r--r--  contrib/graph/graph2.py  157
-rw-r--r--  contrib/jitsimeetbridge/jitsimeetbridge.py  260
-rw-r--r--  contrib/jitsimeetbridge/syweb-jitsi-conference.patch  188
-rw-r--r--  contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.js  712
-rw-r--r--  contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.util.js  408
-rw-r--r--  contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js  254
-rw-r--r--  contrib/jitsimeetbridge/unjingle/strophe/base64.js  83
-rw-r--r--  contrib/jitsimeetbridge/unjingle/strophe/md5.js  279
-rw-r--r--  contrib/jitsimeetbridge/unjingle/strophe/strophe.js  3256
-rw-r--r--  contrib/jitsimeetbridge/unjingle/unjingle.js  48
-rwxr-xr-x  contrib/scripts/kick_users.py  93
-rw-r--r--  contrib/systemd/log_config.yaml  25
-rw-r--r--  contrib/systemd/synapse.service  16
-rw-r--r--  contrib/vertobot/.gitignore  2
-rwxr-xr-x  contrib/vertobot/bot.pl  309
-rwxr-xr-x  contrib/vertobot/bridge.pl  493
-rw-r--r--  contrib/vertobot/config.yaml  32
-rw-r--r--  contrib/vertobot/cpanfile  14
-rw-r--r--  contrib/vertobot/verto-example.json  207
-rw-r--r--  demo/README  22
-rwxr-xr-x  demo/clean.sh  19
-rw-r--r--  demo/demo.tls.dh  9
-rwxr-xr-x  demo/start.sh  56
-rwxr-xr-x  demo/stop.sh  14
-rw-r--r--  demo/webserver.py  62
-rw-r--r--  docs/CAPTCHA_SETUP  31
-rw-r--r--  docs/README.rst  6
-rw-r--r--  docs/ancient_architecture_notes.rst  59
-rw-r--r--  docs/application_services.rst  36
-rw-r--r--  docs/architecture.rst  68
-rw-r--r--  docs/code_style.rst  49
-rw-r--r--  docs/media_repository.rst  27
-rw-r--r--  docs/metrics-howto.rst  50
-rw-r--r--  docs/postgres.rst  107
-rw-r--r--  docs/sphinx/README.rst  1
-rw-r--r--  docs/sphinx/conf.py  271
-rw-r--r--  docs/sphinx/index.rst  20
-rw-r--r--  docs/sphinx/modules.rst  7
-rw-r--r--  docs/sphinx/synapse.api.auth.rst  7
-rw-r--r--  docs/sphinx/synapse.api.constants.rst  7
-rw-r--r--  docs/sphinx/synapse.api.dbobjects.rst  7
-rw-r--r--  docs/sphinx/synapse.api.errors.rst  7
-rw-r--r--  docs/sphinx/synapse.api.event_stream.rst  7
-rw-r--r--  docs/sphinx/synapse.api.events.factory.rst  7
-rw-r--r--  docs/sphinx/synapse.api.events.room.rst  7
-rw-r--r--  docs/sphinx/synapse.api.events.rst  18
-rw-r--r--  docs/sphinx/synapse.api.handlers.events.rst  7
-rw-r--r--  docs/sphinx/synapse.api.handlers.factory.rst  7
-rw-r--r--  docs/sphinx/synapse.api.handlers.federation.rst  7
-rw-r--r--  docs/sphinx/synapse.api.handlers.register.rst  7
-rw-r--r--  docs/sphinx/synapse.api.handlers.room.rst  7
-rw-r--r--  docs/sphinx/synapse.api.handlers.rst  21
-rw-r--r--  docs/sphinx/synapse.api.notifier.rst  7
-rw-r--r--  docs/sphinx/synapse.api.register_events.rst  7
-rw-r--r--  docs/sphinx/synapse.api.room_events.rst  7
-rw-r--r--  docs/sphinx/synapse.api.rst  30
-rw-r--r--  docs/sphinx/synapse.api.server.rst  7
-rw-r--r--  docs/sphinx/synapse.api.storage.rst  7
-rw-r--r--  docs/sphinx/synapse.api.stream.rst  7
-rw-r--r--  docs/sphinx/synapse.api.streams.event.rst  7
-rw-r--r--  docs/sphinx/synapse.api.streams.rst  17
-rw-r--r--  docs/sphinx/synapse.app.homeserver.rst  7
-rw-r--r--  docs/sphinx/synapse.app.rst  17
-rw-r--r--  docs/sphinx/synapse.db.rst  10
-rw-r--r--  docs/sphinx/synapse.federation.handler.rst  7
-rw-r--r--  docs/sphinx/synapse.federation.messaging.rst  7
-rw-r--r--  docs/sphinx/synapse.federation.pdu_codec.rst  7
-rw-r--r--  docs/sphinx/synapse.federation.persistence.rst  7
-rw-r--r--  docs/sphinx/synapse.federation.replication.rst  7
-rw-r--r--  docs/sphinx/synapse.federation.rst  22
-rw-r--r--  docs/sphinx/synapse.federation.transport.rst  7
-rw-r--r--  docs/sphinx/synapse.federation.units.rst  7
-rw-r--r--  docs/sphinx/synapse.persistence.rst  19
-rw-r--r--  docs/sphinx/synapse.persistence.service.rst  7
-rw-r--r--  docs/sphinx/synapse.persistence.tables.rst  7
-rw-r--r--  docs/sphinx/synapse.persistence.transactions.rst  7
-rw-r--r--  docs/sphinx/synapse.rest.base.rst  7
-rw-r--r--  docs/sphinx/synapse.rest.events.rst  7
-rw-r--r--  docs/sphinx/synapse.rest.register.rst  7
-rw-r--r--  docs/sphinx/synapse.rest.room.rst  7
-rw-r--r--  docs/sphinx/synapse.rest.rst  20
-rw-r--r--  docs/sphinx/synapse.rst  30
-rw-r--r--  docs/sphinx/synapse.server.rst  7
-rw-r--r--  docs/sphinx/synapse.state.rst  7
-rw-r--r--  docs/sphinx/synapse.util.async.rst  7
-rw-r--r--  docs/sphinx/synapse.util.dbutils.rst  7
-rw-r--r--  docs/sphinx/synapse.util.http.rst  7
-rw-r--r--  docs/sphinx/synapse.util.lockutils.rst  7
-rw-r--r--  docs/sphinx/synapse.util.logutils.rst  7
-rw-r--r--  docs/sphinx/synapse.util.rst  21
-rw-r--r--  docs/sphinx/synapse.util.stringutils.rst  7
-rw-r--r--  docs/turn-howto.rst  93
-rwxr-xr-x  jenkins.sh  39
-rw-r--r--  pylint.cfg  280
-rw-r--r--  scripts-dev/check_auth.py  64
-rw-r--r--  scripts-dev/check_event_hash.py  50
-rw-r--r--  scripts-dev/check_signature.py  71
-rw-r--r--  scripts-dev/convert_server_keys.py  116
-rwxr-xr-x  scripts-dev/copyrighter-sql.pl  33
-rwxr-xr-x  scripts-dev/copyrighter.pl  33
-rwxr-xr-x  scripts-dev/database-save.sh  16
-rwxr-xr-x  scripts-dev/definitions.py  142
-rw-r--r--  scripts-dev/federation_client.py  146
-rw-r--r--  scripts-dev/hash_history.py  69
-rwxr-xr-x  scripts-dev/make_identicons.pl  39
-rwxr-xr-x  scripts-dev/nuke-room-from-db.sh  24
-rw-r--r--  scripts-dev/sphinx_api_docs.sh  1
-rwxr-xr-x  scripts/register_new_matrix_user  154
-rwxr-xr-x  scripts/synapse_port_db  761
-rw-r--r--  setup.cfg  18
-rwxr-xr-x  setup.py  90
-rw-r--r--  synapse/__init__.py  19
-rw-r--r--  synapse/api/__init__.py  14
-rw-r--r--  synapse/api/auth.py  943
-rw-r--r--  synapse/api/constants.py  88
-rw-r--r--  synapse/api/errors.py  243
-rw-r--r--  synapse/api/filtering.py  247
-rw-r--r--  synapse/api/ratelimiting.py  79
-rw-r--r--  synapse/api/urls.py  27
-rw-r--r--  synapse/app/__init__.py  14
-rwxr-xr-x  synapse/app/homeserver.py  745
-rwxr-xr-x  synapse/app/synctl.py  84
-rw-r--r--  synapse/appservice/__init__.py  226
-rw-r--r--  synapse/appservice/api.py  112
-rw-r--r--  synapse/appservice/scheduler.py  254
-rw-r--r--  synapse/config/__init__.py  14
-rw-r--r--  synapse/config/__main__.py  30
-rw-r--r--  synapse/config/_base.py  317
-rw-r--r--  synapse/config/appservice.py  27
-rw-r--r--  synapse/config/captcha.py  47
-rw-r--r--  synapse/config/cas.py  47
-rw-r--r--  synapse/config/database.py  79
-rw-r--r--  synapse/config/homeserver.py  45
-rw-r--r--  synapse/config/key.py  130
-rw-r--r--  synapse/config/logger.py  179
-rw-r--r--  synapse/config/metrics.py  33
-rw-r--r--  synapse/config/password.py  32
-rw-r--r--  synapse/config/ratelimiting.py  58
-rw-r--r--  synapse/config/registration.py  76
-rw-r--r--  synapse/config/repository.py  100
-rw-r--r--  synapse/config/saml2.py  55
-rw-r--r--  synapse/config/server.py  229
-rw-r--r--  synapse/config/tls.py  150
-rw-r--r--  synapse/config/voip.py  37
-rw-r--r--  synapse/crypto/__init__.py  14
-rw-r--r--  synapse/crypto/context_factory.py  49
-rw-r--r--  synapse/crypto/event_signing.py  113
-rw-r--r--  synapse/crypto/keyclient.py  133
-rw-r--r--  synapse/crypto/keyring.py  686
-rw-r--r--  synapse/events/__init__.py  163
-rw-r--r--  synapse/events/builder.py  72
-rw-r--r--  synapse/events/snapshot.py  22
-rw-r--r--  synapse/events/utils.py  170
-rw-r--r--  synapse/events/validator.py  92
-rw-r--r--  synapse/federation/__init__.py  31
-rw-r--r--  synapse/federation/federation_base.py  158
-rw-r--r--  synapse/federation/federation_client.py  732
-rw-r--r--  synapse/federation/federation_server.py  557
-rw-r--r--  synapse/federation/persistence.py  103
-rw-r--r--  synapse/federation/replication.py  78
-rw-r--r--  synapse/federation/transaction_queue.py  384
-rw-r--r--  synapse/federation/transport/__init__.py  74
-rw-r--r--  synapse/federation/transport/client.py  345
-rw-r--r--  synapse/federation/transport/server.py  453
-rw-r--r--  synapse/federation/units.py  129
-rw-r--r--  synapse/handlers/__init__.py  73
-rw-r--r--  synapse/handlers/_base.py  347
-rw-r--r--  synapse/handlers/admin.py  63
-rw-r--r--  synapse/handlers/appservice.py  195
-rw-r--r--  synapse/handlers/auth.py  480
-rw-r--r--  synapse/handlers/directory.py  259
-rw-r--r--  synapse/handlers/events.py  177
-rw-r--r--  synapse/handlers/federation.py  1715
-rw-r--r--  synapse/handlers/identity.py  144
-rw-r--r--  synapse/handlers/message.py  640
-rw-r--r--  synapse/handlers/presence.py  1302
-rw-r--r--  synapse/handlers/private_user_data.py  46
-rw-r--r--  synapse/handlers/profile.py  228
-rw-r--r--  synapse/handlers/receipts.py  202
-rw-r--r--  synapse/handlers/register.py  320
-rw-r--r--  synapse/handlers/room.py  879
-rw-r--r--  synapse/handlers/search.py  319
-rw-r--r--  synapse/handlers/sync.py  739
-rw-r--r--  synapse/handlers/typing.py  268
-rw-r--r--  synapse/http/__init__.py  14
-rw-r--r--  synapse/http/client.py  292
-rw-r--r--  synapse/http/endpoint.py  172
-rw-r--r--  synapse/http/matrixfederationclient.py  502
-rw-r--r--  synapse/http/server.py  325
-rw-r--r--  synapse/http/servlet.py  113
-rw-r--r--  synapse/metrics/__init__.py  200
-rw-r--r--  synapse/metrics/metric.py  155
-rw-r--r--  synapse/metrics/resource.py  39
-rw-r--r--  synapse/notifier.py  453
-rw-r--r--  synapse/push/__init__.py  474
-rw-r--r--  synapse/push/baserules.py  264
-rw-r--r--  synapse/push/httppusher.py  148
-rw-r--r--  synapse/push/pusherpool.py  197
-rw-r--r--  synapse/push/rulekinds.py  22
-rw-r--r--  synapse/python_dependencies.py  147
-rw-r--r--  synapse/rest/__init__.py  14
-rw-r--r--  synapse/rest/client/__init__.py  14
-rw-r--r--  synapse/rest/client/v1/__init__.py  44
-rw-r--r--  synapse/rest/client/v1/admin.py  49
-rw-r--r--  synapse/rest/client/v1/base.py  52
-rw-r--r--  synapse/rest/client/v1/directory.py  147
-rw-r--r--  synapse/rest/client/v1/events.py  96
-rw-r--r--  synapse/rest/client/v1/initial_sync.py  44
-rw-r--r--  synapse/rest/client/v1/login.py  413
-rw-r--r--  synapse/rest/client/v1/presence.py  146
-rw-r--r--  synapse/rest/client/v1/profile.py  114
-rw-r--r--  synapse/rest/client/v1/push_rule.py  462
-rw-r--r--  synapse/rest/client/v1/pusher.py  101
-rw-r--r--  synapse/rest/client/v1/register.py  368
-rw-r--r--  synapse/rest/client/v1/room.py  684
-rw-r--r--  synapse/rest/client/v1/transactions.py  95
-rw-r--r--  synapse/rest/client/v1/voip.py  60
-rw-r--r--  synapse/rest/client/v2_alpha/__init__.py  48
-rw-r--r--  synapse/rest/client/v2_alpha/_base.py  60
-rw-r--r--  synapse/rest/client/v2_alpha/account.py  158
-rw-r--r--  synapse/rest/client/v2_alpha/auth.py  190
-rw-r--r--  synapse/rest/client/v2_alpha/filter.py  104
-rw-r--r--  synapse/rest/client/v2_alpha/keys.py  317
-rw-r--r--  synapse/rest/client/v2_alpha/receipts.py  59
-rw-r--r--  synapse/rest/client/v2_alpha/register.py  266
-rw-r--r--  synapse/rest/client/v2_alpha/sync.py  365
-rw-r--r--  synapse/rest/client/v2_alpha/tags.py  106
-rw-r--r--  synapse/rest/client/v2_alpha/tokenrefresh.py  56
-rw-r--r--  synapse/rest/key/__init__.py  14
-rw-r--r--  synapse/rest/key/v1/__init__.py  14
-rw-r--r--  synapse/rest/key/v1/server_key_resource.py  93
-rw-r--r--  synapse/rest/key/v2/__init__.py  25
-rw-r--r--  synapse/rest/key/v2/local_key_resource.py  125
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py  242
-rw-r--r--  synapse/rest/media/__init__.py  0
-rw-r--r--  synapse/rest/media/v0/__init__.py  0
-rw-r--r--  synapse/rest/media/v0/content_repository.py  212
-rw-r--r--  synapse/rest/media/v1/__init__.py  45
-rw-r--r--  synapse/rest/media/v1/base_resource.py  456
-rw-r--r--  synapse/rest/media/v1/download_resource.py  76
-rw-r--r--  synapse/rest/media/v1/filepath.py  67
-rw-r--r--  synapse/rest/media/v1/identicon_resource.py  65
-rw-r--r--  synapse/rest/media/v1/media_repository.py  80
-rw-r--r--  synapse/rest/media/v1/thumbnail_resource.py  288
-rw-r--r--  synapse/rest/media/v1/thumbnailer.py  89
-rw-r--r--  synapse/rest/media/v1/upload_resource.py  118
-rw-r--r--  synapse/server.py  224
-rw-r--r--  synapse/state.py  397
-rw-r--r--  synapse/static/client/login/index.html  50
-rw-r--r--  synapse/static/client/login/js/jquery-2.1.3.min.js  4
-rw-r--r--  synapse/static/client/login/js/login.js  153
-rw-r--r--  synapse/static/client/login/spinner.gif  bin 0 -> 1849 bytes
-rw-r--r--  synapse/static/client/login/style.css  57
-rw-r--r--  synapse/static/client/register/index.html  32
-rw-r--r--  synapse/static/client/register/js/jquery-2.1.3.min.js  4
-rw-r--r--  synapse/static/client/register/js/recaptcha_ajax.js  195
-rw-r--r--  synapse/static/client/register/js/register.js  117
-rw-r--r--  synapse/static/client/register/register_config.sample.js  3
-rw-r--r--  synapse/static/client/register/style.css  60
-rw-r--r--  synapse/storage/__init__.py  164
-rw-r--r--  synapse/storage/_base.py  729
-rw-r--r--  synapse/storage/appservice.py  471
-rw-r--r--  synapse/storage/background_updates.py  256
-rw-r--r--  synapse/storage/directory.py  152
-rw-r--r--  synapse/storage/end_to_end_keys.py  125
-rw-r--r--  synapse/storage/engines/__init__.py  41
-rw-r--r--  synapse/storage/engines/_base.py  18
-rw-r--r--  synapse/storage/engines/postgres.py  58
-rw-r--r--  synapse/storage/engines/sqlite3.py  74
-rw-r--r--  synapse/storage/event_federation.py  432
-rw-r--r--  synapse/storage/events.py  966
-rw-r--r--  synapse/storage/filtering.py  64
-rw-r--r--  synapse/storage/keys.py  205
-rw-r--r--  synapse/storage/media_repository.py  137
-rw-r--r--  synapse/storage/prepare_database.py  395
-rw-r--r--  synapse/storage/presence.py  163
-rw-r--r--  synapse/storage/profile.py  57
-rw-r--r--  synapse/storage/push_rule.py  278
-rw-r--r--  synapse/storage/pusher.py  153
-rw-r--r--  synapse/storage/receipts.py  406
-rw-r--r--  synapse/storage/registration.py  305
-rw-r--r--  synapse/storage/rejections.py  44
-rw-r--r--  synapse/storage/room.py  287
-rw-r--r--  synapse/storage/roommember.py  271
-rw-r--r--  synapse/storage/schema/delta/11/v11.sql  16
-rw-r--r--  synapse/storage/schema/delta/12/v12.sql  63
-rw-r--r--  synapse/storage/schema/delta/13/v13.sql  31
-rw-r--r--  synapse/storage/schema/delta/14/upgrade_appservice_db.py  37
-rw-r--r--  synapse/storage/schema/delta/14/v14.sql  23
-rw-r--r--  synapse/storage/schema/delta/15/appservice_txns.sql  31
-rw-r--r--  synapse/storage/schema/delta/15/presence_indices.sql  2
-rw-r--r--  synapse/storage/schema/delta/15/v15.sql  25
-rw-r--r--  synapse/storage/schema/delta/16/events_order_index.sql  4
-rw-r--r--  synapse/storage/schema/delta/16/remote_media_cache_index.sql  2
-rw-r--r--  synapse/storage/schema/delta/16/remove_duplicates.sql  9
-rw-r--r--  synapse/storage/schema/delta/16/room_alias_index.sql  3
-rw-r--r--  synapse/storage/schema/delta/16/unique_constraints.sql  80
-rw-r--r--  synapse/storage/schema/delta/16/users.sql  56
-rw-r--r--  synapse/storage/schema/delta/17/drop_indexes.sql  18
-rw-r--r--  synapse/storage/schema/delta/17/server_keys.sql  24
-rw-r--r--  synapse/storage/schema/delta/17/user_threepids.sql  9
-rw-r--r--  synapse/storage/schema/delta/18/server_keys_bigger_ints.sql  32
-rw-r--r--  synapse/storage/schema/delta/19/event_index.sql  19
-rw-r--r--  synapse/storage/schema/delta/20/dummy.sql  1
-rw-r--r--  synapse/storage/schema/delta/20/pushers.py  76
-rw-r--r--  synapse/storage/schema/delta/21/end_to_end_keys.sql  34
-rw-r--r--  synapse/storage/schema/delta/21/receipts.sql  38
-rw-r--r--  synapse/storage/schema/delta/22/receipts_index.sql  18
-rw-r--r--  synapse/storage/schema/delta/22/user_threepids_unique.sql  19
-rw-r--r--  synapse/storage/schema/delta/23/drop_state_index.sql  16
-rw-r--r--  synapse/storage/schema/delta/23/refresh_tokens.sql  21
-rw-r--r--  synapse/storage/schema/delta/24/stats_reporting.sql  22
-rw-r--r--  synapse/storage/schema/delta/25/00background_updates.sql  21
-rw-r--r--  synapse/storage/schema/delta/25/fts.py  78
-rw-r--r--  synapse/storage/schema/delta/25/guest_access.sql  25
-rw-r--r--  synapse/storage/schema/delta/25/history_visibility.sql  25
-rw-r--r--  synapse/storage/schema/delta/25/tags.sql  38
-rw-r--r--  synapse/storage/schema/full_schemas/11/event_edges.sql  89
-rw-r--r--  synapse/storage/schema/full_schemas/11/event_signatures.sql  55
-rw-r--r--  synapse/storage/schema/full_schemas/11/im.sql  123
-rw-r--r--  synapse/storage/schema/full_schemas/11/keys.sql  31
-rw-r--r--  synapse/storage/schema/full_schemas/11/media_repository.sql  65
-rw-r--r--  synapse/storage/schema/full_schemas/11/presence.sql  35
-rw-r--r--  synapse/storage/schema/full_schemas/11/profiles.sql  19
-rw-r--r--  synapse/storage/schema/full_schemas/11/redactions.sql  22
-rw-r--r--  synapse/storage/schema/full_schemas/11/room_aliases.sql  24
-rw-r--r--  synapse/storage/schema/full_schemas/11/state.sql  40
-rw-r--r--  synapse/storage/schema/full_schemas/11/transactions.sql  63
-rw-r--r--  synapse/storage/schema/full_schemas/11/users.sql  43
-rw-r--r--  synapse/storage/schema/full_schemas/16/application_services.sql  48
-rw-r--r--  synapse/storage/schema/full_schemas/16/event_edges.sql  89
-rw-r--r--  synapse/storage/schema/full_schemas/16/event_signatures.sql  55
-rw-r--r--  synapse/storage/schema/full_schemas/16/im.sql  128
-rw-r--r--  synapse/storage/schema/full_schemas/16/keys.sql  31
-rw-r--r--  synapse/storage/schema/full_schemas/16/media_repository.sql  68
-rw-r--r--  synapse/storage/schema/full_schemas/16/presence.sql  40
-rw-r--r--  synapse/storage/schema/full_schemas/16/profiles.sql  20
-rw-r--r--  synapse/storage/schema/full_schemas/16/push.sql  74
-rw-r--r--  synapse/storage/schema/full_schemas/16/redactions.sql  22
-rw-r--r--  synapse/storage/schema/full_schemas/16/room_aliases.sql  29
-rw-r--r--  synapse/storage/schema/full_schemas/16/state.sql  40
-rw-r--r--  synapse/storage/schema/full_schemas/16/transactions.sql  63
-rw-r--r--  synapse/storage/schema/full_schemas/16/users.sql  42
-rw-r--r--  synapse/storage/schema/schema_version.sql  27
-rw-r--r--  synapse/storage/search.py  307
-rw-r--r--  synapse/storage/signatures.py  90
-rw-r--r--  synapse/storage/state.py  441
-rw-r--r--  synapse/storage/stream.py  602
-rw-r--r--  synapse/storage/tags.py  216
-rw-r--r--  synapse/storage/transactions.py  355
-rw-r--r--  synapse/storage/util/__init__.py  14
-rw-r--r--  synapse/storage/util/id_generators.py  169
-rw-r--r--  synapse/streams/__init__.py  14
-rw-r--r--  synapse/streams/config.py  116
-rw-r--r--  synapse/streams/events.py  61
-rw-r--r--  synapse/types.py  215
-rw-r--r--  synapse/util/__init__.py  127
-rw-r--r--  synapse/util/async.py  104
-rw-r--r--  synapse/util/caches/__init__.py  27
-rw-r--r--  synapse/util/caches/descriptors.py  377
-rw-r--r--  synapse/util/caches/dictionary_cache.py  103
-rw-r--r--  synapse/util/caches/expiringcache.py  115
-rw-r--r--  synapse/util/caches/lrucache.py  149
-rw-r--r--  synapse/util/debug.py  72
-rw-r--r--  synapse/util/distributor.py  131
-rw-r--r--  synapse/util/frozenutils.py  51
-rw-r--r--  synapse/util/jsonobject.py  90
-rw-r--r--  synapse/util/logcontext.py  197
-rw-r--r--  synapse/util/logutils.py  170
-rw-r--r--  synapse/util/ratelimitutils.py  216
-rw-r--r--  synapse/util/retryutils.py  156
-rw-r--r--  synapse/util/stringutils.py  46
l---------  synctl  1
-rw-r--r--  tests/__init__.py  15
-rw-r--r--  tests/api/__init__.py  0
-rw-r--r--  tests/api/test_auth.py  295
-rw-r--r--  tests/api/test_filtering.py  507
-rw-r--r--  tests/api/test_ratelimiting.py  39
-rw-r--r--  tests/appservice/__init__.py  14
-rw-r--r--  tests/appservice/test_appservice.py  229
-rw-r--r--  tests/appservice/test_scheduler.py  252
-rw-r--r--  tests/crypto/__init__.py  15
-rw-r--r--  tests/crypto/test_event_signing.py  114
-rw-r--r--  tests/events/__init__.py  0
-rw-r--r--  tests/events/test_utils.py  115
-rw-r--r--  tests/federation/__init__.py  0
-rw-r--r--  tests/federation/test_federation.py  301
-rw-r--r--  tests/handlers/__init__.py  0
-rw-r--r--  tests/handlers/test_appservice.py  139
-rw-r--r--  tests/handlers/test_auth.py  70
-rw-r--r--  tests/handlers/test_directory.py  110
-rw-r--r--  tests/handlers/test_federation.py  130
-rw-r--r--  tests/handlers/test_presence.py  1328
-rw-r--r--  tests/handlers/test_presencelike.py  311
-rw-r--r--  tests/handlers/test_profile.py  145
-rw-r--r--  tests/handlers/test_room.py  404
-rw-r--r--  tests/handlers/test_typing.py  414
-rw-r--r--  tests/metrics/__init__.py  0
-rw-r--r--  tests/metrics/test_metric.py  161
-rw-r--r--  tests/rest/__init__.py  14
-rw-r--r--  tests/rest/client/__init__.py  14
-rw-r--r--  tests/rest/client/v1/__init__.py  15
-rw-r--r--  tests/rest/client/v1/test_events.py  217
-rw-r--r--  tests/rest/client/v1/test_presence.py  411
-rw-r--r--  tests/rest/client/v1/test_profile.py  148
-rw-r--r--  tests/rest/client/v1/test_rooms.py  1052
-rw-r--r--  tests/rest/client/v1/test_typing.py  162
-rw-r--r--  tests/rest/client/v1/utils.py  131
-rw-r--r--  tests/rest/client/v2_alpha/__init__.py  62
-rw-r--r--  tests/rest/client/v2_alpha/test_filter.py  95
-rw-r--r--  tests/rest/client/v2_alpha/test_register.py  135
-rw-r--r--  tests/storage/__init__.py  0
-rw-r--r--  tests/storage/event_injector.py  81
-rw-r--r--  tests/storage/test__base.py  199
-rw-r--r--  tests/storage/test_appservice.py  407
-rw-r--r--  tests/storage/test_background_update.py  76
-rw-r--r--  tests/storage/test_base.py  200
-rw-r--r--  tests/storage/test_directory.py  79
-rw-r--r--  tests/storage/test_events.py  116
-rw-r--r--  tests/storage/test_presence.py  161
-rw-r--r--  tests/storage/test_profile.py  64
-rw-r--r--  tests/storage/test_redaction.py  254
-rw-r--r--  tests/storage/test_registration.py  130
-rw-r--r--  tests/storage/test_room.py  155
-rw-r--r--  tests/storage/test_roommember.py  160
-rw-r--r--  tests/storage/test_stream.py  185
-rw-r--r--  tests/test_distributor.py  118
-rw-r--r--  tests/test_state.py  641
-rw-r--r--  tests/test_test_utils.py  70
-rw-r--r--  tests/test_types.py  63
-rw-r--r--  tests/unittest.py  92
-rw-r--r--  tests/util/__init__.py  15
-rw-r--r--  tests/util/test_dict_cache.py  101
-rw-r--r--  tests/util/test_log_context.py  43
-rw-r--r--  tests/util/test_lrucache.py  54
-rw-r--r--  tests/utils.py  479
-rw-r--r--  tox.ini  28
451 files changed, 68200 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..f8c40001
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,48 @@
+*.pyc
+.*.swp
+
+.DS_Store
+_trial_temp/
+logs/
+dbs/
+*.egg
+dist/
+docs/build/
+*.egg-info
+
+cmdclient_config.json
+homeserver*.db
+homeserver*.log
+homeserver*.pid
+homeserver*.yaml
+
+*.signing.key
+*.tls.crt
+*.tls.dh
+*.tls.key
+
+.coverage
+htmlcov
+
+demo/*.db
+demo/*.log
+demo/*.log.*
+demo/*.pid
+demo/media_store.*
+demo/etc
+
+uploads
+
+.idea/
+media_store/
+
+*.tac
+
+build/
+
+localhost-800*/
+static/client/register/register_config.js
+.tox
+
+env/
+*.config
diff --git a/AUTHORS.rst b/AUTHORS.rst
new file mode 100644
index 00000000..58a67c6b
--- /dev/null
+++ b/AUTHORS.rst
@@ -0,0 +1,50 @@
+Erik Johnston <erik at matrix.org>
+ * HS core
+ * Federation API impl
+
+Mark Haines <mark at matrix.org>
+ * HS core
+ * Crypto
+ * Content repository
+ * CS v2 API impl
+
+Kegan Dougal <kegan at matrix.org>
+ * HS core
+ * CS v1 API impl
+ * AS API impl
+
+Paul "LeoNerd" Evans <paul at matrix.org>
+ * HS core
+ * Presence
+ * Typing Notifications
+ * Performance metrics and caching layer
+
+Dave Baker <dave at matrix.org>
+ * Push notifications
+ * Auth CS v2 impl
+
+Matthew Hodgson <matthew at matrix.org>
+ * General doc & housekeeping
+ * Vertobot/vertobridge matrix<->verto PoC
+
+Emmanuel Rohee <manu at matrix.org>
+ * Supporting iOS clients (testability and fallback registration)
+
+Turned to Dust <dwinslow86 at gmail.com>
+ * ArchLinux installation instructions
+
+Brabo <brabo at riseup.net>
+ * Installation instruction fixes
+
+Ivan Shapovalov <intelfx100 at gmail.com>
+ * contrib/systemd: a sample systemd unit file and a logger configuration
+
+Eric Myhre <hash at exultant.us>
+ * Fix bug where ``media_store_path`` config option was ignored by v0 content
+ repository API.
+
+Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
+ * Add SAML2 support for registration and login.
+
+Steven Hammerton <steven.hammerton at openmarket.com>
+ * Add CAS support for registration and login.
diff --git a/CHANGES.rst b/CHANGES.rst
new file mode 100644
index 00000000..068ed9f2
--- /dev/null
+++ b/CHANGES.rst
@@ -0,0 +1,781 @@
+Changes in synapse v0.11.0 (2015-11-17)
+=======================================
+
+* Change CAS login API (PR #349)
+
+Changes in synapse v0.11.0-rc2 (2015-11-13)
+===========================================
+
+* Various changes to /sync API response format (PR #373)
+* Fix regression when setting display name in newly joined room over
+ federation (PR #368)
+* Fix problem where /search was slow when using SQLite (PR #366)
+
+Changes in synapse v0.11.0-rc1 (2015-11-11)
+===========================================
+
+* Add Search API (PR #307, #324, #327, #336, #350, #359)
+* Add 'archived' state to v2 /sync API (PR #316)
+* Add ability to reject invites (PR #317)
+* Add config option to disable password login (PR #322)
+* Add the login fallback API (PR #330)
+* Add room context API (PR #334)
+* Add room tagging support (PR #335)
+* Update v2 /sync API to match spec (PR #305, #316, #321, #332, #337, #341)
+* Change retry schedule for application services (PR #320)
+* Change retry schedule for remote servers (PR #340)
+* Fix bug where we hosted static content in the incorrect place (PR #329)
+* Fix bug where we didn't increment retry interval for remote servers (PR #343)
+
+Changes in synapse v0.10.1-rc1 (2015-10-15)
+===========================================
+
+* Add support for CAS, thanks to Steven Hammerton (PR #295, #296)
+* Add support for using macaroons for ``access_token`` (PR #256, #229)
+* Add support for ``m.room.canonical_alias`` (PR #287)
+* Add support for users to view the history of rooms that they have left.
+  (PR #276, #294)
+* Add support for refresh tokens (PR #240)
+* Add flag on creation which disables federation of the room (PR #279)
+* Add some room state to invites. (PR #275)
+* Atomically persist events when joining a room over federation (PR #283)
+* Change default history visibility for private rooms (PR #271)
+* Allow users to redact their own sent events (PR #262)
+* Use tox for tests (PR #247)
+* Split up syutil into separate libraries (PR #243)
+
+Changes in synapse v0.10.0-r2 (2015-09-16)
+==========================================
+
+* Fix bug where we always fetched remote server signing keys instead of using
+ ones in our cache.
+* Fix adding threepids to an existing account.
+* Fix bug with inviting over federation when the remote server was already in
+  the room. (PR #281, SYN-392)
+
+Changes in synapse v0.10.0-r1 (2015-09-08)
+==========================================
+
+* Fix bug with python packaging
+
+Changes in synapse v0.10.0 (2015-09-03)
+=======================================
+
+No change from release candidate.
+
+Changes in synapse v0.10.0-rc6 (2015-09-02)
+===========================================
+
+* Remove some of the old database upgrade scripts.
+* Fix database port script to work with newly created SQLite databases.
+
+Changes in synapse v0.10.0-rc5 (2015-08-27)
+===========================================
+
+* Fix bug that broke downloading files with ASCII filenames across federation.
+
+Changes in synapse v0.10.0-rc4 (2015-08-27)
+===========================================
+
+* Allow UTF-8 filenames for upload. (PR #259)
+
+Changes in synapse v0.10.0-rc3 (2015-08-25)
+===========================================
+
+* Add ``--keys-directory`` config option to specify where files such as
+  certs and signing keys should be stored when using ``--generate-config``
+  or ``--generate-keys``. (PR #250)
+* Allow ``--config-path`` to specify a directory, causing synapse to use all
+ \*.yaml files in the directory as config files. (PR #249)
+* Add ``web_client_location`` config option to specify static files to be
+ hosted by synapse under ``/_matrix/client``. (PR #245)
+* Add helper utility to synapse to read and parse the config files and extract
+ the value of a given key. For example::
+
+ $ python -m synapse.config read server_name -c homeserver.yaml
+ localhost
+
+ (PR #246)
+
+
+Changes in synapse v0.10.0-rc2 (2015-08-24)
+===========================================
+
+* Fix bug where we incorrectly populated the ``event_forward_extremities``
+ table, resulting in problems joining large remote rooms (e.g.
+  ``#matrix:matrix.org``).
+* Reduce the number of times we wake up pushers by not listening for presence
+ or typing events, reducing the CPU cost of each pusher.
+
+
+Changes in synapse v0.10.0-rc1 (2015-08-21)
+===========================================
+
+Also see v0.9.4-rc1 changelog, which has been amalgamated into this release.
+
+General:
+
+* Upgrade to Twisted 15 (PR #173)
+* Add support for serving and fetching encryption keys over federation.
+ (PR #208)
+* Add support for logging in with email address (PR #234)
+* Add support for new ``m.room.canonical_alias`` event. (PR #233)
+* Change synapse to treat user IDs case insensitively during registration and
+ login. (If two users already exist with case insensitive matching user ids,
+ synapse will continue to require them to specify their user ids exactly.)
+* Error if a user tries to register with an email already in use. (PR #211)
+* Add extra and improve existing caches (PR #212, #219, #226, #228)
+* Batch various storage requests (PR #226, #228)
+* Fix bug where we didn't correctly log the entity that triggered the request
+ if the request came in via an application service (PR #230)
+* Fix bug where we needlessly regenerated the full list of rooms an AS is
+ interested in. (PR #232)
+* Add support for ASes to use the v2_alpha registration API (PR #210)
+
+
+Configuration:
+
+* Add ``--generate-keys`` that will generate any missing cert and key files in
+  the configuration files. This is equivalent to running ``--generate-config``
+  on an existing configuration file (see the example below). (PR #220)
+* ``--generate-config`` now no longer requires a ``--server-name`` parameter
+ when used on existing configuration files. (PR #220)
+* Add ``--print-pidfile`` flag that controls the printing of the pid to stdout
+  of the daemonised process. (PR #213)
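+
+For illustration, a plausible pair of invocations combining these flags (the
+``python -m synapse.app.homeserver`` entry point and the server name are
+assumptions here, not taken from this changelog)::
+
+    $ python -m synapse.app.homeserver --generate-config \
+        --server-name my.domain.name -c homeserver.yaml
+    $ python -m synapse.app.homeserver --generate-keys -c homeserver.yaml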
+
+Media Repository:
+
+* Fix bug where we picked a lower resolution image than requested. (PR #205)
+* Add support for specifying whether the media repository should dynamically
+  thumbnail images. (PR #206)
+
+Metrics:
+
+* Add statistics from the reactor to the metrics API. (PR #224, #225)
+
+Demo Homeservers:
+
+* Fix starting the demo homeservers without rate-limiting enabled. (PR #182)
+* Fix enabling registration on demo homeservers (PR #223)
+
+
+Changes in synapse v0.9.4-rc1 (2015-07-21)
+==========================================
+
+General:
+
+* Add basic implementation of receipts. (SPEC-99)
+* Add support for configuration presets in room creation API. (PR #203)
+* Add auth event that limits the visibility of history for new users.
+ (SPEC-134)
+* Add SAML2 login/registration support. (PR #201. Thanks Muthu Subramanian!)
+* Add client side key management APIs for end to end encryption. (PR #198)
+* Change power level semantics so that you cannot kick, ban or change power
+ levels of users that have equal or greater power level than you. (SYN-192)
+* Improve performance by bulk inserting events where possible. (PR #193)
+* Improve performance by bulk verifying signatures where possible. (PR #194)
+
+
+Configuration:
+
+* Add support for including TLS certificate chains.
+
+Media Repository:
+
+* Add Content-Disposition headers to content repository responses. (SYN-150)
+
+
+Changes in synapse v0.9.3 (2015-07-01)
+======================================
+
+No changes from v0.9.3 Release Candidate 1.
+
+Changes in synapse v0.9.3-rc1 (2015-06-23)
+==========================================
+
+General:
+
+* Fix a memory leak in the notifier. (SYN-412)
+* Improve performance of room initial sync. (SYN-418)
+* General improvements to logging.
+* Remove ``access_token`` query params from ``INFO`` level logging.
+
+Configuration:
+
+* Add support for specifying and configuring multiple listeners. (SYN-389)
+
+Application services:
+
+* Fix bug where synapse failed to send user queries to application services.
+
+Changes in synapse v0.9.2-r2 (2015-06-15)
+=========================================
+
+Fix packaging so that schema delta python files get included in the package.
+
+Changes in synapse v0.9.2 (2015-06-12)
+======================================
+
+General:
+
+* Use ultrajson for JSON (de)serialisation when a canonical encoding is not
+ required. Ultrajson is significantly faster than simplejson in certain
+ circumstances.
+* Use connection pools for outgoing HTTP connections.
+* Process thumbnails on separate threads.
+
+Configuration:
+
+* Add option, ``gzip_responses``, to disable HTTP response compression.
+
+Federation:
+
+* Improve resilience of backfill by ensuring we fetch any missing auth events.
+* Improve performance of backfill and joining remote rooms by removing
+  unnecessary computations. These included handling events we'd previously
+  handled, as well as attempting to compute the current state for outliers.
+
+
+Changes in synapse v0.9.1 (2015-05-26)
+======================================
+
+General:
+
+* Add support for backfilling when a client paginates. This allows servers to
+ request history for a room from remote servers when a client tries to
+ paginate history the server does not have - SYN-36
+* Fix bug where you couldn't disable non-default pushrules - SYN-378
+* Fix ``register_new_user`` script - SYN-359
+* Improve performance of fetching events from the database, this improves both
+ initialSync and sending of events.
+* Improve performance of event streams, allowing synapse to handle more
+ simultaneous connected clients.
+
+Federation:
+
+* Fix bug with existing backfill implementation where it returned the wrong
+ selection of events in some circumstances.
+* Improve performance of joining remote rooms.
+
+Configuration:
+
+* Add support for changing the bind host of the metrics listener via the
+ ``metrics_bind_host`` option.
+
+
+Changes in synapse v0.9.0-r5 (2015-05-21)
+=========================================
+
+* Add more database caches to reduce amount of work done for each pusher. This
+ radically reduces CPU usage when multiple pushers are set up in the same room.
+
+Changes in synapse v0.9.0 (2015-05-07)
+======================================
+
+General:
+
+* Add support for using a PostgreSQL database instead of SQLite. See
+ `docs/postgres.rst`_ for details.
+* Add password change and reset APIs. See `Registration`_ in the spec.
+* Fix memory leak due to not releasing stale notifiers - SYN-339.
+* Fix race in caches that occasionally caused some presence updates to be
+ dropped - SYN-369.
+* Check server name has not changed on restart.
+* Add a sample systemd unit file and a logger configuration in
+  contrib/systemd. Contributed by Ivan Shapovalov.
+
+Federation:
+
+* Add key distribution mechanisms for fetching public keys of unavailable
+ remote home servers. See `Retrieving Server Keys`_ in the spec.
+
+Configuration:
+
+* Add support for multiple config files.
+* Add support for dictionaries in config files.
+* Remove support for specifying config options on the command line, except
+ for:
+
+ * ``--daemonize`` - Daemonize the home server.
+ * ``--manhole`` - Turn on the twisted telnet manhole service on the given
+ port.
+  * ``--database-path`` - The path to an SQLite database to use.
+ * ``--verbose`` - The verbosity level.
+ * ``--log-file`` - File to log to.
+ * ``--log-config`` - Python logging config file.
+ * ``--enable-registration`` - Enable registration for new users.
+
+Application services:
+
+* Reliably retry sending of events from Synapse to application services, as
+  per the `Application Services`_ spec.
+* Application services can no longer register via the ``/register`` API,
+ instead their configuration should be saved to a file and listed in the
+ synapse ``app_service_config_files`` config option. The AS configuration file
+ has the same format as the old ``/register`` request.
+ See `docs/application_services.rst`_ for more information.
+
+.. _`docs/postgres.rst`: docs/postgres.rst
+.. _`docs/application_services.rst`: docs/application_services.rst
+.. _`Registration`: https://github.com/matrix-org/matrix-doc/blob/master/specification/10_client_server_api.rst#registration
+.. _`Retrieving Server Keys`: https://github.com/matrix-org/matrix-doc/blob/6f2698/specification/30_server_server_api.rst#retrieving-server-keys
+.. _`Application Services`: https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api
+
+Changes in synapse v0.8.1 (2015-03-18)
+======================================
+
+* Disable registration by default. New users can be added using the command
+ ``register_new_matrix_user`` or by enabling registration in the config.
+* Add metrics to synapse. To enable metrics use config options
+ ``enable_metrics`` and ``metrics_port``.
+* Fix bug where banning only kicked the user.
+
+Changes in synapse v0.8.0 (2015-03-06)
+======================================
+
+General:
+
+* Add support for registration fallback. This is a page hosted on the server
+ which allows a user to register for an account, regardless of what client
+ they are using (e.g. mobile devices).
+
+* Added new default push rules and made them configurable by clients:
+
+ * Suppress all notice messages.
+ * Notify when invited to a new room.
+ * Notify for messages that don't match any rule.
+ * Notify on incoming call.
+
+Federation:
+
+* Added per-host, server-side rate-limiting of incoming federation requests.
+* Added a ``/get_missing_events/`` API to federation to reduce number of
+ ``/events/`` requests.
+
+Configuration:
+
+* Added configuration option to disable registration:
+ ``disable_registration``.
+* Added configuration option to change soft limit of number of open file
+ descriptors: ``soft_file_limit``.
+* Make ``tls_private_key_path`` optional when running with ``no_tls``.
+
+Application services:
+
+* Application services can now poll on the CS API ``/events`` for their events,
+ by providing their application service ``access_token``.
+* Added exclusive namespace support to application services API.
+
+
+Changes in synapse v0.7.1 (2015-02-19)
+======================================
+
+* Initial alpha implementation of parts of the Application Services API.
+ Including:
+
+ - AS Registration / Unregistration
+ - User Query API
+ - Room Alias Query API
+ - Push transport for receiving events.
+ - User/Alias namespace admin control
+
+* Add cache when fetching events from remote servers to stop repeatedly
+ fetching events with bad signatures.
+* Respect the per remote server retry scheme when fetching both events and
+ server keys to reduce the number of times we send requests to dead servers.
+* Inform remote servers when the local server fails to handle a received event.
+* Turn off python bytecode generation due to problems experienced when
+ upgrading from previous versions.
+
+Changes in synapse v0.7.0 (2015-02-12)
+======================================
+
+* Add initial implementation of the query auth federation API, allowing
+ servers to agree on whether an event should be allowed or rejected.
+* Persist events we have rejected from federation, fixing the bug where
+ servers would keep requesting the same events.
+* Various federation performance improvements, including:
+
+ - Add in memory caches on queries such as:
+
+ * Computing the state of a room at a point in time, used for
+ authorization on federation requests.
+ * Fetching events from the database.
+ * User's room membership, used for authorizing presence updates.
+
+ - Upgraded JSON library to improve parsing and serialisation speeds.
+
+* Add default avatars to new user accounts using pydenticon library.
+* Correctly time out federation requests.
+* Retry federation requests against different servers.
+* Add support for push and push rules.
+* Add alpha versions of proposed new CSv2 APIs, including ``/sync`` API.
+
+Changes in synapse 0.6.1 (2015-01-07)
+=====================================
+
+* Major optimizations to improve performance of initial sync and event sending
+ in large rooms (by up to 10x)
+* Media repository now includes a Content-Length header on media downloads.
+* Improve quality of thumbnails by changing resizing algorithm.
+
+Changes in synapse 0.6.0 (2014-12-16)
+=====================================
+
+* Add new API for media upload and download that supports thumbnailing.
+* Replicate media uploads over multiple homeservers so media is always served
+ to clients from their local homeserver. This obsoletes the
+ --content-addr parameter and confusion over accessing content directly
+ from remote homeservers.
+* Implement exponential backoff when retrying federation requests when
+ sending to remote homeservers which are offline.
+* Implement typing notifications.
+* Fix bugs where we sent events with invalid signatures, caused by
+  incorrectly persisted events.
+* Improve performance of database queries involving retrieving events.
+
+Changes in synapse 0.5.4a (2014-12-13)
+======================================
+
+* Fix bug while generating the error message when a file path specified in
+ the config doesn't exist.
+
+Changes in synapse 0.5.4 (2014-12-03)
+=====================================
+
+* Fix presence bug where some rooms did not display presence updates for
+ remote users.
+* Do not log SQL timing log lines when started with "-v".
+* Fix potential memory leak.
+
+Changes in synapse 0.5.3c (2014-12-02)
+======================================
+
+* Change the default value for the `content_addr` option to use the HTTP
+ listener, as by default the HTTPS listener will be using a self-signed
+ certificate.
+
+Changes in synapse 0.5.3 (2014-11-27)
+=====================================
+
+* Fix bug that caused joining a remote room to fail if a single event was not
+ signed correctly.
+* Fix bug which caused servers to continuously try and fetch events from other
+ servers.
+
+Changes in synapse 0.5.2 (2014-11-26)
+=====================================
+
+Fix major bug that caused rooms to disappear from people's initial sync.
+
+Changes in synapse 0.5.1 (2014-11-26)
+=====================================
+See UPGRADE.rst for specific instructions on how to upgrade.
+
+ * Fix bug where we served up an Event that did not match its signatures.
+ * Fix regression where we no longer correctly handled the case where a
+   homeserver receives an event for a room it doesn't recognise (but is in).
+
+Changes in synapse 0.5.0 (2014-11-19)
+=====================================
+This release includes changes to the federation protocol and client-server API
+that are not backwards compatible.
+
+This release also changes the internal database schemas and so requires servers to
+drop their current history. See UPGRADE.rst for details.
+
+Homeserver:
+ * Add authentication and authorization to the federation protocol. Events are
+ now signed by their originating homeservers.
+ * Implement the new authorization model for rooms.
+ * Split out web client into a separate repository: matrix-angular-sdk.
+ * Change the structure of PDUs.
+ * Fix bug where user could not join rooms via an alias containing 4-byte
+ UTF-8 characters.
+ * Merge concept of PDUs and Events internally.
+ * Improve logging by adding request ids to log lines.
+ * Implement a very basic room initial sync API.
+ * Implement the new invite/join federation APIs.
+
+Webclient:
+ * The webclient has been moved to a separate repository.
+
+Changes in synapse 0.4.2 (2014-10-31)
+=====================================
+
+Homeserver:
+ * Fix bugs where we did not notify users of correct presence updates.
+ * Fix bug where we did not handle sub second event stream timeouts.
+
+Webclient:
+ * Add ability to click on messages to see JSON.
+ * Add ability to redact messages.
+ * Add ability to view and edit all room state JSON.
+ * Handle incoming redactions.
+ * Improve feedback on errors.
+ * Fix bugs in mobile CSS.
+ * Fix bugs with desktop notifications.
+
+Changes in synapse 0.4.1 (2014-10-17)
+=====================================
+Webclient:
+ * Fix bug with display of timestamps.
+
+Changes in synapse 0.4.0 (2014-10-17)
+=====================================
+This release includes changes to the federation protocol and client-server API
+that are not backwards compatible.
+
+The Matrix specification has been moved to a separate git repository:
+http://github.com/matrix-org/matrix-doc
+
+You will also need an updated syutil and config. See UPGRADES.rst.
+
+Homeserver:
+ * Sign federation transactions to assert strong identity over federation.
+ * Rename timestamp keys in PDUs and events from 'ts' and 'hsob_ts' to 'origin_server_ts'.
+
+
+Changes in synapse 0.3.4 (2014-09-25)
+=====================================
+This version adds support for using a TURN server. See docs/turn-howto.rst on
+how to set one up.
+
+Homeserver:
+ * Add support for redaction of messages.
+ * Fix bug where inviting a user on a remote home server could take up to
+ 20-30s.
+ * Implement a get current room state API.
+ * Add support for specifying and retrieving TURN server configuration.
+
+Webclient:
+ * Add button to send messages to users from the home page.
+ * Add support for using TURN for VoIP calls.
+ * Show display name change messages.
+ * Fix bug where the client didn't get the state of a newly joined room
+   until after it had been refreshed.
+ * Fix bugs with tab complete.
+ * Fix bug where holding down the down arrow caused chrome to chew 100% CPU.
+ * Fix bug where desktop notifications occasionally used "Undefined" as the
+ display name.
+ * Fix more places where we sometimes displayed room IDs incorrectly.
+ * Fix bug which caused lag when entering text in the text box.
+
+Changes in synapse 0.3.3 (2014-09-22)
+=====================================
+
+Homeserver:
+ * Fix bug where you continued to get events for rooms you had left.
+
+Webclient:
+ * Add support for video calls with basic UI.
+ * Fix bug where one to one chats were named after your display name rather
+ than the other person's.
+ * Fix bug which caused lag when typing in the textarea.
+ * Refuse to run on browsers we know won't work.
+ * Trigger pagination when joining new rooms.
+ * Fix bug where we sometimes didn't display invitations in recents.
+ * Automatically join room when accepting a VoIP call.
+ * Disable outgoing and reject incoming calls on browsers we don't support
+ VoIP in.
+ * Don't display desktop notifications for messages in the room you are
+   currently active and speaking in.
+
+Changes in synapse 0.3.2 (2014-09-18)
+=====================================
+
+Webclient:
+ * Fix bug where an empty "bing words" list in old accounts didn't send
+ notifications when it should have done.
+
+Changes in synapse 0.3.1 (2014-09-18)
+=====================================
+This is a release to hotfix v0.3.0 to fix two regressions.
+
+Webclient:
+ * Fix a regression where we sometimes displayed duplicate events.
+ * Fix a regression where we didn't immediately remove rooms you were
+ banned in from the recents list.
+
+Changes in synapse 0.3.0 (2014-09-18)
+=====================================
+See UPGRADE for information about changes to the client server API, including
+breaking backwards compatibility with VoIP calls and registration API.
+
+Homeserver:
+ * When a user changes their displayname or avatar the server will now update
+ all their join states to reflect this.
+ * The server now adds an "age" key to events to indicate how old they are.
+   This is clock independent, so at no point does any server or webclient
+   have to assume their clock is in sync with everyone else.
+ * Fix bug where we didn't correctly pull in missing PDUs.
+ * Fix bug where the prev_content key wasn't always returned.
+ * Add support for password resets.
+
+Webclient:
+ * Improve page content loading.
+ * Join/parts now trigger desktop notifications.
+ * Always show room aliases in the UI if one is present.
+ * No longer show user-count in the recents side panel.
+ * Add up & down arrow support to the text box for message sending to step
+ through your sent history.
+ * Don't display notifications for our own messages.
+ * Emotes are now formatted correctly in desktop notifications.
+ * The recents list now differentiates between public & private rooms.
+ * Fix bug where when switching between rooms the pagination flickered before
+ the view jumped to the bottom of the screen.
+ * Add bing word support.
+
+Registration API:
+ * The registration API has been overhauled to function like the login API. In
+   practice, this means registration requests must now include
+   'type':'m.login.password', as illustrated below. See UPGRADE for more
+   information on this.
+ * The 'user_id' key has been renamed to 'user' to better match the login API.
+ * There is an additional login type: 'm.login.email.identity'.
+ * The command client and web client have been updated to reflect these changes.
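+
+For example, a minimal registration request under the new scheme might look
+like this (the exact endpoint path and the field values are illustrative
+assumptions)::
+
+    $ curl -X POST "https://localhost:8448/_matrix/client/api/v1/register" \
+        -d '{"type": "m.login.password", "user": "alice", "password": "s3cret"}'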
+
+Changes in synapse 0.2.3 (2014-09-12)
+=====================================
+
+Homeserver:
+ * Fix bug where we stopped sending events to remote home servers if a
+ user from that home server left, even if there were some still in the
+ room.
+ * Fix bugs in the state conflict resolution where it was incorrectly
+ rejecting events.
+
+Webclient:
+ * Display room names and topics.
+ * Allow setting/editing of room names and topics.
+ * Display information about rooms on the main page.
+ * Handle ban and kick events in real time.
+ * VoIP UI and reliability improvements.
+ * Add glare support for VoIP.
+ * Improvements to initial startup speed.
+ * Don't display duplicate join events.
+ * Local echo of messages.
+ * Differentiate sending and sent of local echo.
+ * Various minor bug fixes.
+
+Changes in synapse 0.2.2 (2014-09-06)
+=====================================
+
+Homeserver:
+ * When the server returns state events it now also includes the previous
+ content.
+ * Add support for inviting people when creating a new room.
+ * Make the homeserver inform the room via `m.room.aliases` when a new alias
+ is added for a room.
+ * Validate `m.room.power_level` events.
+
+Webclient:
+ * Add support for captchas on registration.
+ * Handle `m.room.aliases` events.
+ * Asynchronously send messages and show a local echo.
+ * Inform the UI when a message failed to send.
+ * Only autoscroll on receiving a new message if the user was already at the
+ bottom of the screen.
+ * Add support for ban/kick reasons.
+
+Changes in synapse 0.2.1 (2014-09-03)
+=====================================
+
+Homeserver:
+ * Added support for signing up with a third party id.
+ * Add synctl scripts.
+ * Added rate limiting.
+ * Add option to change the external address the content repo uses.
+ * Presence bug fixes.
+
+Webclient:
+ * Added support for signing up with a third party id.
+ * Added support for banning and kicking users.
+ * Added support for displaying and setting ops.
+ * Added support for room names.
+ * Fix bugs with room membership event display.
+
+Changes in synapse 0.2.0 (2014-09-02)
+=====================================
+This update changes many configuration options, updates the
+database schema and mandates SSL for server-server connections.
+
+Homeserver:
+ * Require SSL for server-server connections.
+ * Add SSL listener for client-server connections.
+ * Add ability to use config files.
+ * Add support for kicking/banning and power levels.
+ * Allow setting of room names and topics on creation.
+ * Change presence to include last seen time of the user.
+ * Change url path prefix to /_matrix/...
+ * Bug fixes to presence.
+
+Webclient:
+ * Reskin the CSS for registration and login.
+ * Various improvements to rooms CSS.
+ * Support changes in client-server API.
+ * Bug fixes to VoIP UI.
+ * Various bug fixes to handling of changes to room member list.
+
+Changes in synapse 0.1.2 (2014-08-29)
+=====================================
+
+Webclient:
+ * Add basic call state UI for VoIP calls.
+
+Changes in synapse 0.1.1 (2014-08-29)
+=====================================
+
+Homeserver:
+ * Fix bug that caused the event stream to not notify some clients about
+ changes.
+
+Changes in synapse 0.1.0 (2014-08-29)
+=====================================
+Presence has been reenabled in this release.
+
+Homeserver:
+ * Update client to server API, including:
+ - Use a more consistent url scheme.
+ - Provide more useful information in the initial sync api.
+ * Change the presence handling to be much more efficient.
+ * Change the presence server to server API to not require explicit polling of
+ all users who share a room with a user.
+ * Fix races in the event streaming logic.
+
+Webclient:
+ * Update to use new client to server API.
+ * Add basic VoIP support.
+ * Add idle timers that change your status to away.
+ * Add recent rooms column when viewing a room.
+ * Various network efficiency improvements.
+ * Add basic mobile browser support.
+ * Add a settings page.
+
+Changes in synapse 0.0.1 (2014-08-22)
+=====================================
+Presence has been disabled in this release due to a bug that caused the
+homeserver to spam other remote homeservers.
+
+Homeserver:
+ * Completely change the database schema to support generic event types.
+ * Improve presence reliability.
+ * Improve reliability of joining remote rooms.
+ * Fix bug where room join events were duplicated.
+ * Improve initial sync API to return more information to the client.
+ * Stop generating fake messages for room membership events.
+
+Webclient:
+ * Add tab completion of names.
+ * Add ability to upload and send images.
+ * Add profile pages.
+ * Improve CSS layout of room.
+ * Disambiguate identical display names.
+ * Don't get remote users' display names and avatars individually.
+ * Use the new initial sync API to reduce the number of round trips to the
+   homeserver.
+ * Change url scheme to use room aliases instead of room ids where known.
+ * Increase longpoll timeout.
+
+Changes in synapse 0.0.0 (2014-08-13)
+=====================================
+
+ * Initial alpha release
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 00000000..2a88647c
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,118 @@
+Contributing code to Matrix
+===========================
+
+Everyone is welcome to contribute code to Matrix
+(https://github.com/matrix-org), provided that they are willing to license
+their contributions under the same license as the project itself. We follow a
+simple 'inbound=outbound' model for contributions: the act of submitting an
+'inbound' contribution means that the contributor agrees to license the code
+under the same terms as the project's overall 'outbound' license - in our
+case, this is almost always Apache Software License v2 (see LICENSE).
+
+How to contribute
+~~~~~~~~~~~~~~~~~
+
+The preferred and easiest way to contribute changes to Matrix is to fork the
+relevant project on github, and then create a pull request to ask us to pull
+your changes into our repo
+(https://help.github.com/articles/using-pull-requests/)
+
+**The single biggest thing you need to know is: please base your changes on
+the develop branch - /not/ master.**
+
+We use the master branch to track the most recent release, so that folks who
+blindly clone the repo and automatically check out master get something that
+works. Develop is the unstable branch where all the development actually
+happens: the workflow is that contributors should fork the develop branch to
+make a 'feature' branch for a particular contribution, and then make a pull
+request to merge this back into the matrix.org 'official' develop branch. We
+use github's pull request workflow to review the contribution, and either ask
+you to make any refinements needed or merge it and make them ourselves. The
+changes will then land on master when we next do a release.
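+
+For example, a typical contribution flow might look like this (the fork URL
+and branch name are placeholders)::
+
+    $ git clone https://github.com/yourname/synapse.git
+    $ cd synapse
+    $ git checkout -b my-feature origin/develop   # base on develop, not master
+    $ # ...hack, commit...
+    $ git push -u origin my-feature
+    $ # then open a pull request against the matrix.org develop branch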
+
+We use Jenkins for continuous integration (http://matrix.org/jenkins), and
+typically all pull requests get automatically tested by Jenkins: if your
+change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org
+so please lurk there and keep an eye open.
+
+Code style
+~~~~~~~~~~
+
+All Matrix projects have a well-defined code-style - and sometimes we've even
+got as far as documenting it... For instance, synapse's code style doc lives
+at https://github.com/matrix-org/synapse/tree/master/docs/code_style.rst.
+
+Please ensure your changes match the cosmetic style of the existing project,
+and **never** mix cosmetic and functional changes in the same commit, as it
+makes it horribly hard to review otherwise.
+
+Attribution
+~~~~~~~~~~~
+
+Everyone who contributes anything to Matrix is welcome to be listed in the
+AUTHORS.rst file for the project in question. Please feel free to include a
+change to AUTHORS.rst in your pull request to list yourself and a short
+description of the area(s) you've worked on. Also, we sometimes have swag to
+give away to contributors - if you feel that Matrix-branded apparel is missing
+from your life, please mail us your shipping address to matrix at matrix.org
+and we'll try to fix it :)
+
+Sign off
+~~~~~~~~
+
+In order to have a concrete record that your contribution is intentional
+and you agree to license it under the same terms as the project's license,
+we've adopted the same lightweight approach that the Linux Kernel
+(https://www.kernel.org/doc/Documentation/SubmittingPatches), Docker
+(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
+projects use: the DCO (Developer Certificate of Origin:
+http://developercertificate.org/). This is a simple declaration that you wrote
+the contribution or otherwise have the right to contribute it to Matrix::
+
+ Developer Certificate of Origin
+ Version 1.1
+
+ Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+ 660 York Street, Suite 102,
+ San Francisco, CA 94110 USA
+
+ Everyone is permitted to copy and distribute verbatim copies of this
+ license document, but changing it is not allowed.
+
+ Developer's Certificate of Origin 1.1
+
+ By making a contribution to this project, I certify that:
+
+ (a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+ (b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+ (c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+ (d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
+If you agree to this for your contribution, then all that's needed is to
+include the line in your commit or pull request comment::
+
+ Signed-off-by: Your Name <your@email.example.org>
+
+...using your real name; unfortunately pseudonyms and anonymous contributions
+can't be accepted. Git makes this trivial - just use the -s flag when you do
+``git commit``, having first set ``user.name`` and ``user.email`` git configs
+(which you should have done anyway :)
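+
+For example, assuming a fresh checkout (the name and address below are
+placeholders)::
+
+    git config user.name "Your Name"
+    git config user.email your@email.example.org
+    git commit -s -m "Fix the thing"   # -s appends the Signed-off-by line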
+
+Conclusion
+~~~~~~~~~~
+
+That's it! Matrix is a very open and collaborative project as you might expect
+given our obsession with open communication. If we're going to successfully
+matrix together all the fragmented communication technologies out there we are
+reliant on contributions and collaboration from the community to do so. So
+please get involved - and we hope you have as much fun hacking on Matrix as we
+do!
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..f433b1a5
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,177 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..5668665d
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,25 @@
+include synctl
+include LICENSE
+include VERSION
+include *.rst
+include demo/README
+include demo/demo.tls.dh
+include demo/*.py
+include demo/*.sh
+
+recursive-include synapse/storage/schema *.sql
+recursive-include synapse/storage/schema *.py
+
+recursive-include docs *
+recursive-include scripts *
+recursive-include scripts-dev *
+recursive-include tests *.py
+
+recursive-include synapse/static *.css
+recursive-include synapse/static *.gif
+recursive-include synapse/static *.html
+recursive-include synapse/static *.js
+
+exclude jenkins.sh
+
+prune demo/etc
diff --git a/MAP.rst b/MAP.rst
new file mode 100644
index 00000000..0f8e9818
--- /dev/null
+++ b/MAP.rst
@@ -0,0 +1,35 @@
+Directory Structure
+===================
+
+Warning: this may be a bit stale...
+
+::
+
+ .
+ ├── cmdclient Basic CLI python Matrix client
+ ├── demo Scripts for running standalone Matrix demos
+ ├── docs All doc, including the draft Matrix API spec
+ │   ├── client-server The client-server Matrix API spec
+ │   ├── model Domain-specific elements of the Matrix API spec
+ │   ├── server-server The server-server model of the Matrix API spec
+ │   └── sphinx The internal API doc of the Synapse homeserver
+ ├── experiments Early experiments of using Synapse's internal APIs
+ ├── graph Visualisation of Matrix's distributed message store
+ ├── synapse The reference Matrix homeserver implementation
+ │   ├── api Common building blocks for the APIs
+ │   │   ├── events Definition of state representation Events
+ │   │   └── streams Definition of streamable Event objects
+ │   ├── app The __main__ entry point for the homeserver
+ │   ├── crypto The PKI client/server used for secure federation
+ │   │   └── resource PKI helper objects (e.g. keys)
+ │   ├── federation Server-server state replication logic
+ │   ├── handlers The main business logic of the homeserver
+ │   ├── http Wrappers around Twisted's HTTP server & client
+ │   ├── rest Servlet-style RESTful API
+ │   ├── storage Persistence subsystem (currently only sqlite3)
+ │   │   └── schema sqlite persistence schema
+ │   └── util Synapse-specific utilities
+ ├── tests Unit tests for the Synapse homeserver
+ └── webclient Basic AngularJS Matrix web client
+
+
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..a8f0c621
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,519 @@
+.. contents::
+
+Introduction
+============
+
+Matrix is an ambitious new ecosystem for open federated Instant Messaging and
+VoIP. The basics you need to know to get up and running are:
+
+- Everything in Matrix happens in a room. Rooms are distributed and do not
+ exist on any single server. Rooms can be located using convenience aliases
+ like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
+
+- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
+ you will normally refer to yourself and others using a 3PID: email
+ address, phone number, etc rather than manipulating Matrix user IDs)
+
+The overall architecture is::
+
+ client <----> homeserver <=====================> homeserver <----> client
+ https://somewhere.org/_matrix https://elsewhere.net/_matrix
+
+``#matrix:matrix.org`` is the official support room for Matrix, and can be
+accessed by any client from https://matrix.org/blog/try-matrix-now or via IRC
+bridge at irc://irc.freenode.net/matrix.
+
+Synapse is currently in rapid development, but as of version 0.5 we believe it
+is sufficiently stable to be run as an internet-facing service for real usage!
+
+About Matrix
+============
+
+Matrix specifies a set of pragmatic RESTful HTTP JSON APIs as an open standard,
+which handle:
+
+- Creating and managing fully distributed chat rooms with no
+ single points of control or failure
+- Eventually-consistent cryptographically secure synchronisation of room
+ state across a global open network of federated servers and services
+- Sending and receiving extensible messages in a room with (optional)
+ end-to-end encryption[1]
+- Inviting, joining, leaving, kicking, banning room members
+- Managing user accounts (registration, login, logout)
+- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
+ Facebook accounts to authenticate, identify and discover users on Matrix.
+- Placing 1:1 VoIP and Video calls
+
+These APIs are intended to be implemented on a wide range of servers, services
+and clients, letting developers build messaging and VoIP functionality on top
+of the entirely open Matrix ecosystem rather than using closed or proprietary
+solutions. The hope is for Matrix to act as the building blocks for a new
+generation of fully open and interoperable messaging and VoIP apps for the
+internet.
+
+Synapse is a reference "homeserver" implementation of Matrix from the core
+development team at matrix.org, written in Python/Twisted for clarity and
+simplicity. It is intended to showcase the concept of Matrix and let folks see
+the spec in the context of a codebase and let you run your own homeserver and
+generally help bootstrap the ecosystem.
+
+In Matrix, every user runs one or more Matrix clients, which connect through to
+a Matrix homeserver which stores all their personal chat history and user
+account information - much as a mail client connects through to an IMAP/SMTP
+server. Just like email, you can either run your own Matrix homeserver and
+control and own your own communications and history or use one hosted by
+someone else (e.g. matrix.org) - there is no single point of control or
+mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, etc.
+
+Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
+web client demo implemented in AngularJS) and cmdclient (a basic Python
+command line utility which lets you easily see what the JSON APIs are up to).
+
+Meanwhile, iOS and Android SDKs and clients are available from:
+
+- https://github.com/matrix-org/matrix-ios-sdk
+- https://github.com/matrix-org/matrix-ios-kit
+- https://github.com/matrix-org/matrix-ios-console
+- https://github.com/matrix-org/matrix-android-sdk
+
+We'd like to invite you to join #matrix:matrix.org (via
+https://matrix.org/blog/try-matrix-now), run a homeserver, take a look at the
+Matrix spec at https://matrix.org/docs/spec and API docs at
+https://matrix.org/docs/api, experiment with the APIs and the demo clients, and
+report any bugs via https://matrix.org/jira.
+
+Thanks for using Matrix!
+
+[1] End-to-end encryption is currently in development - see https://matrix.org/git/olm
+
+Synapse Installation
+====================
+
+Synapse is the reference python/twisted Matrix homeserver implementation.
+
+System requirements:
+
+- POSIX-compliant system (tested on Linux & OS X)
+- Python 2.7
+- At least 512 MB RAM.
+
+Synapse is written in python but some of the libraries it uses are written in
+C. So before we can install synapse itself we need a working C compiler and the
+header files for python C extensions.
+
+Installing prerequisites on Ubuntu or Debian::
+
+ sudo apt-get install build-essential python2.7-dev libffi-dev \
+ python-pip python-setuptools sqlite3 \
+ libssl-dev python-virtualenv libjpeg-dev
+
+Installing prerequisites on ArchLinux::
+
+ sudo pacman -S base-devel python2 python-pip \
+ python-setuptools python-virtualenv sqlite3
+
+Installing prerequisites on Mac OS X::
+
+ xcode-select --install
+ sudo easy_install pip
+ sudo pip install virtualenv
+
+To install the synapse homeserver run::
+
+ virtualenv -p python2.7 ~/.synapse
+ source ~/.synapse/bin/activate
+ pip install --upgrade setuptools
+ pip install --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
+
+This installs synapse, along with the libraries it uses, into a virtual
+environment under ``~/.synapse``. Feel free to pick a different directory
+if you prefer.
+
+In case of problems, please see the `Troubleshooting`_ section below.
+
+Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
+above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
+
+To set up your homeserver, run (in your virtualenv, as before)::
+
+ cd ~/.synapse
+ python -m synapse.app.homeserver \
+ --server-name machine.my.domain.name \
+ --config-path homeserver.yaml \
+ --generate-config
+
+...substituting your host and domain name as appropriate.
+
+This will generate you a config file that you can then customise, but it will
+also generate a set of keys for you. These keys will allow your Home Server to
+identify itself to other Home Servers, so don't lose or delete them. It would be
+wise to back them up somewhere safe. If, for whatever reason, you do need to
+change your Home Server's keys, you may find that other Home Servers have the
+old key cached. If you update the signing key, you should change the name of the
+key in the ``<server name>.signing.key`` file (the second word, which by
+default is 'auto') to something different.
+
+By default, registration of new users is disabled. You can either enable
+registration in the config by specifying ``enable_registration: true``
+(it is then recommended to also set up CAPTCHA), or
+you can use the command line to register new users::
+
+ $ source ~/.synapse/bin/activate
+ $ register_new_matrix_user -c homeserver.yaml https://localhost:8448
+ New user localpart: erikj
+ Password:
+ Confirm password:
+ Success!
+
+For reliable VoIP calls to be routed via this homeserver, you MUST configure
+a TURN server. See docs/turn-howto.rst for details.
+
+Using PostgreSQL
+================
+
+As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
+alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
+traditionally used for convenience and simplicity.
+
+The advantages of Postgres include:
+
+* significant performance improvements due to the superior threading and
+ caching model, smarter query optimiser
+* allowing the DB to be run on separate hardware
+* allowing basic active/backup high-availability with a "hot spare" synapse
+ pointing at the same DB master, as well as enabling DB replication in
+ synapse itself.
+
+The only disadvantage is that the code is relatively new as of April 2015 and
+may have a few regressions relative to SQLite.
+
+For information on how to install and use PostgreSQL, please see
+`docs/postgres.rst <docs/postgres.rst>`_.
+
+Running Synapse
+===============
+
+To actually run your new homeserver, pick a working directory for Synapse to
+run (e.g. ``~/.synapse``), and::
+
+ cd ~/.synapse
+ source ./bin/activate
+ synctl start
+
+Platform Specific Instructions
+==============================
+
+ArchLinux
+---------
+
+The quickest way to get up and running with ArchLinux is probably with Ivan
+Shapovalov's AUR package from
+https://aur.archlinux.org/packages/matrix-synapse/, which should pull in all
+the necessary dependencies.
+
+Alternatively, to install using pip a few changes may be needed as ArchLinux
+defaults to python 3, but synapse currently assumes python 2.7 by default:
+
+pip may be outdated (e.g. 6.0.7-1 needs to be upgraded to 6.0.8-1)::
+
+ sudo pip2.7 install --upgrade pip
+
+You also may need to explicitly specify python 2.7 again during the install
+request::
+
+ pip2.7 install --process-dependency-links \
+ https://github.com/matrix-org/synapse/tarball/master
+
+If lib bcrypt causes a "Wrong ELF Class: ELFCLASS32" error (on x64 systems),
+you may need to reinstall py-bcrypt to correctly compile it under the right
+architecture. (This should not be needed if installing under virtualenv)::
+
+ sudo pip2.7 uninstall py-bcrypt
+ sudo pip2.7 install py-bcrypt
+
+During setup of Synapse you need to call python2.7 directly again::
+
+ cd ~/.synapse
+ python2.7 -m synapse.app.homeserver \
+ --server-name machine.my.domain.name \
+ --config-path homeserver.yaml \
+ --generate-config
+
+...substituting your host and domain name as appropriate.
+
+Windows Install
+---------------
+Synapse can be installed on Cygwin. It requires the following Cygwin packages:
+
+- gcc
+- git
+- libffi-devel
+- openssl (and openssl-devel, python-openssl)
+- python
+- python-setuptools
+
+The content repository requires additional packages and will be unable to process
+uploads without them:
+
+- libjpeg8
+- libjpeg8-devel
+- zlib
+
+If you choose to install Synapse without these packages, you will need to
+reinstall ``pillow`` for changes to be applied, e.g. ``pip uninstall pillow``
+followed by ``pip install pillow --user``.
+
+Troubleshooting:
+
+- You may need to upgrade ``setuptools`` to get this to work correctly:
+ ``pip install setuptools --upgrade``.
+- You may encounter errors indicating that ``ffi.h`` is missing, even with
+ ``libffi-devel`` installed. If you do, copy the ``.h`` files:
+ ``cp /usr/lib/libffi-3.0.13/include/*.h /usr/include``
+- You may need to install libsodium from source in order to install PyNacl. If
+ you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
+ it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``
+
+Troubleshooting
+===============
+
+Troubleshooting Installation
+----------------------------
+
+Synapse requires pip 1.7 or later, so if your OS provides too old a version and
+you get errors about ``error: no such option: --process-dependency-links`` you
+may need to manually upgrade it::
+
+ sudo pip install --upgrade pip
+
+Installing may fail with ``mock requires setuptools>=17.1. Aborting installation``.
+You can fix this by upgrading setuptools::
+
+ pip install --upgrade setuptools
+
+If pip crashes mid-installation for whatever reason (e.g. a lost terminal),
+pip may refuse to run until you remove the temporary installation directory
+it created. To reset the installation::
+
+ rm -rf /tmp/pip_install_matrix
+
+pip seems to leak *lots* of memory during installation. For instance, a Linux
+host with 512MB of RAM may run out of memory whilst installing Twisted. If this
+happens, you will have to individually install the dependencies which are
+failing, e.g.::
+
+ pip install twisted
+
+On OS X, if you encounter ``clang: error: unknown argument: '-mno-fused-madd'``
+you will need to ``export CFLAGS=-Qunused-arguments``.
+
+Troubleshooting Running
+-----------------------
+
+If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
+to manually upgrade PyNaCl, as synapse uses NaCl (http://nacl.cr.yp.to/) for
+encryption and digital signatures.
+Unfortunately PyNaCl currently has a few issues
+(https://github.com/pyca/pynacl/issues/53 and
+https://github.com/pyca/pynacl/issues/79) that mean it may not install
+correctly, causing all tests to fail with errors about missing "sodium.h". To
+fix this, try re-installing from PyPI or directly from github
+(https://github.com/pyca/pynacl)::
+
+ # Install from PyPI
+ pip install --user --upgrade --force pynacl
+
+ # Install from github
+ pip install --user https://github.com/pyca/pynacl/tarball/master
+
+ArchLinux
+~~~~~~~~~
+
+If running ``synctl start`` fails with 'returned non-zero exit status 1',
+you will need to explicitly call python2.7 - either running as::
+
+ python2.7 -m synapse.app.homeserver --daemonize -c homeserver.yaml
+
+...or by editing synctl with the correct python executable.
+
+Synapse Development
+===================
+
+To check out a synapse for development, clone the git repo into a working
+directory of your choice::
+
+ git clone https://github.com/matrix-org/synapse.git
+ cd synapse
+
+Synapse has a number of external dependencies that are easiest
+to install using pip and a virtualenv::
+
+ virtualenv env
+ source env/bin/activate
+ python synapse/python_dependencies.py | xargs -n1 pip install
+ pip install setuptools_trial mock
+
+This will download and install all of the needed dependencies into the
+virtual env.
+
+Once this is done, you may wish to run Synapse's unit tests, to
+check that everything is installed as it should be::
+
+ python setup.py test
+
+This should end with a 'PASSED' result::
+
+ Ran 143 tests in 0.601s
+
+ PASSED (successes=143)
+
+
+Upgrading an existing Synapse
+=============================
+
+The instructions for upgrading synapse are in `UPGRADE.rst`_.
+Please check these instructions as upgrading may require extra steps for some
+versions of synapse.
+
+.. _UPGRADE.rst: UPGRADE.rst
+
+Setting up Federation
+=====================
+
+In order for other homeservers to send messages to your server, it will need to
+be publicly visible on the internet, and they will need to know its host name.
+You have two choices here, which will influence the form of your Matrix user
+IDs:
+
+1) Use the machine's own hostname as available on public DNS in the form of
+ its A or AAAA records. This is easier to set up initially, perhaps for
+ testing, but lacks the flexibility of SRV.
+
+2) Set up a SRV record for your domain name. This requires you create a SRV
+ record in DNS, but gives the flexibility to run the server on your own
+ choice of TCP port, on a machine that might not be the same name as the
+ domain name.
+
+For the first form, simply pass the required hostname (of the machine) as the
+--server-name parameter::
+
+ python -m synapse.app.homeserver \
+ --server-name machine.my.domain.name \
+ --config-path homeserver.yaml \
+ --generate-config
+ python -m synapse.app.homeserver --config-path homeserver.yaml
+
+Alternatively, you can run ``synctl start`` to guide you through the process.
+
+For the second form, first create your SRV record and publish it in DNS. This
+needs to be named ``_matrix._tcp.YOURDOMAIN``, and point at at least one hostname
+and port where the server is running. (At the current time synapse does not
+support clustering multiple servers into a single logical homeserver). The DNS
+record would then look something like::
+
+ $ dig -t srv _matrix._tcp.machine.my.domain.name
+ _matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.
+
+
+At this point, you should then run the homeserver with the hostname of this
+SRV record, as that is the name other machines will expect it to have::
+
+ python -m synapse.app.homeserver \
+ --server-name YOURDOMAIN \
+ --config-path homeserver.yaml \
+ --generate-config
+ python -m synapse.app.homeserver --config-path homeserver.yaml
+
+
+You may additionally want to pass one or more ``-v`` options to increase the
+verbosity of logging output, at least for initial testing.
+
+Running a Demo Federation of Synapses
+-------------------------------------
+
+If you want to get up and running quickly with a trio of homeservers in a
+private federation (``localhost:8080``, ``localhost:8081`` and
+``localhost:8082``), which you can then access through the webclient running
+at http://localhost:8080, simply run::
+
+ demo/start.sh
+
+This is mainly useful just for development purposes.
+
+Running The Demo Web Client
+===========================
+
+The homeserver runs a web client by default at https://localhost:8448/.
+
+If this is the first time you have used the client from that browser (it uses
+HTML5 local storage to remember its config), you will need to log in to your
+account. If you don't yet have an account, because you've just started the
+homeserver for the first time, then you'll need to register one.
+
+
+Registering A New Account
+-------------------------
+
+Your new user name will be formed partly from the hostname your server is
+running as, and partly from a localpart you specify when you create the
+account. Your name will take the form of::
+
+ @localpart:my.domain.here
+ (pronounced "at localpart on my dot domain dot here")
+
+Specify your desired localpart in the topmost box of the "Register for an
+account" form, and click the "Register" button. Hostnames can contain ports if
+required due to lack of SRV records (e.g. @matthew:localhost:8448 on an
+internal synapse sandbox running on localhost).
+
+If registration fails, you may need to enable it in the homeserver (see
+`Synapse Installation`_ above).
+
+
+Logging In To An Existing Account
+---------------------------------
+
+Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
+the form and click the Login button.
+
+
+Identity Servers
+================
+
+The job of authenticating 3PIDs and tracking which 3PIDs are associated with a
+given Matrix user is very security-sensitive, as there is obvious risk of spam
+if it is too easy to sign up for Matrix accounts or harvest 3PID data.
+Meanwhile the job of publishing the end-to-end encryption public keys for
+Matrix users is also very security-sensitive for similar reasons.
+
+Therefore the role of managing trusted identity in the Matrix ecosystem is
+farmed out to a cluster of known trusted ecosystem partners, who run 'Matrix
+Identity Servers' such as ``sydent``, whose role is purely to authenticate and
+track 3PID logins and publish end-user public keys.
+
+It's currently early days for identity servers as Matrix is not yet using 3PIDs
+as the primary means of identity and E2E encryption is not complete. As such,
+we are running a single identity server (https://matrix.org) at the current
+time.
+
+
+Where's the spec?!
+==================
+
+The source of the matrix spec lives at https://github.com/matrix-org/matrix-doc.
+A recent HTML snapshot of this lives at http://matrix.org/docs/spec.
+
+
+Building Internal API Documentation
+===================================
+
+Before building internal API documentation, install sphinx and
+sphinxcontrib-napoleon::
+
+ pip install sphinx
+ pip install sphinxcontrib-napoleon
+
+Building internal API documentation::
+
+ python setup.py build_sphinx
+
diff --git a/UPGRADE.rst b/UPGRADE.rst
new file mode 100644
index 00000000..35a0333a
--- /dev/null
+++ b/UPGRADE.rst
@@ -0,0 +1,255 @@
+Upgrading Synapse
+=================
+
+Before upgrading, check if any special steps are required to upgrade from
+what you currently have installed to the current version of synapse. The
+extra instructions that may be required are listed later in this document.
+
+If synapse was installed in a virtualenv then activate that virtualenv before
+upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run:
+
+.. code:: bash
+
+ source ~/.synapse/bin/activate
+
+If synapse was installed using pip then upgrade to the latest version by
+running:
+
+.. code:: bash
+
+ pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
+
+If synapse was installed using git then upgrade to the latest version by
+running:
+
+.. code:: bash
+
+ # Pull the latest version of the master branch.
+ git pull
+ # Update the versions of synapse's python dependencies.
+ python synapse/python_dependencies.py | xargs -n1 pip install
+
+
+
+Upgrading to v0.9.0
+===================
+
+Application services have had a breaking API change in this version.
+
+They can no longer register themselves with a home server using the AS HTTP
+API. This decision was made because a compromised application service with
+free rein to register any regex in effect grants full read/write access to
+the home server if a regex of ``.*`` is used. An attack where a compromised
+AS re-registers itself with ``.*`` was deemed too big a security risk to
+ignore, and so the ability to register with the HS remotely has been removed.
+
+It has been replaced by specifying a list of application service registrations in
+``homeserver.yaml``::
+
+ app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
+
+Where ``registration-01.yaml`` looks like::
+
+ url: <String> # e.g. "https://my.application.service.com"
+ as_token: <String>
+ hs_token: <String>
+ sender_localpart: <String> # This is a new field which denotes the user_id localpart when using the AS token
+ namespaces:
+ users:
+ - exclusive: <Boolean>
+ regex: <String> # e.g. "@prefix_.*"
+ aliases:
+ - exclusive: <Boolean>
+ regex: <String>
+ rooms:
+ - exclusive: <Boolean>
+ regex: <String>
+
+Upgrading to v0.8.0
+===================
+
+Servers which use captchas will need to add their public key to::
+
+ static/client/register/register_config.js
+
+ window.matrixRegistrationConfig = {
+ recaptcha_public_key: "YOUR_PUBLIC_KEY"
+ };
+
+This is required in order to support registration fallback (typically used on
+mobile devices).
+
+
+Upgrading to v0.7.0
+===================
+
+New dependencies are:
+
+- pydenticon
+- simplejson
+- syutil
+- matrix-angular-sdk
+
+To pull in these dependencies in a virtual env, run::
+
+ python synapse/python_dependencies.py | xargs -n 1 pip install
+
+Upgrading to v0.6.0
+===================
+
+To pull in new dependencies, run::
+
+ python setup.py develop --user
+
+This update includes a change to the database schema. To upgrade you first need
+to upgrade the database by running::
+
+ python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
+
+Where ``<db>`` is the location of the database, ``<server_name>`` is the
+server name as specified in the synapse configuration, and ``<signing_key>``
+is the location of the signing key as specified in the synapse configuration.
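+
+For example, with illustrative paths and server name::
+
+    python scripts/upgrade_db_to_v0.6.0.py homeserver.db my.domain.name \
+        my.domain.name.signing.key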
+
+This may take some time to complete. Failures of signatures and content hashes
+can safely be ignored.
+
+
+Upgrading to v0.5.1
+===================
+
+Depending on precisely when you installed v0.5.0 you may have ended up with
+a stale release of the reference matrix webclient installed as a python module.
+To uninstall it and ensure you are depending on the latest module, please run::
+
+ $ pip uninstall syweb
+
+Upgrading to v0.5.0
+===================
+
+The webclient has been split out into a separate repository/package in this
+release. Before you restart your homeserver you will need to pull in the
+webclient package by running::
+
+ python setup.py develop --user
+
+This release completely changes the database schema and so requires upgrading
+it before starting the new version of the homeserver.
+
+The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
+database. This will save all user information, such as logins and profiles,
+but will otherwise purge the database. This includes messages, the rooms the
+home server was a member of, and room alias mappings.
+
+If you would like to keep your history, please take a copy of your database
+file and ask for help in #matrix:matrix.org. The upgrade process is,
+unfortunately, non trivial and requires human intervention to resolve any
+resulting conflicts during the upgrade process.
+
+Before running the command the homeserver should first be completely shut
+down. To run it, simply specify the location of the database, e.g.::
+
+ ./scripts/database-prepare-for-0.5.0.sh "homeserver.db"
+
+Once this has successfully completed it will be safe to restart the
+homeserver. You may notice that the homeserver takes a few seconds longer to
+restart than usual as it reinitializes the database.
+
+On startup of the new version, users can rejoin remote rooms either by using
+room aliases or by being reinvited. Alternatively, if any other homeserver
+sends a message to a room that the homeserver was previously in, the local HS
+will automatically rejoin the room.
+
+Upgrading to v0.4.0
+===================
+
+This release needs an updated syutil version. Run::
+
+ python setup.py develop
+
+You will also need to upgrade your configuration as the signing key format has
+changed. Run::
+
+ python -m synapse.app.homeserver --config-path <CONFIG> --generate-config
+
+
+Upgrading to v0.3.0
+===================
+
+The registration API now closely matches the login API. This introduces a bit
+more back-and-forth between the HS and the client, but this improves the
+overall flexibility of the API. You can now GET on /register to retrieve a
+list of valid registration flows. Upon choosing one, they are submitted in
+the same way as login, e.g.::
+
+  {
+    "type": "m.login.password",
+    "user": "foo",
+    "password": "bar"
+  }
+
+The default HS supports 2 flows, with and without Identity Server email
+authentication. Enabling captcha on the HS will add in an extra step to all
+flows: ``m.login.recaptcha`` which must be completed before you can transition
+to the next stage. There is a new login type: ``m.login.email.identity`` which
+contains the ``threepidCreds`` key which was previously sent in the original
+register request. For more information on this, see the specification.
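+
+As a minimal sketch of the new flow (the homeserver URL and credentials are
+placeholders, and the ``requests`` library is assumed)::
+
+    import json
+    import requests
+
+    base = "https://localhost:8448/_matrix/client/api/v1"
+
+    # GET /register lists the registration flows the HS supports.
+    # verify=False because a fresh HS typically has a self-signed cert.
+    print requests.get(base + "/register", verify=False).json()["flows"]
+
+    # Submit a password flow in the same shape as a login request.
+    body = {"type": "m.login.password", "user": "foo", "password": "bar"}
+    print requests.post(base + "/register", data=json.dumps(body),
+                        verify=False).json()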
+
+Web Client
+----------
+
+The VoIP specification has changed between v0.2.0 and v0.3.0. Users should
+refresh any browser tabs to get the latest web client code. Users on
+v0.2.0 of the web client will not be able to call those on v0.3.0 and
+vice versa.
+
+
+Upgrading to v0.2.0
+===================
+
+The home server now requires SSL config to be set up before it can run. To
+automatically generate default config use::
+
+ $ python synapse/app/homeserver.py \
+ --server-name machine.my.domain.name \
+ --bind-port 8448 \
+ --config-path homeserver.config \
+ --generate-config
+
+This config can be edited if desired, for example to specify a different SSL
+certificate to use. Once done you can run the home server using::
+
+ $ python synapse/app/homeserver.py --config-path homeserver.config
+
+See the README.rst for more information.
+
+Also note that some config options have been renamed, including:
+
+- "host" to "server-name"
+- "database" to "database-path"
+- "port" to "bind-port" and "unsecure-port"
+
+
+Upgrading to v0.0.1
+===================
+
+This release completely changes the database schema and so requires upgrading
+it before starting the new version of the homeserver.
+
+The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
+database. This will save all user information, such as logins and profiles,
+but will otherwise purge the database. This includes messages, the rooms the
+home server was a member of, and room alias mappings.
+
+Before running the command the homeserver should first be completely shut
+down. To run it, simply specify the location of the database, e.g.::
+
+ ./scripts/database-prepare-for-0.0.1.sh "homeserver.db"
+
+Once this has successfully completed it will be safe to restart the
+homeserver. You may notice that the homeserver takes a few seconds longer to
+restart than usual as it reinitializes the database.
+
+On startup of the new version, users can rejoin remote rooms either by using
+room aliases or by being reinvited. Alternatively, if any other homeserver
+sends a message to a room that the homeserver was previously in, the local HS
+will automatically rejoin the room.
diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py
new file mode 100755
index 00000000..d9c6ec6a
--- /dev/null
+++ b/contrib/cmdclient/console.py
@@ -0,0 +1,747 @@
+#!/usr/bin/env python
+
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Starts a synapse client console. """
+
+from twisted.internet import reactor, defer, threads
+from http import TwistedHttpClient
+
+import argparse
+import cmd
+import getpass
+import json
+import shlex
+import sys
+import time
+import urllib
+import urlparse
+
+import nacl.signing
+import nacl.encoding
+
+from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException
+
+CONFIG_JSON = "cmdclient_config.json"
+
+TRUSTED_ID_SERVERS = [
+ 'localhost:8001'
+]
+
+class SynapseCmd(cmd.Cmd):
+
+ """Basic synapse command-line processor.
+
+ This processes commands from the user and calls the relevant HTTP methods.
+ """
+
+ def __init__(self, http_client, server_url, identity_server_url, username, token):
+ cmd.Cmd.__init__(self)
+ self.http_client = http_client
+ self.http_client.verbose = True
+ self.config = {
+ "url": server_url,
+ "identityServerUrl": identity_server_url,
+ "user": username,
+ "token": token,
+ "verbose": "on",
+ "complete_usernames": "on",
+ "send_delivery_receipts": "on"
+ }
+ self.path_prefix = "/_matrix/client/api/v1"
+ self.event_stream_token = "END"
+ self.prompt = ">>> "
+
+ def do_EOF(self, line): # allows CTRL+D quitting
+ return True
+
+ def emptyline(self):
+ pass # else it repeats the previous command
+
+ def _usr(self):
+ return self.config["user"]
+
+ def _tok(self):
+ return self.config["token"]
+
+ def _url(self):
+ return self.config["url"] + self.path_prefix
+
+ def _identityServerUrl(self):
+ return self.config["identityServerUrl"]
+
+ def _is_on(self, config_name):
+ if config_name in self.config:
+ return self.config[config_name] == "on"
+ return False
+
+ def _domain(self):
+ if "user" not in self.config or not self.config["user"]:
+ return None
+ return self.config["user"].split(":")[1]
+
+ def do_config(self, line):
+ """ Show the config for this client: "config"
+ Edit a key value mapping: "config key value" e.g. "config token 1234"
+ Config variables:
+ user: The username to auth with.
+ token: The access token to auth with.
+ url: The url of the server.
+ verbose: [on|off] The verbosity of requests/responses.
+ complete_usernames: [on|off] Auto complete partial usernames by
+ assuming they are on the same homeserver as you.
+ E.g. name >> @name:yourhost
+ send_delivery_receipts: [on|off] Automatically send receipts to
+ messages when performing a 'stream' command.
+ Additional key/values can be added and can be substituted into requests
+ by using $. E.g. 'config roomid room1' then 'raw get /rooms/$roomid'.
+ """
+ if len(line) == 0:
+ print json.dumps(self.config, indent=4)
+ return
+
+ try:
+ args = self._parse(line, ["key", "val"], force_keys=True)
+
+ # make sure restricted config values are checked
+ config_rules = [ # key, valid_values
+ ("verbose", ["on", "off"]),
+ ("complete_usernames", ["on", "off"]),
+ ("send_delivery_receipts", ["on", "off"])
+ ]
+ for key, valid_vals in config_rules:
+ if key == args["key"] and args["val"] not in valid_vals:
+ print "%s value must be one of %s" % (args["key"],
+ valid_vals)
+ return
+
+ # toggle the http client verbosity
+ if args["key"] == "verbose":
+ self.http_client.verbose = "on" == args["val"]
+
+ # assign the new config
+ self.config[args["key"]] = args["val"]
+ print json.dumps(self.config, indent=4)
+
+ save_config(self.config)
+ except Exception as e:
+ print e
+
+ def do_register(self, line):
+ """Registers for a new account: "register <userid> <noupdate>"
+ <userid> : The desired user ID
+ <noupdate> : Do not automatically clobber config values.
+ """
+ args = self._parse(line, ["userid", "noupdate"])
+
+ password = None
+ pwd = None
+ pwd2 = "_"
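+        # Prompt until both password entries match and are non-empty.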
+ while pwd != pwd2:
+ pwd = getpass.getpass("Type a password for this user: ")
+ pwd2 = getpass.getpass("Retype the password: ")
+ if pwd != pwd2 or len(pwd) == 0:
+ print "Password mismatch."
+ pwd = None
+ else:
+ password = pwd
+
+ body = {
+ "type": "m.login.password"
+ }
+ if "userid" in args:
+ body["user"] = args["userid"]
+ if password:
+ body["password"] = password
+
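+        # The interactive command loop runs outside the Twisted reactor
+        # thread, so hand the HTTP work to the reactor via callFromThread.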
+ reactor.callFromThread(self._do_register, body,
+ "noupdate" not in args)
+
+ @defer.inlineCallbacks
+ def _do_register(self, data, update_config):
+ # check the registration flows
+ url = self._url() + "/register"
+ json_res = yield self.http_client.do_request("GET", url)
+ print json.dumps(json_res, indent=4)
+
+ passwordFlow = None
+ for flow in json_res["flows"]:
+ if flow["type"] == "m.login.recaptcha" or ("stages" in flow and "m.login.recaptcha" in flow["stages"]):
+ print "Unable to register: Home server requires captcha."
+ return
+ if flow["type"] == "m.login.password" and "stages" not in flow:
+ passwordFlow = flow
+ break
+
+ if not passwordFlow:
+ return
+
+ json_res = yield self.http_client.do_request("POST", url, data=data)
+ print json.dumps(json_res, indent=4)
+ if update_config and "user_id" in json_res:
+ self.config["user"] = json_res["user_id"]
+ self.config["token"] = json_res["access_token"]
+ save_config(self.config)
+
+ def do_login(self, line):
+ """Login as a specific user: "login @bob:localhost"
+ You MAY be prompted for a password, or instructed to visit a URL.
+ """
+ try:
+ args = self._parse(line, ["user_id"], force_keys=True)
+ can_login = threads.blockingCallFromThread(
+ reactor,
+ self._check_can_login)
+ if can_login:
+ p = getpass.getpass("Enter your password: ")
+ user = args["user_id"]
+ if self._is_on("complete_usernames") and not user.startswith("@"):
+ domain = self._domain()
+ if domain:
+ user = "@" + user + ":" + domain
+
+ reactor.callFromThread(self._do_login, user, p)
+ #print " got %s " % p
+ except Exception as e:
+ print e
+
+ @defer.inlineCallbacks
+ def _do_login(self, user, password):
+ path = "/login"
+ data = {
+ "user": user,
+ "password": password,
+ "type": "m.login.password"
+ }
+ url = self._url() + path
+ json_res = yield self.http_client.do_request("POST", url, data=data)
+ print json_res
+
+ if "access_token" in json_res:
+ self.config["user"] = user
+ self.config["token"] = json_res["access_token"]
+ save_config(self.config)
+ print "Login successful."
+
+ @defer.inlineCallbacks
+ def _check_can_login(self):
+ path = "/login"
+ # ALWAYS check that the home server can handle the login request before
+ # submitting!
+ url = self._url() + path
+ json_res = yield self.http_client.do_request("GET", url)
+ print json_res
+
+ if "flows" not in json_res:
+ print "Failed to find any login flows."
+ defer.returnValue(False)
+
+ flow = json_res["flows"][0] # assume first is the one we want.
+ if ("type" not in flow or "m.login.password" != flow["type"] or
+ "stages" in flow):
+ fallback_url = self._url() + "/login/fallback"
+ print ("Unable to login via the command line client. Please visit "
+ "%s to login." % fallback_url)
+ defer.returnValue(False)
+ defer.returnValue(True)
+
+ def do_emailrequest(self, line):
+ """Requests the association of a third party identifier
+ <address> The email address)
+ <clientSecret> A string of characters generated when requesting an email that you'll supply in subsequent calls to identify yourself
+ <sendAttempt> The number of times the user has requested an email. Leave this the same between requests to retry the request at the transport level. Increment it to request that the email be sent again.
+ """
+ args = self._parse(line, ['address', 'clientSecret', 'sendAttempt'])
+
+ postArgs = {'email': args['address'], 'clientSecret': args['clientSecret'], 'sendAttempt': args['sendAttempt']}
+
+ reactor.callFromThread(self._do_emailrequest, postArgs)
+
+ @defer.inlineCallbacks
+ def _do_emailrequest(self, args):
+ url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/requestToken"
+
+ json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
+ headers={'Content-Type': ['application/x-www-form-urlencoded']})
+ print json_res
+ if 'sid' in json_res:
+ print "Token sent. Your session ID is %s" % (json_res['sid'])
+
+ def do_emailvalidate(self, line):
+ """Validate and associate a third party ID
+ <sid> The session ID (sid) given to you in the response to requestToken
+ <token> The token sent to your third party identifier address
+ <clientSecret> The same clientSecret you supplied in requestToken
+ """
+ args = self._parse(line, ['sid', 'token', 'clientSecret'])
+
+ postArgs = { 'sid' : args['sid'], 'token' : args['token'], 'clientSecret': args['clientSecret'] }
+
+ reactor.callFromThread(self._do_emailvalidate, postArgs)
+
+ @defer.inlineCallbacks
+ def _do_emailvalidate(self, args):
+ url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/submitToken"
+
+ json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
+ headers={'Content-Type': ['application/x-www-form-urlencoded']})
+ print json_res
+
+ def do_3pidbind(self, line):
+ """Validate and associate a third party ID
+ <sid> The session ID (sid) given to you in the response to requestToken
+ <clientSecret> The same clientSecret you supplied in requestToken
+ """
+ args = self._parse(line, ['sid', 'clientSecret'])
+
+ postArgs = { 'sid' : args['sid'], 'clientSecret': args['clientSecret'] }
+ postArgs['mxid'] = self.config["user"]
+
+ reactor.callFromThread(self._do_3pidbind, postArgs)
+
+ @defer.inlineCallbacks
+ def _do_3pidbind(self, args):
+ url = self._identityServerUrl()+"/_matrix/identity/api/v1/3pid/bind"
+
+ json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
+ headers={'Content-Type': ['application/x-www-form-urlencoded']})
+ print json_res
+
+ def do_join(self, line):
+ """Joins a room: "join <roomid>" """
+ try:
+ args = self._parse(line, ["roomid"], force_keys=True)
+ self._do_membership_change(args["roomid"], "join", self._usr())
+ except Exception as e:
+ print e
+
+ def do_joinalias(self, line):
+ try:
+ args = self._parse(line, ["roomname"], force_keys=True)
+ path = "/join/%s" % urllib.quote(args["roomname"])
+ reactor.callFromThread(self._run_and_pprint, "POST", path, {})
+ except Exception as e:
+ print e
+
+ def do_topic(self, line):
+ """"topic [set|get] <roomid> [<newtopic>]"
+ Set the topic for a room: topic set <roomid> <newtopic>
+ Get the topic for a room: topic get <roomid>
+ """
+ try:
+ args = self._parse(line, ["action", "roomid", "topic"])
+ if "action" not in args or "roomid" not in args:
+ print "Must specify set|get and a room ID."
+ return
+ if args["action"].lower() not in ["set", "get"]:
+ print "Must specify set|get, not %s" % args["action"]
+ return
+
+ path = "/rooms/%s/topic" % urllib.quote(args["roomid"])
+
+ if args["action"].lower() == "set":
+ if "topic" not in args:
+ print "Must specify a new topic."
+ return
+ body = {
+ "topic": args["topic"]
+ }
+ reactor.callFromThread(self._run_and_pprint, "PUT", path, body)
+ elif args["action"].lower() == "get":
+ reactor.callFromThread(self._run_and_pprint, "GET", path)
+ except Exception as e:
+ print e
+
+ def do_invite(self, line):
+ """Invite a user to a room: "invite <userid> <roomid>" """
+ try:
+ args = self._parse(line, ["userid", "roomid"], force_keys=True)
+
+ user_id = args["userid"]
+
+ reactor.callFromThread(self._do_invite, args["roomid"], user_id)
+ except Exception as e:
+ print e
+
+ @defer.inlineCallbacks
+ def _do_invite(self, roomid, userstring):
+ if (not userstring.startswith('@') and
+ self._is_on("complete_usernames")):
+ url = self._identityServerUrl()+"/_matrix/identity/api/v1/lookup"
+
+ json_res = yield self.http_client.do_request("GET", url, qparams={'medium':'email','address':userstring})
+
+ mxid = None
+
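+            # The lookup response is a signed 3PID -> Matrix ID mapping: fetch
+            # the identity server's ed25519 public key and only trust the
+            # mapping if a signature from a trusted server verifies.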
+ if 'mxid' in json_res and 'signatures' in json_res:
+ url = self._identityServerUrl()+"/_matrix/identity/api/v1/pubkey/ed25519"
+
+ pubKey = None
+ pubKeyObj = yield self.http_client.do_request("GET", url)
+ if 'public_key' in pubKeyObj:
+ pubKey = nacl.signing.VerifyKey(pubKeyObj['public_key'], encoder=nacl.encoding.HexEncoder)
+ else:
+ print "No public key found in pubkey response!"
+
+ sigValid = False
+
+ if pubKey:
+ for signame in json_res['signatures']:
+ if signame not in TRUSTED_ID_SERVERS:
+ print "Ignoring signature from untrusted server %s" % (signame)
+ else:
+ try:
+ verify_signed_json(json_res, signame, pubKey)
+ sigValid = True
+ print "Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame)
+ break
+ except SignatureVerifyException as e:
+ print "Invalid signature from %s" % (signame)
+ print e
+
+ if sigValid:
+ print "Resolved 3pid %s to %s" % (userstring, json_res['mxid'])
+ mxid = json_res['mxid']
+ else:
+ print "Got association for %s but couldn't verify signature" % (userstring)
+
+ if not mxid:
+ mxid = "@" + userstring + ":" + self._domain()
+
+ self._do_membership_change(roomid, "invite", mxid)
+
+ def do_leave(self, line):
+ """Leaves a room: "leave <roomid>" """
+ try:
+ args = self._parse(line, ["roomid"], force_keys=True)
+ self._do_membership_change(args["roomid"], "leave", self._usr())
+ except Exception as e:
+ print e
+
+ def do_send(self, line):
+ """Sends a message. "send <roomid> <body>" """
+ args = self._parse(line, ["roomid", "body"])
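+        # Timestamp-based transaction ID; the HS uses it to de-duplicate
+        # retried sends.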
+ txn_id = "txn%s" % int(time.time())
+ path = "/rooms/%s/send/m.room.message/%s" % (urllib.quote(args["roomid"]),
+ txn_id)
+ body_json = {
+ "msgtype": "m.text",
+ "body": args["body"]
+ }
+ reactor.callFromThread(self._run_and_pprint, "PUT", path, body_json)
+
+ def do_list(self, line):
+ """List data about a room.
+ "list members <roomid> [query]" - List all the members in this room.
+ "list messages <roomid> [query]" - List all the messages in this room.
+
+ Where [query] will be directly applied as query parameters, allowing
+ you to use the pagination API. E.g. the last 3 messages in this room:
+ "list messages <roomid> from=END&to=START&limit=3"
+ """
+ args = self._parse(line, ["type", "roomid", "qp"])
+ if not "type" in args or not "roomid" in args:
+ print "Must specify type and room ID."
+ return
+ if args["type"] not in ["members", "messages"]:
+ print "Unrecognised type: %s" % args["type"]
+ return
+ room_id = args["roomid"]
+ path = "/rooms/%s/%s" % (urllib.quote(room_id), args["type"])
+
+ qp = {"access_token": self._tok()}
+ if "qp" in args:
+ for key_value_str in args["qp"].split("&"):
+ try:
+ key_value = key_value_str.split("=")
+ qp[key_value[0]] = key_value[1]
+                except IndexError:
+ print "Bad query param: %s" % key_value
+ return
+
+ reactor.callFromThread(self._run_and_pprint, "GET", path,
+ query_params=qp)
+
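+    # A minimal sketch (hypothetical values) of what the [query] argument
+    # above turns into:
+    #
+    #     qp = {"access_token": "<token>"}
+    #     for key_value_str in "from=END&to=START&limit=3".split("&"):
+    #         key_value = key_value_str.split("=")
+    #         qp[key_value[0]] = key_value[1]
+    #     # qp == {"access_token": "<token>", "from": "END",
+    #     #        "to": "START", "limit": "3"}
+    #
+    # so arbitrary pagination parameters pass straight through to the API.
+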
+ def do_create(self, line):
+ """Creates a room.
+ "create [public|private] <roomname>" - Create a room <roomname> with the
+ specified visibility.
+ "create <roomname>" - Create a room <roomname> with default visibility.
+ "create [public|private]" - Create a room with specified visibility.
+ "create" - Create a room with default visibility.
+ """
+ args = self._parse(line, ["vis", "roomname"])
+ # fixup args depending on which were set
+ body = {}
+ if "vis" in args and args["vis"] in ["public", "private"]:
+ body["visibility"] = args["vis"]
+
+ if "roomname" in args:
+ room_name = args["roomname"]
+ body["room_alias_name"] = room_name
+ elif "vis" in args and args["vis"] not in ["public", "private"]:
+ room_name = args["vis"]
+ body["room_alias_name"] = room_name
+
+ reactor.callFromThread(self._run_and_pprint, "POST", "/createRoom", body)
+
+ def do_raw(self, line):
+ """Directly send a JSON object: "raw <method> <path> <data> <notoken>"
+ <method>: Required. One of "PUT", "GET", "POST", "xPUT", "xGET",
+ "xPOST". Methods with 'x' prefixed will not automatically append the
+ access token.
+ <path>: Required. E.g. "/events"
+ <data>: Optional. E.g. "{ "msgtype":"custom.text", "body":"abc123"}"
+ """
+ args = self._parse(line, ["method", "path", "data"])
+ # sanity check
+ if "method" not in args or "path" not in args:
+ print "Must specify path and method."
+ return
+
+ args["method"] = args["method"].upper()
+ valid_methods = ["PUT", "GET", "POST", "DELETE",
+ "XPUT", "XGET", "XPOST", "XDELETE"]
+ if args["method"] not in valid_methods:
+ print "Unsupported method: %s" % args["method"]
+ return
+
+ if "data" not in args:
+ args["data"] = None
+ else:
+ try:
+ args["data"] = json.loads(args["data"])
+ except Exception as e:
+ print "Data is not valid JSON. %s" % e
+ return
+
+ qp = {"access_token": self._tok()}
+ if args["method"].startswith("X"):
+ qp = {} # remove access token
+ args["method"] = args["method"][1:] # snip the X
+ else:
+ # append any query params the user has set
+ try:
+ parsed_url = urlparse.urlparse(args["path"])
+ qp.update(urlparse.parse_qs(parsed_url.query))
+ args["path"] = parsed_url.path
+ except:
+ pass
+
+ reactor.callFromThread(self._run_and_pprint, args["method"],
+ args["path"],
+ args["data"],
+ query_params=qp)
+
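+    # Sketch of the 'x' handling above (hypothetical input): "raw xGET /events"
+    # is sent as GET with no access_token; "raw GET /events?limit=5" merges the
+    # path's own query string with the token, giving roughly
+    #
+    #     qp = {"access_token": "<token>", "limit": ["5"]}
+    #
+    # (urlparse.parse_qs returns each value as a list).
+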
+ def do_stream(self, line):
+ """Stream data from the server: "stream <longpoll timeout ms>" """
+ args = self._parse(line, ["timeout"])
+ timeout = 5000
+ if "timeout" in args:
+ try:
+ timeout = int(args["timeout"])
+ except ValueError:
+ print "Timeout must be in milliseconds."
+ return
+ reactor.callFromThread(self._do_event_stream, timeout)
+
+ @defer.inlineCallbacks
+ def _do_event_stream(self, timeout):
+ res = yield self.http_client.get_json(
+ self._url() + "/events",
+ {
+ "access_token": self._tok(),
+ "timeout": str(timeout),
+ "from": self.event_stream_token
+ })
+ print json.dumps(res, indent=4)
+
+ if "chunk" in res:
+ for event in res["chunk"]:
+ if (event["type"] == "m.room.message" and
+ self._is_on("send_delivery_receipts") and
+ event["user_id"] != self._usr()): # not sent by us
+ self._send_receipt(event, "d")
+
+        # update the position in the stream
+ if "end" in res:
+ self.event_stream_token = res["end"]
+
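+    # The long-poll contract assumed above: GET /events blocks for up to
+    # <timeout> ms and returns a body shaped roughly like
+    #
+    #     {"chunk": [...events...], "start": "<token>", "end": "<token>"}
+    #
+    # so feeding "end" back in as "from" resumes the stream where it left off.
+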
+ def _send_receipt(self, event, feedback_type):
+ path = ("/rooms/%s/messages/%s/%s/feedback/%s/%s" %
+ (urllib.quote(event["room_id"]), event["user_id"], event["msg_id"],
+ self._usr(), feedback_type))
+ data = {}
+ reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data,
+ alt_text="Sent receipt for %s" % event["msg_id"])
+
+ def _do_membership_change(self, roomid, membership, userid):
+ path = "/rooms/%s/state/m.room.member/%s" % (urllib.quote(roomid), urllib.quote(userid))
+ data = {
+ "membership": membership
+ }
+ reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data)
+
+ def do_displayname(self, line):
+ """Get or set my displayname: "displayname [new_name]" """
+ args = self._parse(line, ["name"])
+ path = "/profile/%s/displayname" % (self.config["user"])
+
+ if "name" in args:
+ data = {"displayname": args["name"]}
+ reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data)
+ else:
+ reactor.callFromThread(self._run_and_pprint, "GET", path)
+
+ def _do_presence_state(self, state, line):
+ args = self._parse(line, ["msgstring"])
+ path = "/presence/%s/status" % (self.config["user"])
+ data = {"state": state}
+ if "msgstring" in args:
+ data["status_msg"] = args["msgstring"]
+
+ reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data)
+
+ def do_offline(self, line):
+ """Set my presence state to OFFLINE"""
+ self._do_presence_state(0, line)
+
+ def do_away(self, line):
+ """Set my presence state to AWAY"""
+ self._do_presence_state(1, line)
+
+ def do_online(self, line):
+ """Set my presence state to ONLINE"""
+ self._do_presence_state(2, line)
+
+ def _parse(self, line, keys, force_keys=False):
+ """ Parses the given line.
+
+ Args:
+ line : The line to parse
+ keys : A list of keys to map onto the args
+ force_keys : True to enforce that the line has a value for every key
+ Returns:
+ A dict of key:arg
+ """
+ line_args = shlex.split(line)
+ if force_keys and len(line_args) != len(keys):
+ raise IndexError("Must specify all args: %s" % keys)
+
+ # do $ substitutions
+ for i, arg in enumerate(line_args):
+ for config_key in self.config:
+ if ("$" + config_key) in arg:
+ arg = arg.replace("$" + config_key,
+ self.config[config_key])
+ line_args[i] = arg
+
+ return dict(zip(keys, line_args))
+
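+    # Example (hypothetical config): with self.config == {"user": "@me:host"},
+    #
+    #     self._parse('set !room:host "hi $user"', ["action", "roomid", "body"])
+    #
+    # returns {"action": "set", "roomid": "!room:host", "body": "hi @me:host"}:
+    # shlex keeps quoted arguments together, and "$user" is substituted from
+    # the config.
+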
+ @defer.inlineCallbacks
+ def _run_and_pprint(self, method, path, data=None,
+ query_params={"access_token": None}, alt_text=None):
+ """ Runs an HTTP request and pretty prints the output.
+
+ Args:
+ method: HTTP method
+ path: Relative path
+ data: Raw JSON data if any
+ query_params: dict of query parameters to add to the url
+ """
+ url = self._url() + path
+ if "access_token" in query_params:
+ query_params["access_token"] = self._tok()
+
+ json_res = yield self.http_client.do_request(method, url,
+ data=data,
+ qparams=query_params)
+ if alt_text:
+ print alt_text
+ else:
+ print json.dumps(json_res, indent=4)
+
+
+def save_config(config):
+ with open(CONFIG_JSON, 'w') as out:
+ json.dump(config, out)
+
+
+def main(server_url, identity_server_url, username, token, config_path):
+ print "Synapse command line client"
+ print "==========================="
+ print "Server: %s" % server_url
+ print "Type 'help' to get started."
+ print "Close this console with CTRL+C then CTRL+D."
+ if not username or not token:
+ print "- 'register <username>' - Register an account"
+ print "- 'stream' - Connect to the event stream"
+ print "- 'create <roomid>' - Create a room"
+ print "- 'send <roomid> <message>' - Send a message"
+ http_client = TwistedHttpClient()
+
+ # the command line client
+ syn_cmd = SynapseCmd(http_client, server_url, identity_server_url, username, token)
+
+ # load synapse.json config from a previous session
+ global CONFIG_JSON
+ CONFIG_JSON = config_path # bit cheeky, but just overwrite the global
+ try:
+ with open(config_path, 'r') as config:
+ syn_cmd.config = json.load(config)
+ try:
+ http_client.verbose = "on" == syn_cmd.config["verbose"]
+ except:
+ pass
+ print "Loaded config from %s" % config_path
+ except:
+ pass
+
+ # Twisted-specific: Runs the command processor in Twisted's event loop
+ # to maintain a single thread for both commands and event processing.
+ # If using another HTTP client, just call syn_cmd.cmdloop()
+ reactor.callInThread(syn_cmd.cmdloop)
+ reactor.run()
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser("Starts a synapse client.")
+ parser.add_argument(
+ "-s", "--server", dest="server", default="http://localhost:8008",
+ help="The URL of the home server to talk to.")
+ parser.add_argument(
+ "-i", "--identity-server", dest="identityserver", default="http://localhost:8090",
+ help="The URL of the identity server to talk to.")
+ parser.add_argument(
+ "-u", "--username", dest="username",
+ help="Your username on the server.")
+ parser.add_argument(
+ "-t", "--token", dest="token",
+ help="Your access token.")
+ parser.add_argument(
+ "-c", "--config", dest="config", default=CONFIG_JSON,
+ help="The location of the config.json file to read from.")
+ args = parser.parse_args()
+
+ if not args.server:
+ print "You must supply a server URL to communicate with."
+ parser.print_help()
+ sys.exit(1)
+
+ server = args.server
+ if not server.startswith("http://"):
+ server = "http://" + args.server
+
+ main(server, args.identityserver, args.username, args.token, args.config)
diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py
new file mode 100644
index 00000000..869f782e
--- /dev/null
+++ b/contrib/cmdclient/http.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.web.client import Agent, readBody
+from twisted.web.http_headers import Headers
+from twisted.internet import defer, reactor
+
+from pprint import pformat
+
+import json
+import urllib
+
+
+class HttpClient(object):
+ """ Interface for talking json over http
+ """
+
+ def put_json(self, url, data):
+ """ Sends the specifed json data using PUT
+
+ Args:
+ url (str): The URL to PUT data to.
+ data (dict): A dict containing the data that will be used as
+ the request body. This will be encoded as JSON.
+
+ Returns:
+ Deferred: Succeeds when we get *any* HTTP response.
+
+ The result of the deferred is a tuple of `(code, response)`,
+ where `response` is a dict representing the decoded JSON body.
+ """
+ pass
+
+ def get_json(self, url, args=None):
+ """ Get's some json from the given host homeserver and path
+
+ Args:
+ url (str): The URL to GET data from.
+ args (dict): A dictionary used to create query strings, defaults to
+ None.
+ **Note**: The value of each key is assumed to be an iterable
+ and *not* a string.
+
+ Returns:
+ Deferred: Succeeds when we get *any* HTTP response.
+
+ The result of the deferred is a tuple of `(code, response)`,
+ where `response` is a dict representing the decoded JSON body.
+ """
+ pass
+
+
+class TwistedHttpClient(HttpClient):
+ """ Wrapper around the twisted HTTP client api.
+
+ Attributes:
+ agent (twisted.web.client.Agent): The twisted Agent used to send the
+ requests.
+ """
+
+    def __init__(self):
+        self.agent = Agent(reactor)
+        # do_request/_create_request consult self.verbose, but main() only
+        # sets it when a config file is successfully loaded, so default it
+        self.verbose = False
+
+ @defer.inlineCallbacks
+ def put_json(self, url, data):
+ response = yield self._create_put_request(
+ url,
+ data,
+ headers_dict={"Content-Type": ["application/json"]}
+ )
+ body = yield readBody(response)
+ defer.returnValue((response.code, body))
+
+ @defer.inlineCallbacks
+ def get_json(self, url, args=None):
+ if args:
+ # generates a list of strings of form "k=v".
+ qs = urllib.urlencode(args, True)
+ url = "%s?%s" % (url, qs)
+ response = yield self._create_get_request(url)
+ body = yield readBody(response)
+ defer.returnValue(json.loads(body))
+
+ def _create_put_request(self, url, json_data, headers_dict={}):
+ """ Wrapper of _create_request to issue a PUT request
+ """
+
+ if "Content-Type" not in headers_dict:
+            # defer.error is not a Twisted API; raise synchronously instead
+            raise RuntimeError("Must include Content-Type header for PUTs")
+
+ return self._create_request(
+ "PUT",
+ url,
+ producer=_JsonProducer(json_data),
+ headers_dict=headers_dict
+ )
+
+ def _create_get_request(self, url, headers_dict={}):
+ """ Wrapper of _create_request to issue a GET request
+ """
+ return self._create_request(
+ "GET",
+ url,
+ headers_dict=headers_dict
+ )
+
+ @defer.inlineCallbacks
+    def do_request(self, method, url, data=None, qparams=None, jsonreq=True,
+                   headers=None):
+        # avoid the shared mutable default: the Content-Type assignment below
+        # would otherwise mutate a dict reused across calls
+        headers = headers if headers is not None else {}
+ if qparams:
+ url = "%s?%s" % (url, urllib.urlencode(qparams, True))
+
+ if jsonreq:
+ prod = _JsonProducer(data)
+            headers['Content-Type'] = ["application/json"]
+ else:
+ prod = _RawProducer(data)
+
+ if method in ["POST", "PUT"]:
+ response = yield self._create_request(method, url,
+ producer=prod,
+ headers_dict=headers)
+ else:
+ response = yield self._create_request(method, url)
+
+ body = yield readBody(response)
+ defer.returnValue(json.loads(body))
+
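+    # Usage sketch (hypothetical URL), from inside an inlineCallbacks function:
+    #
+    #     res = yield client.do_request(
+    #         "POST", "http://localhost:8008/_matrix/client/api/v1/createRoom",
+    #         data={}, qparams={"access_token": token})
+    #
+    # The returned deferred fires with the decoded JSON response body.
+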
+ @defer.inlineCallbacks
+ def _create_request(self, method, url, producer=None, headers_dict={}):
+ """ Creates and sends a request to the given url
+ """
+ headers_dict["User-Agent"] = ["Synapse Cmd Client"]
+
+ retries_left = 5
+ print "%s to %s with headers %s" % (method, url, headers_dict)
+ if self.verbose and producer:
+ if "password" in producer.data:
+ temp = producer.data["password"]
+ producer.data["password"] = "[REDACTED]"
+ print json.dumps(producer.data, indent=4)
+ producer.data["password"] = temp
+ else:
+ print json.dumps(producer.data, indent=4)
+
+ while True:
+ try:
+ response = yield self.agent.request(
+ method,
+ url.encode("UTF8"),
+ Headers(headers_dict),
+ producer
+ )
+ break
+ except Exception as e:
+ print "uh oh: %s" % e
+ if retries_left:
+ yield self.sleep(2 ** (5 - retries_left))
+ retries_left -= 1
+ else:
+ raise e
+
+ if self.verbose:
+ print "Status %s %s" % (response.code, response.phrase)
+ print pformat(list(response.headers.getAllRawHeaders()))
+ defer.returnValue(response)
+
+ def sleep(self, seconds):
+ d = defer.Deferred()
+ reactor.callLater(seconds, d.callback, seconds)
+ return d
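+
+    # Note on the retry loop in _create_request: retries_left counts down
+    # from 5, so the sleeps are 1, 2, 4, 8 and 16 seconds
+    # (2 ** (5 - retries_left)), i.e. exponential backoff across up to six
+    # attempts before the final exception propagates.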
+
+class _RawProducer(object):
+ def __init__(self, data):
+ self.data = data
+ self.body = data
+ self.length = len(self.body)
+
+ def startProducing(self, consumer):
+ consumer.write(self.body)
+ return defer.succeed(None)
+
+ def pauseProducing(self):
+ pass
+
+ def stopProducing(self):
+ pass
+
+class _JsonProducer(object):
+ """ Used by the twisted http client to create the HTTP body from json
+ """
+ def __init__(self, jsn):
+ self.data = jsn
+ self.body = json.dumps(jsn).encode("utf8")
+ self.length = len(self.body)
+
+ def startProducing(self, consumer):
+ consumer.write(self.body)
+ return defer.succeed(None)
+
+ def pauseProducing(self):
+ pass
+
+ def stopProducing(self):
+        pass
\ No newline at end of file
diff --git a/contrib/experiments/cursesio.py b/contrib/experiments/cursesio.py
new file mode 100644
index 00000000..95d87a1f
--- /dev/null
+++ b/contrib/experiments/cursesio.py
@@ -0,0 +1,168 @@
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import curses
+import curses.wrapper
+from curses.ascii import isprint
+
+from twisted.internet import reactor
+
+
+class CursesStdIO():
+ def __init__(self, stdscr, callback=None):
+ self.statusText = "Synapse test app -"
+ self.searchText = ''
+ self.stdscr = stdscr
+
+ self.logLine = ''
+
+ self.callback = callback
+
+ self._setup()
+
+ def _setup(self):
+ self.stdscr.nodelay(1) # Make non blocking
+
+ self.rows, self.cols = self.stdscr.getmaxyx()
+ self.lines = []
+
+ curses.use_default_colors()
+
+ self.paintStatus(self.statusText)
+ self.stdscr.refresh()
+
+ def set_callback(self, callback):
+ self.callback = callback
+
+ def fileno(self):
+ """ We want to select on FD 0 """
+ return 0
+
+ def connectionLost(self, reason):
+ self.close()
+
+ def print_line(self, text):
+ """ add a line to the internal list of lines"""
+
+ self.lines.append(text)
+ self.redraw()
+
+ def print_log(self, text):
+ self.logLine = text
+ self.redraw()
+
+ def redraw(self):
+ """ method for redisplaying lines
+ based on internal list of lines """
+
+ self.stdscr.clear()
+ self.paintStatus(self.statusText)
+ i = 0
+ index = len(self.lines) - 1
+ while i < (self.rows - 3) and index >= 0:
+ self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index],
+ curses.A_NORMAL)
+ i = i + 1
+ index = index - 1
+
+ self.printLogLine(self.logLine)
+
+ self.stdscr.refresh()
+
+ def paintStatus(self, text):
+ if len(text) > self.cols:
+ raise RuntimeError("TextTooLongError")
+
+ self.stdscr.addstr(
+ self.rows - 2, 0,
+ text + ' ' * (self.cols - len(text)),
+ curses.A_STANDOUT)
+
+ def printLogLine(self, text):
+ self.stdscr.addstr(
+ 0, 0,
+ text + ' ' * (self.cols - len(text)),
+ curses.A_STANDOUT)
+
+ def doRead(self):
+ """ Input is ready! """
+ curses.noecho()
+ c = self.stdscr.getch() # read a character
+
+ if c == curses.KEY_BACKSPACE:
+ self.searchText = self.searchText[:-1]
+
+ elif c == curses.KEY_ENTER or c == 10:
+ text = self.searchText
+ self.searchText = ''
+
+ self.print_line(">> %s" % text)
+
+ try:
+ if self.callback:
+ self.callback.on_line(text)
+ except Exception as e:
+ self.print_line(str(e))
+
+ self.stdscr.refresh()
+
+ elif isprint(c):
+ if len(self.searchText) == self.cols - 2:
+ return
+ self.searchText = self.searchText + chr(c)
+
+ self.stdscr.addstr(self.rows - 1, 0,
+ self.searchText + (' ' * (
+ self.cols - len(self.searchText) - 2)))
+
+ self.paintStatus(self.statusText + ' %d' % len(self.searchText))
+ self.stdscr.move(self.rows - 1, len(self.searchText))
+ self.stdscr.refresh()
+
+ def logPrefix(self):
+ return "CursesStdIO"
+
+ def close(self):
+ """ clean up """
+
+ curses.nocbreak()
+ self.stdscr.keypad(0)
+ curses.echo()
+ curses.endwin()
+
+
+class Callback(object):
+
+ def __init__(self, stdio):
+ self.stdio = stdio
+
+ def on_line(self, text):
+ self.stdio.print_line(text)
+
+
+def main(stdscr):
+ screen = CursesStdIO(stdscr) # create Screen object
+
+ callback = Callback(screen)
+
+ screen.set_callback(callback)
+
+ stdscr.refresh()
+ reactor.addReader(screen)
+ reactor.run()
+ screen.close()
+
+
+if __name__ == '__main__':
+ curses.wrapper(main)
diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py
new file mode 100644
index 00000000..fedf786c
--- /dev/null
+++ b/contrib/experiments/test_messaging.py
@@ -0,0 +1,394 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+""" This is an example of using the server to server implementation to do a
+basic chat style thing. It accepts commands from stdin and outputs to stdout.
+
+It assumes that ucids are of the form <user>@<domain>, and uses <domain> as
+the address of the remote home server to hit.
+
+Usage:
+ python test_messaging.py <port>
+
+Currently assumes the local address is localhost:<port>
+
+"""
+
+
+from synapse.federation import (
+ ReplicationHandler
+)
+
+from synapse.federation.units import Pdu
+
+from synapse.util import origin_from_ucid
+
+from synapse.app.homeserver import SynapseHomeServer
+
+#from synapse.util.logutils import log_function
+
+from twisted.internet import reactor, defer
+from twisted.python import log
+
+import argparse
+import json
+import logging
+import os
+import re
+
+import cursesio
+import curses.wrapper
+
+
+logger = logging.getLogger("example")
+
+
+def exception_errback(failure):
+ logging.exception(failure)
+
+
+class InputOutput(object):
+ """ This is responsible for basic I/O so that a user can interact with
+ the example app.
+ """
+
+ def __init__(self, screen, user):
+ self.screen = screen
+ self.user = user
+
+ def set_home_server(self, server):
+ self.server = server
+
+ def on_line(self, line):
+ """ This is where we process commands.
+ """
+
+ try:
+ m = re.match("^join (\S+)$", line)
+ if m:
+ # The `sender` wants to join a room.
+ room_name, = m.groups()
+ self.print_line("%s joining %s" % (self.user, room_name))
+ self.server.join_room(room_name, self.user, self.user)
+ #self.print_line("OK.")
+ return
+
+ m = re.match("^invite (\S+) (\S+)$", line)
+ if m:
+ # `sender` wants to invite someone to a room
+ room_name, invitee = m.groups()
+ self.print_line("%s invited to %s" % (invitee, room_name))
+ self.server.invite_to_room(room_name, self.user, invitee)
+ #self.print_line("OK.")
+ return
+
+ m = re.match("^send (\S+) (.*)$", line)
+ if m:
+ # `sender` wants to message a room
+ room_name, body = m.groups()
+ self.print_line("%s send to %s" % (self.user, room_name))
+ self.server.send_message(room_name, self.user, body)
+ #self.print_line("OK.")
+ return
+
+ m = re.match("^backfill (\S+)$", line)
+ if m:
+ # we want to backfill a room
+ room_name, = m.groups()
+ self.print_line("backfill %s" % room_name)
+ self.server.backfill(room_name)
+ return
+
+ self.print_line("Unrecognized command")
+
+ except Exception as e:
+ logger.exception(e)
+
+ def print_line(self, text):
+ self.screen.print_line(text)
+
+ def print_log(self, text):
+ self.screen.print_log(text)
+
+
+class IOLoggerHandler(logging.Handler):
+
+ def __init__(self, io):
+ logging.Handler.__init__(self)
+ self.io = io
+
+ def emit(self, record):
+ if record.levelno < logging.WARN:
+ return
+
+ msg = self.format(record)
+ self.io.print_log(msg)
+
+
+class Room(object):
+ """ Used to store (in memory) the current membership state of a room, and
+ which home servers we should send PDUs associated with the room to.
+ """
+ def __init__(self, room_name):
+ self.room_name = room_name
+ self.invited = set()
+ self.participants = set()
+ self.servers = set()
+
+ self.oldest_server = None
+
+ self.have_got_metadata = False
+
+ def add_participant(self, participant):
+ """ Someone has joined the room
+ """
+ self.participants.add(participant)
+ self.invited.discard(participant)
+
+ server = origin_from_ucid(participant)
+ self.servers.add(server)
+
+ if not self.oldest_server:
+ self.oldest_server = server
+
+ def add_invited(self, invitee):
+ """ Someone has been invited to the room
+ """
+ self.invited.add(invitee)
+ self.servers.add(origin_from_ucid(invitee))
+
+
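+# A rough illustration (hypothetical ucids) of the bookkeeping above:
+#
+#     room = Room("#test")
+#     room.add_invited("@bob:host2")      # invited={...}, servers={"host2"}
+#     room.add_participant("@bob:host2")  # moved from invited to participants
+#
+# oldest_server remembers the first server seen joining the room and is later
+# used as the backfill target.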
+class HomeServer(ReplicationHandler):
+ """ A very basic home server implentation that allows people to join a
+ room and then invite other people.
+ """
+ def __init__(self, server_name, replication_layer, output):
+ self.server_name = server_name
+ self.replication_layer = replication_layer
+ self.replication_layer.set_handler(self)
+
+ self.joined_rooms = {}
+
+ self.output = output
+
+ def on_receive_pdu(self, pdu):
+ """ We just received a PDU
+ """
+ pdu_type = pdu.pdu_type
+
+ if pdu_type == "sy.room.message":
+ self._on_message(pdu)
+ elif pdu_type == "sy.room.member" and "membership" in pdu.content:
+ if pdu.content["membership"] == "join":
+ self._on_join(pdu.context, pdu.state_key)
+ elif pdu.content["membership"] == "invite":
+ self._on_invite(pdu.origin, pdu.context, pdu.state_key)
+ else:
+ self.output.print_line("#%s (unrec) %s = %s" %
+ (pdu.context, pdu.pdu_type, json.dumps(pdu.content))
+ )
+
+ #def on_state_change(self, pdu):
+ ##self.output.print_line("#%s (state) %s *** %s" %
+ ##(pdu.context, pdu.state_key, pdu.pdu_type)
+ ##)
+
+ #if "joinee" in pdu.content:
+ #self._on_join(pdu.context, pdu.content["joinee"])
+ #elif "invitee" in pdu.content:
+ #self._on_invite(pdu.origin, pdu.context, pdu.content["invitee"])
+
+ def _on_message(self, pdu):
+ """ We received a message
+ """
+ self.output.print_line("#%s %s %s" %
+ (pdu.context, pdu.content["sender"], pdu.content["body"])
+ )
+
+ def _on_join(self, context, joinee):
+ """ Someone has joined a room, either a remote user or a local user
+ """
+ room = self._get_or_create_room(context)
+ room.add_participant(joinee)
+
+ self.output.print_line("#%s %s %s" %
+ (context, joinee, "*** JOINED")
+ )
+
+ def _on_invite(self, origin, context, invitee):
+ """ Someone has been invited
+ """
+ room = self._get_or_create_room(context)
+ room.add_invited(invitee)
+
+ self.output.print_line("#%s %s %s" %
+ (context, invitee, "*** INVITED")
+ )
+
+ if not room.have_got_metadata and origin is not self.server_name:
+ logger.debug("Get room state")
+ self.replication_layer.get_state_for_context(origin, context)
+ room.have_got_metadata = True
+
+ @defer.inlineCallbacks
+ def send_message(self, room_name, sender, body):
+ """ Send a message to a room!
+ """
+ destinations = yield self.get_servers_for_context(room_name)
+
+ try:
+ yield self.replication_layer.send_pdu(
+ Pdu.create_new(
+ context=room_name,
+ pdu_type="sy.room.message",
+ content={"sender": sender, "body": body},
+ origin=self.server_name,
+ destinations=destinations,
+ )
+ )
+ except Exception as e:
+ logger.exception(e)
+
+ @defer.inlineCallbacks
+ def join_room(self, room_name, sender, joinee):
+ """ Join a room!
+ """
+ self._on_join(room_name, joinee)
+
+ destinations = yield self.get_servers_for_context(room_name)
+
+ try:
+ pdu = Pdu.create_new(
+ context=room_name,
+ pdu_type="sy.room.member",
+ is_state=True,
+ state_key=joinee,
+ content={"membership": "join"},
+ origin=self.server_name,
+ destinations=destinations,
+ )
+ yield self.replication_layer.send_pdu(pdu)
+ except Exception as e:
+ logger.exception(e)
+
+ @defer.inlineCallbacks
+ def invite_to_room(self, room_name, sender, invitee):
+ """ Invite someone to a room!
+ """
+ self._on_invite(self.server_name, room_name, invitee)
+
+ destinations = yield self.get_servers_for_context(room_name)
+
+ try:
+ yield self.replication_layer.send_pdu(
+ Pdu.create_new(
+ context=room_name,
+ is_state=True,
+ pdu_type="sy.room.member",
+ state_key=invitee,
+ content={"membership": "invite"},
+ origin=self.server_name,
+ destinations=destinations,
+ )
+ )
+ except Exception as e:
+ logger.exception(e)
+
+ def backfill(self, room_name, limit=5):
+ room = self.joined_rooms.get(room_name)
+
+ if not room:
+ return
+
+ dest = room.oldest_server
+
+ return self.replication_layer.backfill(dest, room_name, limit)
+
+ def _get_room_remote_servers(self, room_name):
+        # setdefault(room_name,) passed no default, so a missing room would
+        # yield None and crash; reuse _get_or_create_room instead
+        return list(self._get_or_create_room(room_name).servers)
+
+ def _get_or_create_room(self, room_name):
+ return self.joined_rooms.setdefault(room_name, Room(room_name))
+
+ def get_servers_for_context(self, context):
+ return defer.succeed(
+ self.joined_rooms.setdefault(context, Room(context)).servers
+ )
+
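+# Routing note: every PDU sent above lists destinations=<all servers seen in
+# the room>, i.e. a naive full fan-out; get_servers_for_context just returns
+# the Room's accumulated server set wrapped in an already-fired deferred.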
+
+def main(stdscr):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('user', type=str)
+ parser.add_argument('-v', '--verbose', action='count')
+ args = parser.parse_args()
+
+ user = args.user
+ server_name = origin_from_ucid(user)
+
+ ## Set up logging ##
+
+ root_logger = logging.getLogger()
+
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(lineno)d - '
+ '%(levelname)s - %(message)s')
+ if not os.path.exists("logs"):
+ os.makedirs("logs")
+ fh = logging.FileHandler("logs/%s" % user)
+ fh.setFormatter(formatter)
+
+ root_logger.addHandler(fh)
+ root_logger.setLevel(logging.DEBUG)
+
+ # Hack: The only way to get it to stop logging to sys.stderr :(
+ log.theLogPublisher.observers = []
+ observer = log.PythonLoggingObserver()
+ observer.start()
+
+ ## Set up synapse server
+
+ curses_stdio = cursesio.CursesStdIO(stdscr)
+ input_output = InputOutput(curses_stdio, user)
+
+ curses_stdio.set_callback(input_output)
+
+ app_hs = SynapseHomeServer(server_name, db_name="dbs/%s" % user)
+ replication = app_hs.get_replication_layer()
+
+ hs = HomeServer(server_name, replication, curses_stdio)
+
+ input_output.set_home_server(hs)
+
+ ## Add input_output logger
+ io_logger = IOLoggerHandler(input_output)
+ io_logger.setFormatter(formatter)
+ root_logger.addHandler(io_logger)
+
+ ## Start! ##
+
+ try:
+ port = int(server_name.split(":")[1])
+    except (IndexError, ValueError):
+ port = 12345
+
+ app_hs.get_http_server().start_listening(port)
+
+ reactor.addReader(curses_stdio)
+
+ reactor.run()
+
+
+if __name__ == "__main__":
+ curses.wrapper(main)
diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py
new file mode 100644
index 00000000..b2acadcf
--- /dev/null
+++ b/contrib/graph/graph.py
@@ -0,0 +1,151 @@
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import sqlite3
+import pydot
+import cgi
+import json
+import datetime
+import argparse
+import urllib2
+
+
+def make_name(pdu_id, origin):
+ return "%s@%s" % (pdu_id, origin)
+
+
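+# e.g. make_name("abc123", "example.com") -> "abc123@example.com"; this string
+# is used as the unique pydot node name throughout make_graph below.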
+def make_graph(pdus, room, filename_prefix):
+ pdu_map = {}
+ node_map = {}
+
+ origins = set()
+ colors = set(("red", "green", "blue", "yellow", "purple"))
+
+ for pdu in pdus:
+ origins.add(pdu.get("origin"))
+
+ color_map = {color: color for color in colors if color in origins}
+ colors -= set(color_map.values())
+
+ color_map[None] = "black"
+
+ for o in origins:
+ if o in color_map:
+ continue
+ try:
+ c = colors.pop()
+ color_map[o] = c
+        except KeyError:
+            print "Ran out of colours!"
+ color_map[o] = "black"
+
+ graph = pydot.Dot(graph_name="Test")
+
+ for pdu in pdus:
+ name = make_name(pdu.get("pdu_id"), pdu.get("origin"))
+ pdu_map[name] = pdu
+
+ t = datetime.datetime.fromtimestamp(
+ float(pdu["ts"]) / 1000
+ ).strftime('%Y-%m-%d %H:%M:%S,%f')
+
+ label = (
+ "<"
+ "<b>%(name)s </b><br/>"
+ "Type: <b>%(type)s </b><br/>"
+ "State key: <b>%(state_key)s </b><br/>"
+ "Content: <b>%(content)s </b><br/>"
+ "Time: <b>%(time)s </b><br/>"
+ "Depth: <b>%(depth)s </b><br/>"
+ ">"
+ ) % {
+ "name": name,
+ "type": pdu.get("pdu_type"),
+ "state_key": pdu.get("state_key"),
+ "content": cgi.escape(json.dumps(pdu.get("content")), quote=True),
+ "time": t,
+ "depth": pdu.get("depth"),
+ }
+
+ node = pydot.Node(
+ name=name,
+ label=label,
+ color=color_map[pdu.get("origin")]
+ )
+ node_map[name] = node
+ graph.add_node(node)
+
+ for pdu in pdus:
+ start_name = make_name(pdu.get("pdu_id"), pdu.get("origin"))
+ for i, o in pdu.get("prev_pdus", []):
+ end_name = make_name(i, o)
+
+ if end_name not in node_map:
+ print "%s not in nodes" % end_name
+ continue
+
+ edge = pydot.Edge(node_map[start_name], node_map[end_name])
+ graph.add_edge(edge)
+
+ # Add prev_state edges, if they exist
+ if pdu.get("prev_state_id") and pdu.get("prev_state_origin"):
+ prev_state_name = make_name(
+ pdu.get("prev_state_id"), pdu.get("prev_state_origin")
+ )
+
+ if prev_state_name in node_map:
+ state_edge = pydot.Edge(
+ node_map[start_name], node_map[prev_state_name],
+ style='dotted'
+ )
+ graph.add_edge(state_edge)
+
+ graph.write('%s.dot' % filename_prefix, format='raw', prog='dot')
+# graph.write_png("%s.png" % filename_prefix, prog='dot')
+ graph.write_svg("%s.svg" % filename_prefix, prog='dot')
+
+
+def get_pdus(host, room):
+ transaction = json.loads(
+ urllib2.urlopen(
+ "http://%s/_matrix/federation/v1/context/%s/" % (host, room)
+ ).read()
+ )
+
+ return transaction["pdus"]
+
+
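+# The federation context API queried above returns a transaction-shaped JSON
+# object; only its "pdus" list (one dict per PDU, with prev_pdus edges) is
+# consumed by make_graph.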
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Generate a PDU graph for a given room by talking "
+ "to the given homeserver to get the list of PDUs. \n"
+ "Requires pydot."
+ )
+ parser.add_argument(
+ "-p", "--prefix", dest="prefix",
+ help="String to prefix output files with"
+ )
+ parser.add_argument('host')
+ parser.add_argument('room')
+
+ args = parser.parse_args()
+
+ host = args.host
+ room = args.room
+ prefix = args.prefix if args.prefix else "%s_graph" % (room)
+
+ pdus = get_pdus(host, room)
+
+ make_graph(pdus, room, prefix)
diff --git a/contrib/graph/graph2.py b/contrib/graph/graph2.py
new file mode 100644
index 00000000..d0d2cfe7
--- /dev/null
+++ b/contrib/graph/graph2.py
@@ -0,0 +1,157 @@
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import sqlite3
+import pydot
+import cgi
+import json
+import datetime
+import argparse
+
+from synapse.events import FrozenEvent
+from synapse.util.frozenutils import unfreeze
+
+
+def make_graph(db_name, room_id, file_prefix, limit):
+ conn = sqlite3.connect(db_name)
+
+ sql = (
+ "SELECT json FROM event_json as j "
+ "INNER JOIN events as e ON e.event_id = j.event_id "
+ "WHERE j.room_id = ?"
+ )
+
+ args = [room_id]
+
+ if limit:
+ sql += (
+ " ORDER BY topological_ordering DESC, stream_ordering DESC "
+ "LIMIT ?"
+ )
+
+ args.append(limit)
+
+ c = conn.execute(sql, args)
+
+ events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]
+
+ events.sort(key=lambda e: e.depth)
+
+ node_map = {}
+ state_groups = {}
+
+ graph = pydot.Dot(graph_name="Test")
+
+ for event in events:
+ c = conn.execute(
+ "SELECT state_group FROM event_to_state_groups "
+ "WHERE event_id = ?",
+ (event.event_id,)
+ )
+
+ res = c.fetchone()
+ state_group = res[0] if res else None
+
+ if state_group is not None:
+ state_groups.setdefault(state_group, []).append(event.event_id)
+
+ t = datetime.datetime.fromtimestamp(
+ float(event.origin_server_ts) / 1000
+ ).strftime('%Y-%m-%d %H:%M:%S,%f')
+
+ content = json.dumps(unfreeze(event.get_dict()["content"]))
+
+ label = (
+ "<"
+ "<b>%(name)s </b><br/>"
+ "Type: <b>%(type)s </b><br/>"
+ "State key: <b>%(state_key)s </b><br/>"
+ "Content: <b>%(content)s </b><br/>"
+ "Time: <b>%(time)s </b><br/>"
+ "Depth: <b>%(depth)s </b><br/>"
+ "State group: %(state_group)s<br/>"
+ ">"
+ ) % {
+ "name": event.event_id,
+ "type": event.type,
+ "state_key": event.get("state_key", None),
+ "content": cgi.escape(content, quote=True),
+ "time": t,
+ "depth": event.depth,
+ "state_group": state_group,
+ }
+
+ node = pydot.Node(
+ name=event.event_id,
+ label=label,
+ )
+
+ node_map[event.event_id] = node
+ graph.add_node(node)
+
+ for event in events:
+ for prev_id, _ in event.prev_events:
+ try:
+ end_node = node_map[prev_id]
+            except KeyError:
+ end_node = pydot.Node(
+ name=prev_id,
+ label="<<b>%s</b>>" % (prev_id,),
+ )
+
+ node_map[prev_id] = end_node
+ graph.add_node(end_node)
+
+ edge = pydot.Edge(node_map[event.event_id], end_node)
+ graph.add_edge(edge)
+
+ for group, event_ids in state_groups.items():
+ if len(event_ids) <= 1:
+ continue
+
+ cluster = pydot.Cluster(
+ str(group),
+ label="<State Group: %s>" % (str(group),)
+ )
+
+ for event_id in event_ids:
+ cluster.add_node(node_map[event_id])
+
+ graph.add_subgraph(cluster)
+
+ graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
+ graph.write_svg("%s.svg" % file_prefix, prog='dot')
+
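+# Usage sketch (hypothetical paths):
+#
+#     python graph2.py homeserver.db '!someroom:example.com' -p out -l 20
+#
+# reads the most recent 20 events for the room straight out of the SQLite
+# database and writes out.dot and out.svg.
+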
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Generate a PDU graph for a given room by talking "
+ "to the given homeserver to get the list of PDUs. \n"
+ "Requires pydot."
+ )
+ parser.add_argument(
+ "-p", "--prefix", dest="prefix",
+ help="String to prefix output files with",
+ default="graph_output"
+ )
+ parser.add_argument(
+ "-l", "--limit",
+ help="Only retrieve the last N events.",
+ )
+ parser.add_argument('db')
+ parser.add_argument('room')
+
+ args = parser.parse_args()
+
+ make_graph(args.db, args.room, args.prefix, args.limit)
diff --git a/contrib/jitsimeetbridge/jitsimeetbridge.py b/contrib/jitsimeetbridge/jitsimeetbridge.py
new file mode 100644
index 00000000..15f8e1c4
--- /dev/null
+++ b/contrib/jitsimeetbridge/jitsimeetbridge.py
@@ -0,0 +1,260 @@
+#!/usr/bin/env python
+
+"""
+This is an attempt at bridging matrix clients into a Jitsi meet room via Matrix
+video call. It uses hard-coded XML strings over XMPP BOSH. It can display one
+of the streams from the Jitsi bridge until the second lot of SDP comes down and
+we set the remote SDP, at which point the stream ends. Our video never gets to
+the bridge.
+
+Requires:
+npm install jquery jsdom
+"""
+
+import gevent
+import grequests
+from BeautifulSoup import BeautifulSoup
+import json
+import urllib
+import subprocess
+import time
+
+#ACCESS_TOKEN="" #
+
+MATRIXBASE = 'https://matrix.org/_matrix/client/api/v1/'
+MYUSERNAME = '@davetest:matrix.org'
+
+HTTPBIND = 'https://meet.jit.si/http-bind'
+#HTTPBIND = 'https://jitsi.vuc.me/http-bind'
+#ROOMNAME = "matrix"
+ROOMNAME = "pibble"
+
+HOST="guest.jit.si"
+#HOST="jitsi.vuc.me"
+
+TURNSERVER="turn.guest.jit.si"
+#TURNSERVER="turn.jitsi.vuc.me"
+
+ROOMDOMAIN="meet.jit.si"
+#ROOMDOMAIN="conference.jitsi.vuc.me"
+
+class TrivialMatrixClient:
+ def __init__(self, access_token):
+ self.token = None
+ self.access_token = access_token
+
+ def getEvent(self):
+ while True:
+ url = MATRIXBASE+'events?access_token='+self.access_token+"&timeout=60000"
+ if self.token:
+ url += "&from="+self.token
+ req = grequests.get(url)
+ resps = grequests.map([req])
+ obj = json.loads(resps[0].content)
+ print "incoming from matrix",obj
+ if 'end' not in obj:
+ continue
+ self.token = obj['end']
+ if len(obj['chunk']):
+ return obj['chunk'][0]
+
+ def joinRoom(self, roomId):
+ url = MATRIXBASE+'rooms/'+roomId+'/join?access_token='+self.access_token
+ print url
+ headers={ 'Content-Type': 'application/json' }
+ req = grequests.post(url, headers=headers, data='{}')
+ resps = grequests.map([req])
+ obj = json.loads(resps[0].content)
+ print "response: ",obj
+
+ def sendEvent(self, roomId, evType, event):
+ url = MATRIXBASE+'rooms/'+roomId+'/send/'+evType+'?access_token='+self.access_token
+ print url
+ print json.dumps(event)
+ headers={ 'Content-Type': 'application/json' }
+ req = grequests.post(url, headers=headers, data=json.dumps(event))
+ resps = grequests.map([req])
+ obj = json.loads(resps[0].content)
+ print "response: ",obj
+
+
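+# These three methods are the entire Matrix surface the bridge needs: a
+# long-polled GET /events (getEvent), POST /rooms/<id>/join and
+# POST /rooms/<id>/send/<type>; grequests.map() over a single request is just
+# a blocking round trip.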
+
+xmppClients = {}
+
+
+def matrixLoop():
+ while True:
+ ev = matrixCli.getEvent()
+ print ev
+ if ev['type'] == 'm.room.member':
+ print 'membership event'
+ if ev['membership'] == 'invite' and ev['state_key'] == MYUSERNAME:
+ roomId = ev['room_id']
+ print "joining room %s" % (roomId)
+ matrixCli.joinRoom(roomId)
+ elif ev['type'] == 'm.room.message':
+ if ev['room_id'] in xmppClients:
+ print "already have a bridge for that user, ignoring"
+ continue
+ print "got message, connecting"
+ xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
+ gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
+ elif ev['type'] == 'm.call.invite':
+ print "Incoming call"
+ #sdp = ev['content']['offer']['sdp']
+ #print "sdp: %s" % (sdp)
+ #xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
+ #gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
+ elif ev['type'] == 'm.call.answer':
+ print "Call answered"
+ sdp = ev['content']['answer']['sdp']
+ if ev['room_id'] not in xmppClients:
+ print "We didn't have a call for that room"
+ continue
+ # should probably check call ID too
+ xmppCli = xmppClients[ev['room_id']]
+ xmppCli.sendAnswer(sdp)
+ elif ev['type'] == 'm.call.hangup':
+ if ev['room_id'] in xmppClients:
+ xmppClients[ev['room_id']].stop()
+ del xmppClients[ev['room_id']]
+
+class TrivialXmppClient:
+ def __init__(self, matrixRoom, userId):
+ self.rid = 0
+ self.matrixRoom = matrixRoom
+ self.userId = userId
+ self.running = True
+
+ def stop(self):
+ self.running = False
+
+ def nextRid(self):
+ self.rid += 1
+ return '%d' % (self.rid)
+
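+    # BOSH bookkeeping: every HTTP body carries a monotonically increasing
+    # 'rid', and everything after the first poke echoes the server-assigned
+    # 'sid'; sendIq below wraps each stanza in such a body.
+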
+ def sendIq(self, xml):
+ fullXml = "<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>" % (self.nextRid(), self.sid, xml)
+ #print "\t>>>%s" % (fullXml)
+ return self.xmppPoke(fullXml)
+
+ def xmppPoke(self, xml):
+ headers = {'Content-Type': 'application/xml'}
+ req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
+ resps = grequests.map([req])
+ obj = BeautifulSoup(resps[0].content)
+ return obj
+
+ def sendAnswer(self, answer):
+ print "sdp from matrix client",answer
+ p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--sdp'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ jingle, out_err = p.communicate(answer)
+ jingle = jingle % {
+ 'tojid': self.callfrom,
+ 'action': 'session-accept',
+ 'initiator': self.callfrom,
+ 'responder': self.jid,
+ 'sid': self.callsid
+ }
+ print "answer jingle from sdp",jingle
+ res = self.sendIq(jingle)
+ print "reply from answer: ",res
+
+ self.ssrcs = {}
+ jingleSoup = BeautifulSoup(jingle)
+ for cont in jingleSoup.iq.jingle.findAll('content'):
+ if cont.description:
+ self.ssrcs[cont['name']] = cont.description['ssrc']
+ print "my ssrcs:",self.ssrcs
+
+ gevent.joinall([
+ gevent.spawn(self.advertiseSsrcs)
+ ])
+
+ def advertiseSsrcs(self):
+ time.sleep(7)
+ print "SSRC spammer started"
+ while self.running:
+            ssrcMsg = (
+                "<presence to='%(tojid)s' xmlns='jabber:client'>"
+                "<x xmlns='http://jabber.org/protocol/muc'/>"
+                "<c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/>"
+                "<nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick>"
+                "<stats xmlns='http://jitsi.org/jitmeet/stats'>"
+                "<stat name='bitrate_download' value='175'/>"
+                "<stat name='bitrate_upload' value='176'/>"
+                "<stat name='packetLoss_total' value='0'/>"
+                "<stat name='packetLoss_download' value='0'/>"
+                "<stat name='packetLoss_upload' value='0'/>"
+                "</stats>"
+                "<media xmlns='http://estos.de/ns/mjs'>"
+                "<source type='audio' ssrc='%(assrc)s' direction='sendre'/>"
+                "<source type='video' ssrc='%(vssrc)s' direction='sendre'/>"
+                "</media></presence>"
+            ) % {
+                'tojid': "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
+                'nick': self.userId,
+                'assrc': self.ssrcs['audio'],
+                'vssrc': self.ssrcs['video'],
+            }
+ res = self.sendIq(ssrcMsg)
+ print "reply from ssrc announce: ",res
+ time.sleep(10)
+
+
+
+ def xmppLoop(self):
+ self.matrixCallId = time.time()
+ res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), HOST))
+
+ print res
+ self.sid = res.body['sid']
+ print "sid %s" % (self.sid)
+
+ res = self.sendIq("<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>")
+
+ res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), self.sid, HOST))
+
+ res = self.sendIq("<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>")
+ print res
+
+ self.jid = res.body.iq.bind.jid.string
+ print "jid: %s" % (self.jid)
+ self.shortJid = self.jid.split('-')[0]
+
+ res = self.sendIq("<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>")
+
+ #randomthing = res.body.iq['to']
+ #whatsitpart = randomthing.split('-')[0]
+
+ #print "other random bind thing: %s" % (randomthing)
+
+        # advertise presence to the jitsi room, with our nick
+ res = self.sendIq("<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>" % (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId))
+ self.muc = {'users': []}
+ for p in res.body.findAll('presence'):
+ u = {}
+ u['shortJid'] = p['from'].split('/')[1]
+ if p.c and p.c.nick:
+ u['nick'] = p.c.nick.string
+ self.muc['users'].append(u)
+ print "muc: ",self.muc
+
+ # wait for stuff
+ while True:
+ print "waiting..."
+ res = self.sendIq("")
+ print "got from stream: ",res
+ if res.body.iq:
+ jingles = res.body.iq.findAll('jingle')
+ if len(jingles):
+ self.callfrom = res.body.iq['from']
+ self.handleInvite(jingles[0])
+ elif 'type' in res.body and res.body['type'] == 'terminate':
+ self.running = False
+ del xmppClients[self.matrixRoom]
+ return
+
+ def handleInvite(self, jingle):
+ self.initiator = jingle['initiator']
+ self.callsid = jingle['sid']
+ p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--jingle'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ print "raw jingle invite",str(jingle)
+ sdp, out_err = p.communicate(str(jingle))
+ print "transformed remote offer sdp",sdp
+ inviteEvent = {
+ 'offer': {
+ 'type': 'offer',
+ 'sdp': sdp
+ },
+ 'call_id': self.matrixCallId,
+ 'version': 0,
+ 'lifetime': 30000
+ }
+ matrixCli.sendEvent(self.matrixRoom, 'm.call.invite', inviteEvent)
+
+matrixCli = TrivialMatrixClient(ACCESS_TOKEN)
+
+gevent.joinall([
+ gevent.spawn(matrixLoop)
+])
+
diff --git a/contrib/jitsimeetbridge/syweb-jitsi-conference.patch b/contrib/jitsimeetbridge/syweb-jitsi-conference.patch
new file mode 100644
index 00000000..aed23c78
--- /dev/null
+++ b/contrib/jitsimeetbridge/syweb-jitsi-conference.patch
@@ -0,0 +1,188 @@
+diff --git a/syweb/webclient/app/components/matrix/matrix-call.js b/syweb/webclient/app/components/matrix/matrix-call.js
+index 9fbfff0..dc68077 100644
+--- a/syweb/webclient/app/components/matrix/matrix-call.js
++++ b/syweb/webclient/app/components/matrix/matrix-call.js
+@@ -16,6 +16,45 @@ limitations under the License.
+
+ 'use strict';
+
++
++function sendKeyframe(pc) {
++ console.log('sendkeyframe', pc.iceConnectionState);
++ if (pc.iceConnectionState !== 'connected') return; // safe...
++ pc.setRemoteDescription(
++ pc.remoteDescription,
++ function () {
++ pc.createAnswer(
++ function (modifiedAnswer) {
++ pc.setLocalDescription(
++ modifiedAnswer,
++ function () {
++ // noop
++ },
++ function (error) {
++ console.log('triggerKeyframe setLocalDescription failed', error);
++ messageHandler.showError();
++ }
++ );
++ },
++ function (error) {
++ console.log('triggerKeyframe createAnswer failed', error);
++ messageHandler.showError();
++ }
++ );
++ },
++ function (error) {
++ console.log('triggerKeyframe setRemoteDescription failed', error);
++ messageHandler.showError();
++ }
++ );
++}
++
++
++
++
++
++
++
+ var forAllVideoTracksOnStream = function(s, f) {
+ var tracks = s.getVideoTracks();
+ for (var i = 0; i < tracks.length; i++) {
+@@ -83,7 +122,7 @@ angular.module('MatrixCall', [])
+ }
+
+ // FIXME: we should prevent any calls from being placed or accepted before this has finished
+- MatrixCall.getTurnServer();
++ //MatrixCall.getTurnServer();
+
+ MatrixCall.CALL_TIMEOUT = 60000;
+ MatrixCall.FALLBACK_STUN_SERVER = 'stun:stun.l.google.com:19302';
+@@ -132,6 +171,22 @@ angular.module('MatrixCall', [])
+ pc.onsignalingstatechange = function() { self.onSignallingStateChanged(); };
+ pc.onicecandidate = function(c) { self.gotLocalIceCandidate(c); };
+ pc.onaddstream = function(s) { self.onAddStream(s); };
++
++ var datachan = pc.createDataChannel('RTCDataChannel', {
++ reliable: false
++ });
++ console.log("data chan: "+datachan);
++ datachan.onopen = function() {
++ console.log("data channel open");
++ };
++ datachan.onmessage = function() {
++ console.log("data channel message");
++ };
++ pc.ondatachannel = function(event) {
++ console.log("have data channel");
++ event.channel.binaryType = 'blob';
++ };
++
+ return pc;
+ }
+
+@@ -200,6 +255,12 @@ angular.module('MatrixCall', [])
+ }, this.msg.lifetime - event.age);
+ };
+
++ MatrixCall.prototype.receivedInvite = function(event) {
++ console.log("Got second invite for call "+this.call_id);
++ this.peerConn.setRemoteDescription(new RTCSessionDescription(this.msg.offer), this.onSetRemoteDescriptionSuccess, this.onSetRemoteDescriptionError);
++ };
++
++
+ // perverse as it may seem, sometimes we want to instantiate a call with a hangup message
+ // (because when getting the state of the room on load, events come in reverse order and
+ // we want to remember that a call has been hung up)
+@@ -349,7 +410,7 @@ angular.module('MatrixCall', [])
+ 'mandatory': {
+ 'OfferToReceiveAudio': true,
+ 'OfferToReceiveVideo': this.type == 'video'
+- },
++ }
+ };
+ this.peerConn.createAnswer(function(d) { self.createdAnswer(d); }, function(e) {}, constraints);
+ // This can't be in an apply() because it's called by a predecessor call under glare conditions :(
+@@ -359,8 +420,20 @@ angular.module('MatrixCall', [])
+ MatrixCall.prototype.gotLocalIceCandidate = function(event) {
+ if (event.candidate) {
+ console.log("Got local ICE "+event.candidate.sdpMid+" candidate: "+event.candidate.candidate);
+- this.sendCandidate(event.candidate);
+- }
++ //this.sendCandidate(event.candidate);
++ } else {
++ console.log("have all candidates, sending answer");
++ var content = {
++ version: 0,
++ call_id: this.call_id,
++ answer: this.peerConn.localDescription
++ };
++ this.sendEventWithRetry('m.call.answer', content);
++ var self = this;
++ $rootScope.$apply(function() {
++ self.state = 'connecting';
++ });
++ }
+ }
+
+ MatrixCall.prototype.gotRemoteIceCandidate = function(cand) {
+@@ -418,15 +491,6 @@ angular.module('MatrixCall', [])
+ console.log("Created answer: "+description);
+ var self = this;
+ this.peerConn.setLocalDescription(description, function() {
+- var content = {
+- version: 0,
+- call_id: self.call_id,
+- answer: self.peerConn.localDescription
+- };
+- self.sendEventWithRetry('m.call.answer', content);
+- $rootScope.$apply(function() {
+- self.state = 'connecting';
+- });
+ }, function() { console.log("Error setting local description!"); } );
+ };
+
+@@ -448,6 +512,9 @@ angular.module('MatrixCall', [])
+ $rootScope.$apply(function() {
+ self.state = 'connected';
+ self.didConnect = true;
++ /*$timeout(function() {
++ sendKeyframe(self.peerConn);
++ }, 1000);*/
+ });
+ } else if (this.peerConn.iceConnectionState == 'failed') {
+ this.hangup('ice_failed');
+@@ -518,6 +585,7 @@ angular.module('MatrixCall', [])
+
+ MatrixCall.prototype.onRemoteStreamEnded = function(event) {
+ console.log("Remote stream ended");
++ return;
+ var self = this;
+ $rootScope.$apply(function() {
+ self.state = 'ended';
+diff --git a/syweb/webclient/app/components/matrix/matrix-phone-service.js b/syweb/webclient/app/components/matrix/matrix-phone-service.js
+index 55dbbf5..272fa27 100644
+--- a/syweb/webclient/app/components/matrix/matrix-phone-service.js
++++ b/syweb/webclient/app/components/matrix/matrix-phone-service.js
+@@ -48,6 +48,13 @@ angular.module('matrixPhoneService', [])
+ return;
+ }
+
++ // do we already have an entry for this call ID?
++ var existingEntry = matrixPhoneService.allCalls[msg.call_id];
++ if (existingEntry) {
++ existingEntry.receivedInvite(msg);
++ return;
++ }
++
+ var call = undefined;
+ if (!isLive) {
+ // if this event wasn't live then this call may already be over
+@@ -108,7 +115,7 @@ angular.module('matrixPhoneService', [])
+ call.hangup();
+ }
+ } else {
+- $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
++ $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
+ }
+ } else if (event.type == 'm.call.answer') {
+ var call = matrixPhoneService.allCalls[msg.call_id];
diff --git a/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.js b/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.js
new file mode 100644
index 00000000..e99dd7bf
--- /dev/null
+++ b/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.js
@@ -0,0 +1,712 @@
+/* jshint -W117 */
+// SDP STUFF
+function SDP(sdp) {
+ this.media = sdp.split('\r\nm=');
+ for (var i = 1; i < this.media.length; i++) {
+ this.media[i] = 'm=' + this.media[i];
+ if (i != this.media.length - 1) {
+ this.media[i] += '\r\n';
+ }
+ }
+ this.session = this.media.shift() + '\r\n';
+ this.raw = this.session + this.media.join('');
+}
+
+exports.SDP = SDP;
+
+var jsdom = require("jsdom");
+var window = jsdom.jsdom().parentWindow;
+var $ = require('jquery')(window);
+
+var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil;
+
+/**
+ * Returns map of MediaChannel mapped per channel idx.
+ */
+SDP.prototype.getMediaSsrcMap = function() {
+ var self = this;
+ var media_ssrcs = {};
+    var tmp;
+    for (var channelNum = 0; channelNum < self.media.length; channelNum++) {
+        tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc:');
+ var type = SDPUtil.parse_mid(SDPUtil.find_line(self.media[channelNum], 'a=mid:'));
+ var channel = new MediaChannel(channelNum, type);
+ media_ssrcs[channelNum] = channel;
+ tmp.forEach(function (line) {
+ var linessrc = line.substring(7).split(' ')[0];
+ // allocate new ChannelSsrc
+ if(!channel.ssrcs[linessrc]) {
+ channel.ssrcs[linessrc] = new ChannelSsrc(linessrc, type);
+ }
+ channel.ssrcs[linessrc].lines.push(line);
+ });
+ tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc-group:');
+ tmp.forEach(function(line){
+            var idx = line.indexOf(' ');
+            var semantics = line.substr(0, idx).substr(13);
+ var ssrcs = line.substr(14 + semantics.length).split(' ');
+ if (ssrcs.length != 0) {
+ var ssrcGroup = new ChannelSsrcGroup(semantics, ssrcs);
+ channel.ssrcGroups.push(ssrcGroup);
+ }
+ });
+ }
+ return media_ssrcs;
+};
+/**
+ * Returns <tt>true</tt> if this SDP contains given SSRC.
+ * @param ssrc the ssrc to check.
+ * @returns {boolean} <tt>true</tt> if this SDP contains given SSRC.
+ */
+SDP.prototype.containsSSRC = function(ssrc) {
+ var channels = this.getMediaSsrcMap();
+ var contains = false;
+ Object.keys(channels).forEach(function(chNumber){
+ var channel = channels[chNumber];
+ //console.log("Check", channel, ssrc);
+ if(Object.keys(channel.ssrcs).indexOf(ssrc) != -1){
+ contains = true;
+ }
+ });
+ return contains;
+};
+
+/**
+ * Returns map of MediaChannel that contains only media not contained in <tt>otherSdp</tt>. Mapped by channel idx.
+ * @param otherSdp the other SDP to check ssrc with.
+ */
+SDP.prototype.getNewMedia = function(otherSdp) {
+
+ // this could be useful in Array.prototype.
+ function arrayEquals(array) {
+ // if the other array is a falsy value, return
+ if (!array)
+ return false;
+
+ // compare lengths - can save a lot of time
+ if (this.length != array.length)
+ return false;
+
+ for (var i = 0, l=this.length; i < l; i++) {
+ // Check if we have nested arrays
+ if (this[i] instanceof Array && array[i] instanceof Array) {
+ // recurse into the nested arrays
+                if (!arrayEquals.apply(this[i], [array[i]]))
+ return false;
+ }
+ else if (this[i] != array[i]) {
+ // Warning - two different object instances will never be equal: {x:20} != {x:20}
+ return false;
+ }
+ }
+ return true;
+ }
+
+ var myMedia = this.getMediaSsrcMap();
+ var othersMedia = otherSdp.getMediaSsrcMap();
+ var newMedia = {};
+ Object.keys(othersMedia).forEach(function(channelNum) {
+ var myChannel = myMedia[channelNum];
+ var othersChannel = othersMedia[channelNum];
+ if(!myChannel && othersChannel) {
+ // Add whole channel
+ newMedia[channelNum] = othersChannel;
+ return;
+ }
+ // Look for new ssrcs across the channel
+ Object.keys(othersChannel.ssrcs).forEach(function(ssrc) {
+ if(Object.keys(myChannel.ssrcs).indexOf(ssrc) === -1) {
+ // Allocate channel if we've found ssrc that doesn't exist in our channel
+ if(!newMedia[channelNum]){
+ newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
+ }
+ newMedia[channelNum].ssrcs[ssrc] = othersChannel.ssrcs[ssrc];
+ }
+ });
+
+ // Look for new ssrc groups across the channels
+ othersChannel.ssrcGroups.forEach(function(otherSsrcGroup){
+
+ // try to match the other ssrc-group with an ssrc-group of ours
+ var matched = false;
+ for (var i = 0; i < myChannel.ssrcGroups.length; i++) {
+ var mySsrcGroup = myChannel.ssrcGroups[i];
+ if (otherSsrcGroup.semantics == mySsrcGroup.semantics
+ && arrayEquals.apply(otherSsrcGroup.ssrcs, [mySsrcGroup.ssrcs])) {
+
+ matched = true;
+ break;
+ }
+ }
+
+ if (!matched) {
+ // Allocate channel if we've found an ssrc-group that doesn't
+ // exist in our channel
+
+ if(!newMedia[channelNum]){
+ newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
+ }
+ newMedia[channelNum].ssrcGroups.push(otherSsrcGroup);
+ }
+ });
+ });
+ return newMedia;
+};
+
+// remove iSAC and CN from SDP
+SDP.prototype.mangle = function () {
+ var i, j, mline, lines, rtpmap, newdesc;
+ for (i = 0; i < this.media.length; i++) {
+ lines = this.media[i].split('\r\n');
+ lines.pop(); // remove empty last element
+ mline = SDPUtil.parse_mline(lines.shift());
+ if (mline.media != 'audio')
+ continue;
+ newdesc = '';
+ mline.fmt.length = 0;
+ for (j = 0; j < lines.length; j++) {
+ if (lines[j].substr(0, 9) == 'a=rtpmap:') {
+ rtpmap = SDPUtil.parse_rtpmap(lines[j]);
+ if (rtpmap.name == 'CN' || rtpmap.name == 'ISAC')
+ continue;
+ mline.fmt.push(rtpmap.id);
+ newdesc += lines[j] + '\r\n';
+ } else {
+ newdesc += lines[j] + '\r\n';
+ }
+ }
+ this.media[i] = SDPUtil.build_mline(mline) + '\r\n';
+ this.media[i] += newdesc;
+ }
+ this.raw = this.session + this.media.join('');
+};
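+
+/* Example effect (illustrative): given an audio section whose m-line is
+ * 'm=audio 1 RTP/AVPF 111 13' and where 13 is 'a=rtpmap:13 CN/8000',
+ * mangle() rewrites the m-line to 'm=audio 1 RTP/AVPF 111' and drops the CN
+ * rtpmap line; non-audio sections are left untouched.
+ */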
+
+// remove lines matching prefix from session section
+SDP.prototype.removeSessionLines = function(prefix) {
+ var self = this;
+ var lines = SDPUtil.find_lines(this.session, prefix);
+ lines.forEach(function(line) {
+ self.session = self.session.replace(line + '\r\n', '');
+ });
+ this.raw = this.session + this.media.join('');
+ return lines;
+};
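+
+/* Usage sketch (assumed prefix): e.g. strip all bandwidth lines from the
+ * session part and keep the removed lines for inspection:
+ * var removed = sdp.removeSessionLines('b=');
+ */
+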
+// remove lines matching prefix from a media section specified by mediaindex
+// TODO: non-numeric mediaindex could match mid
+SDP.prototype.removeMediaLines = function(mediaindex, prefix) {
+ var self = this;
+ var lines = SDPUtil.find_lines(this.media[mediaindex], prefix);
+ lines.forEach(function(line) {
+ self.media[mediaindex] = self.media[mediaindex].replace(line + '\r\n', '');
+ });
+ this.raw = this.session + this.media.join('');
+ return lines;
+};
+
+// add contents to a jingle element
+SDP.prototype.toJingle = function (elem, thecreator) {
+ var i, j, k, mline, ssrc, rtpmap, tmp, line, lines;
+ var self = this;
+ // new bundle plan
+ if (SDPUtil.find_line(this.session, 'a=group:')) {
+ lines = SDPUtil.find_lines(this.session, 'a=group:');
+ for (i = 0; i < lines.length; i++) {
+ tmp = lines[i].split(' ');
+ var semantics = tmp.shift().substr(8);
+ elem.c('group', {xmlns: 'urn:xmpp:jingle:apps:grouping:0', semantics:semantics});
+ for (j = 0; j < tmp.length; j++) {
+ elem.c('content', {name: tmp[j]}).up();
+ }
+ elem.up();
+ }
+ }
+ // old bundle plan, to be removed
+ var bundle = [];
+ if (SDPUtil.find_line(this.session, 'a=group:BUNDLE')) {
+ bundle = SDPUtil.find_line(this.session, 'a=group:BUNDLE ').split(' ');
+ bundle.shift();
+ }
+ for (i = 0; i < this.media.length; i++) {
+ mline = SDPUtil.parse_mline(this.media[i].split('\r\n')[0]);
+ if (!(mline.media === 'audio' ||
+ mline.media === 'video' ||
+ mline.media === 'application'))
+ {
+ continue;
+ }
+ if (SDPUtil.find_line(this.media[i], 'a=ssrc:')) {
+ ssrc = SDPUtil.find_line(this.media[i], 'a=ssrc:').substring(7).split(' ')[0]; // take the first
+ } else {
+ ssrc = false;
+ }
+
+ elem.c('content', {creator: thecreator, name: mline.media});
+ if (SDPUtil.find_line(this.media[i], 'a=mid:')) {
+ // prefer identifier from a=mid if present
+ var mid = SDPUtil.parse_mid(SDPUtil.find_line(this.media[i], 'a=mid:'));
+ elem.attrs({ name: mid });
+
+ // old BUNDLE plan, to be removed
+ if (bundle.indexOf(mid) !== -1) {
+ elem.c('bundle', {xmlns: 'http://estos.de/ns/bundle'}).up();
+ bundle.splice(bundle.indexOf(mid), 1);
+ }
+ }
+
+ if (SDPUtil.find_line(this.media[i], 'a=rtpmap:').length)
+ {
+ elem.c('description',
+ {xmlns: 'urn:xmpp:jingle:apps:rtp:1',
+ media: mline.media });
+ if (ssrc) {
+ elem.attrs({ssrc: ssrc});
+ }
+ for (j = 0; j < mline.fmt.length; j++) {
+ rtpmap = SDPUtil.find_line(this.media[i], 'a=rtpmap:' + mline.fmt[j]);
+ elem.c('payload-type', SDPUtil.parse_rtpmap(rtpmap));
+ // put any 'a=fmtp:' + mline.fmt[j] lines into <param name=foo value=bar/>
+ if (SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j])) {
+ tmp = SDPUtil.parse_fmtp(SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j]));
+ for (k = 0; k < tmp.length; k++) {
+ elem.c('parameter', tmp[k]).up();
+ }
+ }
+ this.RtcpFbToJingle(i, elem, mline.fmt[j]); // XEP-0293 -- map a=rtcp-fb
+
+ elem.up();
+ }
+ if (SDPUtil.find_line(this.media[i], 'a=crypto:', this.session)) {
+ elem.c('encryption', {required: 1});
+ var crypto = SDPUtil.find_lines(this.media[i], 'a=crypto:', this.session);
+ crypto.forEach(function(line) {
+ elem.c('crypto', SDPUtil.parse_crypto(line)).up();
+ });
+ elem.up(); // end of encryption
+ }
+
+ if (ssrc) {
+ // new style mapping
+ elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
+ // FIXME: group by ssrc and support multiple different ssrcs
+ var ssrclines = SDPUtil.find_lines(this.media[i], 'a=ssrc:');
+ ssrclines.forEach(function(line) {
+ var idx = line.indexOf(' ');
+ var linessrc = line.substr(0, idx).substr(7);
+ if (linessrc != ssrc) {
+ elem.up();
+ ssrc = linessrc;
+ elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
+ }
+ var kv = line.substr(idx + 1);
+ elem.c('parameter');
+ if (kv.indexOf(':') == -1) {
+ elem.attrs({ name: kv });
+ } else {
+ elem.attrs({ name: kv.split(':', 2)[0] });
+ elem.attrs({ value: kv.split(':', 2)[1] });
+ }
+ elem.up();
+ });
+ elem.up();
+
+ // old proprietary mapping, to be removed at some point
+ tmp = SDPUtil.parse_ssrc(this.media[i]);
+ tmp.xmlns = 'http://estos.de/ns/ssrc';
+ tmp.ssrc = ssrc;
+ elem.c('ssrc', tmp).up(); // ssrc is part of description
+
+ // XEP-0339 handle ssrc-group attributes
+ var ssrc_group_lines = SDPUtil.find_lines(this.media[i], 'a=ssrc-group:');
+ ssrc_group_lines.forEach(function(line) {
+ var idx = line.indexOf(' ');
+ var semantics = line.substr(0, idx).substr(13);
+ var ssrcs = line.substr(14 + semantics.length).split(' ');
+ if (ssrcs.length != 0) {
+ elem.c('ssrc-group', { semantics: semantics, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
+ ssrcs.forEach(function(ssrc) {
+ elem.c('source', { ssrc: ssrc })
+ .up();
+ });
+ elem.up();
+ }
+ });
+ }
+
+ if (SDPUtil.find_line(this.media[i], 'a=rtcp-mux')) {
+ elem.c('rtcp-mux').up();
+ }
+
+ // XEP-0293 -- map a=rtcp-fb:*
+ this.RtcpFbToJingle(i, elem, '*');
+
+ // XEP-0294
+ if (SDPUtil.find_line(this.media[i], 'a=extmap:')) {
+ lines = SDPUtil.find_lines(this.media[i], 'a=extmap:');
+ for (j = 0; j < lines.length; j++) {
+ tmp = SDPUtil.parse_extmap(lines[j]);
+ elem.c('rtp-hdrext', { xmlns: 'urn:xmpp:jingle:apps:rtp:rtp-hdrext:0',
+ uri: tmp.uri,
+ id: tmp.value });
+ if (tmp.hasOwnProperty('direction')) {
+ switch (tmp.direction) {
+ case 'sendonly':
+ elem.attrs({senders: 'responder'});
+ break;
+ case 'recvonly':
+ elem.attrs({senders: 'initiator'});
+ break;
+ case 'sendrecv':
+ elem.attrs({senders: 'both'});
+ break;
+ case 'inactive':
+ elem.attrs({senders: 'none'});
+ break;
+ }
+ }
+ // TODO: handle params
+ elem.up();
+ }
+ }
+ elem.up(); // end of description
+ }
+
+ // map ice-ufrag/pwd, dtls fingerprint, candidates
+ this.TransportToJingle(i, elem);
+
+ if (SDPUtil.find_line(this.media[i], 'a=sendrecv', this.session)) {
+ elem.attrs({senders: 'both'});
+ } else if (SDPUtil.find_line(this.media[i], 'a=sendonly', this.session)) {
+ elem.attrs({senders: 'initiator'});
+ } else if (SDPUtil.find_line(this.media[i], 'a=recvonly', this.session)) {
+ elem.attrs({senders: 'responder'});
+ } else if (SDPUtil.find_line(this.media[i], 'a=inactive', this.session)) {
+ elem.attrs({senders: 'none'});
+ }
+ if (mline.port == '0') {
+ // estos hack to reject an m-line
+ elem.attrs({senders: 'rejected'});
+ }
+ elem.up(); // end of content
+ }
+ elem.up();
+ return elem;
+};
+
+SDP.prototype.TransportToJingle = function (mediaindex, elem) {
+ var i = mediaindex;
+ var tmp;
+ var self = this;
+ elem.c('transport');
+
+ // XEP-0343 DTLS/SCTP
+ if (SDPUtil.find_line(this.media[mediaindex], 'a=sctpmap:').length)
+ {
+ var sctpmap = SDPUtil.find_line(
+ this.media[i], 'a=sctpmap:', self.session);
+ if (sctpmap)
+ {
+ var sctpAttrs = SDPUtil.parse_sctpmap(sctpmap);
+ elem.c('sctpmap',
+ {
+ xmlns: 'urn:xmpp:jingle:transports:dtls-sctp:1',
+ number: sctpAttrs[0], /* SCTP port */
+ protocol: sctpAttrs[1], /* protocol */
+ });
+ // Optional stream count attribute
+ if (sctpAttrs.length > 2)
+ elem.attrs({ streams: sctpAttrs[2]});
+ elem.up();
+ }
+ }
+ // XEP-0320
+ var fingerprints = SDPUtil.find_lines(this.media[mediaindex], 'a=fingerprint:', this.session);
+ fingerprints.forEach(function(line) {
+ tmp = SDPUtil.parse_fingerprint(line);
+ tmp.xmlns = 'urn:xmpp:jingle:apps:dtls:0';
+ elem.c('fingerprint').t(tmp.fingerprint);
+ delete tmp.fingerprint;
+ line = SDPUtil.find_line(self.media[mediaindex], 'a=setup:', self.session);
+ if (line) {
+ tmp.setup = line.substr(8);
+ }
+ elem.attrs(tmp);
+ elem.up(); // end of fingerprint
+ });
+ tmp = SDPUtil.iceparams(this.media[mediaindex], this.session);
+ if (tmp) {
+ tmp.xmlns = 'urn:xmpp:jingle:transports:ice-udp:1';
+ elem.attrs(tmp);
+ // XEP-0176
+ if (SDPUtil.find_line(this.media[mediaindex], 'a=candidate:', this.session)) { // add any a=candidate lines
+ var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=candidate:', this.session);
+ lines.forEach(function (line) {
+ elem.c('candidate', SDPUtil.candidateToJingle(line)).up();
+ });
+ }
+ }
+ elem.up(); // end of transport
+};
+
+SDP.prototype.RtcpFbToJingle = function (mediaindex, elem, payloadtype) { // XEP-0293
+ var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=rtcp-fb:' + payloadtype);
+ lines.forEach(function (line) {
+ var tmp = SDPUtil.parse_rtcpfb(line);
+ if (tmp.type == 'trr-int') {
+ elem.c('rtcp-fb-trr-int', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', value: tmp.params[0]});
+ elem.up();
+ } else {
+ elem.c('rtcp-fb', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', type: tmp.type});
+ if (tmp.params.length > 0) {
+ elem.attrs({'subtype': tmp.params[0]});
+ }
+ elem.up();
+ }
+ });
+};
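+
+/* Mapping sketch (XEP-0293, illustrative values):
+ * 'a=rtcp-fb:111 nack pli' ->
+ * <rtcp-fb xmlns='urn:xmpp:jingle:apps:rtp:rtcp-fb:0' type='nack' subtype='pli'/>
+ * 'a=rtcp-fb:* trr-int 100' ->
+ * <rtcp-fb-trr-int xmlns='urn:xmpp:jingle:apps:rtp:rtcp-fb:0' value='100'/>
+ */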
+
+SDP.prototype.RtcpFbFromJingle = function (elem, payloadtype) { // XEP-0293
+ var media = '';
+ var tmp = elem.find('>rtcp-fb-trr-int[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
+ if (tmp.length) {
+ media += 'a=rtcp-fb:' + '*' + ' ' + 'trr-int' + ' ';
+ if (tmp.attr('value')) {
+ media += tmp.attr('value');
+ } else {
+ media += '0';
+ }
+ media += '\r\n';
+ }
+ tmp = elem.find('>rtcp-fb[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
+ tmp.each(function () {
+ media += 'a=rtcp-fb:' + payloadtype + ' ' + $(this).attr('type');
+ if ($(this).attr('subtype')) {
+ media += ' ' + $(this).attr('subtype');
+ }
+ media += '\r\n';
+ });
+ return media;
+};
+
+// construct an SDP from a jingle stanza
+SDP.prototype.fromJingle = function (jingle) {
+ var self = this;
+ this.raw = 'v=0\r\n' +
+ 'o=- ' + '1923518516' + ' 2 IN IP4 0.0.0.0\r\n' +// FIXME
+ 's=-\r\n' +
+ 't=0 0\r\n';
+ // http://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-04#section-8
+ if ($(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').length) {
+ $(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').each(function (idx, group) {
+ var contents = $(group).find('>content').map(function (idx, content) {
+ return content.getAttribute('name');
+ }).get();
+ if (contents.length > 0) {
+ self.raw += 'a=group:' + (group.getAttribute('semantics') || group.getAttribute('type')) + ' ' + contents.join(' ') + '\r\n';
+ }
+ });
+ } else if ($(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').length) {
+ // temporary namespace, not to be used. to be removed soon.
+ $(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').each(function (idx, group) {
+ var contents = $(group).find('>content').map(function (idx, content) {
+ return content.getAttribute('name');
+ }).get();
+ if (group.getAttribute('type') !== null && contents.length > 0) {
+ self.raw += 'a=group:' + group.getAttribute('type') + ' ' + contents.join(' ') + '\r\n';
+ }
+ });
+ } else {
+ // for backward compatibility, to be removed soon
+ // assume all contents are in the same bundle group, can be improved upon later
+ var bundle = $(jingle).find('>content').filter(function (idx, content) {
+ //elem.c('bundle', {xmlns:'http://estos.de/ns/bundle'});
+ return $(content).find('>bundle').length > 0;
+ }).map(function (idx, content) {
+ return content.getAttribute('name');
+ }).get();
+ if (bundle.length) {
+ this.raw += 'a=group:BUNDLE ' + bundle.join(' ') + '\r\n';
+ }
+ }
+
+ this.session = this.raw;
+ jingle.find('>content').each(function () {
+ var m = self.jingle2media($(this));
+ self.media.push(m);
+ });
+
+ // reconstruct msid-semantic -- apparently not necessary
+ /*
+ var msid = SDPUtil.parse_ssrc(this.raw);
+ if (msid.hasOwnProperty('mslabel')) {
+ this.session += "a=msid-semantic: WMS " + msid.mslabel + "\r\n";
+ }
+ */
+
+ this.raw = this.session + this.media.join('');
+};
+
+// translate a jingle content element into an SDP media part
+SDP.prototype.jingle2media = function (content) {
+ var media = '',
+ desc = content.find('description'),
+ ssrc = desc.attr('ssrc'),
+ self = this,
+ tmp;
+ var sctp = content.find(
+ '>transport>sctpmap[xmlns="urn:xmpp:jingle:transports:dtls-sctp:1"]');
+
+ tmp = { media: desc.attr('media') };
+ tmp.port = '1';
+ if (content.attr('senders') == 'rejected') {
+ // estos hack to reject an m-line.
+ tmp.port = '0';
+ }
+ if (content.find('>transport>fingerprint').length || desc.find('encryption').length) {
+ if (sctp.length)
+ tmp.proto = 'DTLS/SCTP';
+ else
+ tmp.proto = 'RTP/SAVPF';
+ } else {
+ tmp.proto = 'RTP/AVPF';
+ }
+ if (!sctp.length)
+ {
+ tmp.fmt = desc.find('payload-type').map(
+ function () { return this.getAttribute('id'); }).get();
+ media += SDPUtil.build_mline(tmp) + '\r\n';
+ }
+ else
+ {
+ media += 'm=application 1 DTLS/SCTP ' + sctp.attr('number') + '\r\n';
+ media += 'a=sctpmap:' + sctp.attr('number') +
+ ' ' + sctp.attr('protocol');
+
+ var streamCount = sctp.attr('streams');
+ if (streamCount)
+ media += ' ' + streamCount + '\r\n';
+ else
+ media += '\r\n';
+ }
+
+ media += 'c=IN IP4 0.0.0.0\r\n';
+ if (!sctp.length)
+ media += 'a=rtcp:1 IN IP4 0.0.0.0\r\n';
+ //tmp = content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
+ tmp = content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
+ //console.log('transports: '+content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
+ //console.log('bundle.transports: '+content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
+ //console.log("tmp fingerprint: "+tmp.find('>fingerprint').innerHTML);
+ if (tmp.length) {
+ if (tmp.attr('ufrag')) {
+ media += SDPUtil.build_iceufrag(tmp.attr('ufrag')) + '\r\n';
+ }
+ if (tmp.attr('pwd')) {
+ media += SDPUtil.build_icepwd(tmp.attr('pwd')) + '\r\n';
+ }
+ tmp.find('>fingerprint').each(function () {
+ // FIXME: check namespace at some point
+ media += 'a=fingerprint:' + this.getAttribute('hash');
+ media += ' ' + $(this).text();
+ media += '\r\n';
+ //console.log("mline "+media);
+ if (this.getAttribute('setup')) {
+ media += 'a=setup:' + this.getAttribute('setup') + '\r\n';
+ }
+ });
+ }
+ switch (content.attr('senders')) {
+ case 'initiator':
+ media += 'a=sendonly\r\n';
+ break;
+ case 'responder':
+ media += 'a=recvonly\r\n';
+ break;
+ case 'none':
+ media += 'a=inactive\r\n';
+ break;
+ case 'both':
+ media += 'a=sendrecv\r\n';
+ break;
+ }
+ media += 'a=mid:' + content.attr('name') + '\r\n';
+ /*if (content.attr('name') == 'video') {
+ media += 'a=x-google-flag:conference' + '\r\n';
+ }*/
+
+ // <description><rtcp-mux/></description>
+ // see http://code.google.com/p/libjingle/issues/detail?id=309 -- no spec though
+ // and http://mail.jabber.org/pipermail/jingle/2011-December/001761.html
+ if (desc.find('rtcp-mux').length) {
+ media += 'a=rtcp-mux\r\n';
+ }
+
+ if (desc.find('encryption').length) {
+ desc.find('encryption>crypto').each(function () {
+ media += 'a=crypto:' + this.getAttribute('tag');
+ media += ' ' + this.getAttribute('crypto-suite');
+ media += ' ' + this.getAttribute('key-params');
+ if (this.getAttribute('session-params')) {
+ media += ' ' + this.getAttribute('session-params');
+ }
+ media += '\r\n';
+ });
+ }
+ desc.find('payload-type').each(function () {
+ media += SDPUtil.build_rtpmap(this) + '\r\n';
+ if ($(this).find('>parameter').length) {
+ media += 'a=fmtp:' + this.getAttribute('id') + ' ';
+ media += $(this).find('parameter').map(function () { return (this.getAttribute('name') ? (this.getAttribute('name') + '=') : '') + this.getAttribute('value'); }).get().join('; ');
+ media += '\r\n';
+ }
+ // xep-0293
+ media += self.RtcpFbFromJingle($(this), this.getAttribute('id'));
+ });
+
+ // xep-0293
+ media += self.RtcpFbFromJingle(desc, '*');
+
+ // xep-0294
+ tmp = desc.find('>rtp-hdrext[xmlns="urn:xmpp:jingle:apps:rtp:rtp-hdrext:0"]');
+ tmp.each(function () {
+ media += 'a=extmap:' + this.getAttribute('id') + ' ' + this.getAttribute('uri') + '\r\n';
+ });
+
+ content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]>candidate').each(function () {
+ media += SDPUtil.candidateFromJingle(this);
+ });
+
+ // XEP-0339 handle ssrc-group attributes
+ tmp = content.find('description>ssrc-group[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]').each(function() {
+ var semantics = this.getAttribute('semantics');
+ var ssrcs = $(this).find('>source').map(function() {
+ return this.getAttribute('ssrc');
+ }).get();
+
+ if (ssrcs.length != 0) {
+ media += 'a=ssrc-group:' + semantics + ' ' + ssrcs.join(' ') + '\r\n';
+ }
+ });
+
+ tmp = content.find('description>source[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]');
+ tmp.each(function () {
+ var ssrc = this.getAttribute('ssrc');
+ $(this).find('>parameter').each(function () {
+ media += 'a=ssrc:' + ssrc + ' ' + this.getAttribute('name');
+ if (this.getAttribute('value') && this.getAttribute('value').length)
+ media += ':' + this.getAttribute('value');
+ media += '\r\n';
+ });
+ });
+
+ if (tmp.length === 0) {
+ // fallback to proprietary mapping of a=ssrc lines
+ tmp = content.find('description>ssrc[xmlns="http://estos.de/ns/ssrc"]');
+ if (tmp.length) {
+ media += 'a=ssrc:' + ssrc + ' cname:' + tmp.attr('cname') + '\r\n';
+ media += 'a=ssrc:' + ssrc + ' msid:' + tmp.attr('msid') + '\r\n';
+ media += 'a=ssrc:' + ssrc + ' mslabel:' + tmp.attr('mslabel') + '\r\n';
+ media += 'a=ssrc:' + ssrc + ' label:' + tmp.attr('label') + '\r\n';
+ }
+ }
+ return media;
+};
+
diff --git a/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.util.js b/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.util.js
new file mode 100644
index 00000000..042a123c
--- /dev/null
+++ b/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.util.js
@@ -0,0 +1,408 @@
+/**
+ * Contains utility classes used in SDP class.
+ *
+ */
+
+/**
+ * Class holds a=ssrc lines and media type a=mid
+ * @param ssrc synchronization source identifier number (a=ssrc lines from SDP)
+ * @param type media type, e.g. "audio" or "video" (a=mid from SDP)
+ * @constructor
+ */
+function ChannelSsrc(ssrc, type) {
+ this.ssrc = ssrc;
+ this.type = type;
+ this.lines = [];
+}
+
+/**
+ * Class holds a=ssrc-group: lines
+ * @param semantics
+ * @param ssrcs
+ * @constructor
+ */
+function ChannelSsrcGroup(semantics, ssrcs) {
+ this.semantics = semantics;
+ this.ssrcs = ssrcs;
+}
+
+/**
+ * Helper class representing a media channel. It is a container for ChannelSsrc objects and holds the channel idx and media type.
+ * @param channelNumber channel idx in SDP media array.
+ * @param mediaType media type(a=mid)
+ * @constructor
+ */
+function MediaChannel(channelNumber, mediaType) {
+ /**
+ * SDP channel number
+ * @type {*}
+ */
+ this.chNumber = channelNumber;
+ /**
+ * Channel media type(a=mid)
+ * @type {*}
+ */
+ this.mediaType = mediaType;
+ /**
+ * The map of ssrc numbers to ChannelSsrc objects.
+ */
+ this.ssrcs = {};
+
+ /**
+ * The array of ChannelSsrcGroup objects.
+ * @type {Array}
+ */
+ this.ssrcGroups = [];
+}
+
+var SDPUtil = {
+ iceparams: function (mediadesc, sessiondesc) {
+ var data = null;
+ if (SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc) &&
+ SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc)) {
+ data = {
+ ufrag: SDPUtil.parse_iceufrag(SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc)),
+ pwd: SDPUtil.parse_icepwd(SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc))
+ };
+ }
+ return data;
+ },
+ parse_iceufrag: function (line) {
+ return line.substring(12);
+ },
+ build_iceufrag: function (frag) {
+ return 'a=ice-ufrag:' + frag;
+ },
+ parse_icepwd: function (line) {
+ return line.substring(10);
+ },
+ build_icepwd: function (pwd) {
+ return 'a=ice-pwd:' + pwd;
+ },
+ parse_mid: function (line) {
+ return line.substring(6);
+ },
+ parse_mline: function (line) {
+ var parts = line.substring(2).split(' '),
+ data = {};
+ data.media = parts.shift();
+ data.port = parts.shift();
+ data.proto = parts.shift();
+ if (parts[parts.length - 1] === '') { // trailing whitespace
+ parts.pop();
+ }
+ data.fmt = parts;
+ return data;
+ },
+ build_mline: function (mline) {
+ return 'm=' + mline.media + ' ' + mline.port + ' ' + mline.proto + ' ' + mline.fmt.join(' ');
+ },
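+ /* Round-trip example (illustrative line):
+ * SDPUtil.parse_mline('m=audio 1 RTP/AVPF 111 103')
+ * // -> { media: 'audio', port: '1', proto: 'RTP/AVPF', fmt: ['111', '103'] }
+ * // build_mline() on that object reconstructs the same m-line.
+ */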
+ parse_rtpmap: function (line) {
+ var parts = line.substring(9).split(' '),
+ data = {};
+ data.id = parts.shift();
+ parts = parts[0].split('/');
+ data.name = parts.shift();
+ data.clockrate = parts.shift();
+ data.channels = parts.length ? parts.shift() : '1';
+ return data;
+ },
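+ /* Example (illustrative line):
+ * SDPUtil.parse_rtpmap('a=rtpmap:111 opus/48000/2')
+ * // -> { id: '111', name: 'opus', clockrate: '48000', channels: '2' }
+ */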
+ /**
+ * Parses SDP line "a=sctpmap:..." and extracts SCTP port from it.
+ * @param line e.g. "a=sctpmap:5000 webrtc-datachannel"
+ * @returns [SCTP port number, protocol, streams]
+ */
+ parse_sctpmap: function (line)
+ {
+ var parts = line.substring(10).split(' ');
+ var sctpPort = parts[0];
+ var protocol = parts[1];
+ // Stream count is optional
+ var streamCount = parts.length > 2 ? parts[2] : null;
+ return [sctpPort, protocol, streamCount];// SCTP port
+ },
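+ /* Example:
+ * SDPUtil.parse_sctpmap('a=sctpmap:5000 webrtc-datachannel 1024')
+ * // -> ['5000', 'webrtc-datachannel', '1024'] (stream count is null
+ * // when the third token is absent)
+ */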
+ build_rtpmap: function (el) {
+ var line = 'a=rtpmap:' + el.getAttribute('id') + ' ' + el.getAttribute('name') + '/' + el.getAttribute('clockrate');
+ if (el.getAttribute('channels') && el.getAttribute('channels') != '1') {
+ line += '/' + el.getAttribute('channels');
+ }
+ return line;
+ },
+ parse_crypto: function (line) {
+ var parts = line.substring(9).split(' '),
+ data = {};
+ data.tag = parts.shift();
+ data['crypto-suite'] = parts.shift();
+ data['key-params'] = parts.shift();
+ if (parts.length) {
+ data['session-params'] = parts.join(' ');
+ }
+ return data;
+ },
+ parse_fingerprint: function (line) { // RFC 4572
+ var parts = line.substring(14).split(' '),
+ data = {};
+ data.hash = parts.shift();
+ data.fingerprint = parts.shift();
+ // TODO assert that fingerprint satisfies 2UHEX *(":" 2UHEX) ?
+ return data;
+ },
+ parse_fmtp: function (line) {
+ var parts = line.split(' '),
+ i, key, value,
+ data = [];
+ parts.shift();
+ parts = parts.join(' ').split(';');
+ for (i = 0; i < parts.length; i++) {
+ key = parts[i].split('=')[0];
+ while (key.length && key[0] == ' ') {
+ key = key.substring(1);
+ }
+ value = parts[i].split('=')[1];
+ if (key && value) {
+ data.push({name: key, value: value});
+ } else if (key) {
+ // rfc 4733 (DTMF) style stuff
+ data.push({name: '', value: key});
+ }
+ }
+ return data;
+ },
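+ /* Example (illustrative line):
+ * SDPUtil.parse_fmtp('a=fmtp:111 minptime=10; useinbandfec=1')
+ * // -> [{ name: 'minptime', value: '10' }, { name: 'useinbandfec', value: '1' }]
+ */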
+ parse_icecandidate: function (line) {
+ var candidate = {},
+ elems = line.split(' ');
+ candidate.foundation = elems[0].substring(12);
+ candidate.component = elems[1];
+ candidate.protocol = elems[2].toLowerCase();
+ candidate.priority = elems[3];
+ candidate.ip = elems[4];
+ candidate.port = elems[5];
+ // elems[6] => "typ"
+ candidate.type = elems[7];
+ candidate.generation = 0; // default value, may be overwritten below
+ for (var i = 8; i < elems.length; i += 2) {
+ switch (elems[i]) {
+ case 'raddr':
+ candidate['rel-addr'] = elems[i + 1];
+ break;
+ case 'rport':
+ candidate['rel-port'] = elems[i + 1];
+ break;
+ case 'generation':
+ candidate.generation = elems[i + 1];
+ break;
+ case 'tcptype':
+ candidate.tcptype = elems[i + 1];
+ break;
+ default: // TODO
+ console.log('parse_icecandidate not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
+ }
+ }
+ candidate.network = '1';
+ candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
+ return candidate;
+ },
+ build_icecandidate: function (cand) {
+ var line = ['a=candidate:' + cand.foundation, cand.component, cand.protocol, cand.priority, cand.ip, cand.port, 'typ', cand.type].join(' ');
+ line += ' ';
+ switch (cand.type) {
+ case 'srflx':
+ case 'prflx':
+ case 'relay':
+ if (cand.hasOwnProperty('rel-addr') && cand.hasOwnProperty('rel-port')) {
+ line += 'raddr';
+ line += ' ';
+ line += cand['rel-addr'];
+ line += ' ';
+ line += 'rport';
+ line += ' ';
+ line += cand['rel-port'];
+ line += ' ';
+ }
+ break;
+ }
+ if (cand.hasOwnProperty('tcptype')) {
+ line += 'tcptype';
+ line += ' ';
+ line += cand.tcptype;
+ line += ' ';
+ }
+ line += 'generation';
+ line += ' ';
+ line += cand.hasOwnProperty('generation') ? cand.generation : '0';
+ return line;
+ },
+ parse_ssrc: function (desc) {
+ // proprietary mapping of a=ssrc lines
+ // TODO: see "Jingle RTP Source Description" by Juberti and P. Thatcher on google docs
+ // and parse according to that
+ var lines = desc.split('\r\n'),
+ data = {};
+ for (var i = 0; i < lines.length; i++) {
+ if (lines[i].substring(0, 7) == 'a=ssrc:') {
+ var idx = lines[i].indexOf(' ');
+ data[lines[i].substr(idx + 1).split(':', 2)[0]] = lines[i].substr(idx + 1).split(':', 2)[1];
+ }
+ }
+ return data;
+ },
+ parse_rtcpfb: function (line) {
+ var parts = line.substr(10).split(' ');
+ var data = {};
+ data.pt = parts.shift();
+ data.type = parts.shift();
+ data.params = parts;
+ return data;
+ },
+ parse_extmap: function (line) {
+ var parts = line.substr(9).split(' ');
+ var data = {};
+ data.value = parts.shift();
+ if (data.value.indexOf('/') != -1) {
+ data.direction = data.value.substr(data.value.indexOf('/') + 1);
+ data.value = data.value.substr(0, data.value.indexOf('/'));
+ } else {
+ data.direction = 'both';
+ }
+ data.uri = parts.shift();
+ data.params = parts;
+ return data;
+ },
+ find_line: function (haystack, needle, sessionpart) {
+ var lines = haystack.split('\r\n');
+ for (var i = 0; i < lines.length; i++) {
+ if (lines[i].substring(0, needle.length) == needle) {
+ return lines[i];
+ }
+ }
+ if (!sessionpart) {
+ return false;
+ }
+ // search session part
+ lines = sessionpart.split('\r\n');
+ for (var j = 0; j < lines.length; j++) {
+ if (lines[j].substring(0, needle.length) == needle) {
+ return lines[j];
+ }
+ }
+ return false;
+ },
+ find_lines: function (haystack, needle, sessionpart) {
+ var lines = haystack.split('\r\n'),
+ needles = [];
+ for (var i = 0; i < lines.length; i++) {
+ if (lines[i].substring(0, needle.length) == needle)
+ needles.push(lines[i]);
+ }
+ if (needles.length || !sessionpart) {
+ return needles;
+ }
+ // search session part
+ lines = sessionpart.split('\r\n');
+ for (var j = 0; j < lines.length; j++) {
+ if (lines[j].substring(0, needle.length) == needle) {
+ needles.push(lines[j]);
+ }
+ }
+ return needles;
+ },
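+ /* Example: both finders try the media part first and only fall back to the
+ * optional session part when nothing matches:
+ * SDPUtil.find_line(media, 'a=mid:') // -> the matching line, or false
+ * SDPUtil.find_lines(media, 'a=ssrc:', session) // -> array of matching lines
+ */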
+ candidateToJingle: function (line) {
+ // a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0
+ // <candidate component=... foundation=... generation=... id=... ip=... network=... port=... priority=... protocol=... type=.../>
+ if (line.indexOf('candidate:') === 0) {
+ line = 'a=' + line;
+ } else if (line.substring(0, 12) != 'a=candidate:') {
+ console.log('parseCandidate called with a line that is not a candidate line');
+ console.log(line);
+ return null;
+ }
+ if (line.substring(line.length - 2) == '\r\n') // chomp it
+ line = line.substring(0, line.length - 2);
+ var candidate = {},
+ elems = line.split(' '),
+ i;
+ if (elems[6] != 'typ') {
+ console.log('did not find typ in the right place');
+ console.log(line);
+ return null;
+ }
+ candidate.foundation = elems[0].substring(12);
+ candidate.component = elems[1];
+ candidate.protocol = elems[2].toLowerCase();
+ candidate.priority = elems[3];
+ candidate.ip = elems[4];
+ candidate.port = elems[5];
+ // elems[6] => "typ"
+ candidate.type = elems[7];
+
+ candidate.generation = '0'; // default, may be overwritten below
+ for (i = 8; i < elems.length; i += 2) {
+ switch (elems[i]) {
+ case 'raddr':
+ candidate['rel-addr'] = elems[i + 1];
+ break;
+ case 'rport':
+ candidate['rel-port'] = elems[i + 1];
+ break;
+ case 'generation':
+ candidate.generation = elems[i + 1];
+ break;
+ case 'tcptype':
+ candidate.tcptype = elems[i + 1];
+ break;
+ default: // TODO
+ console.log('not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
+ }
+ }
+ candidate.network = '1';
+ candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
+ return candidate;
+ },
+ candidateFromJingle: function (cand) {
+ var line = 'a=candidate:';
+ line += cand.getAttribute('foundation');
+ line += ' ';
+ line += cand.getAttribute('component');
+ line += ' ';
+ line += cand.getAttribute('protocol'); //.toUpperCase(); // chrome M23 doesn't like this
+ line += ' ';
+ line += cand.getAttribute('priority');
+ line += ' ';
+ line += cand.getAttribute('ip');
+ line += ' ';
+ line += cand.getAttribute('port');
+ line += ' ';
+ line += 'typ';
+ line += ' ' + cand.getAttribute('type');
+ line += ' ';
+ switch (cand.getAttribute('type')) {
+ case 'srflx':
+ case 'prflx':
+ case 'relay':
+ if (cand.getAttribute('rel-addr') && cand.getAttribute('rel-port')) {
+ line += 'raddr';
+ line += ' ';
+ line += cand.getAttribute('rel-addr');
+ line += ' ';
+ line += 'rport';
+ line += ' ';
+ line += cand.getAttribute('rel-port');
+ line += ' ';
+ }
+ break;
+ }
+ if (cand.getAttribute('protocol').toLowerCase() == 'tcp') {
+ line += 'tcptype';
+ line += ' ';
+ line += cand.getAttribute('tcptype');
+ line += ' ';
+ }
+ line += 'generation';
+ line += ' ';
+ line += cand.getAttribute('generation') || '0';
+ return line + '\r\n';
+ }
+};
+
+exports.SDPUtil = SDPUtil;
+
diff --git a/contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js b/contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js
new file mode 100644
index 00000000..9c45c2df
--- /dev/null
+++ b/contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js
@@ -0,0 +1,254 @@
+/**
+ * Wrapper for built-in http.js to emulate the browser XMLHttpRequest object.
+ *
+ * This can be used with JS designed for browsers to improve reuse of code and
+ * allow the use of existing libraries.
+ *
+ * Usage: include("XMLHttpRequest.js") and use XMLHttpRequest per W3C specs.
+ *
+ * @todo SSL Support
+ * @author Dan DeFelippi <dan@driverdan.com>
+ * @license MIT
+ */
+
+var Url = require("url")
+ ,sys = require("util");
+
+exports.XMLHttpRequest = function() {
+ /**
+ * Private variables
+ */
+ var self = this;
+ var http = require('http');
+ var https = require('https');
+
+ // Holds http.js objects
+ var client;
+ var request;
+ var response;
+
+ // Request settings
+ var settings = {};
+
+ // Set some default headers
+ var defaultHeaders = {
+ "User-Agent": "node.js",
+ "Accept": "*/*",
+ };
+
+ var headers = defaultHeaders;
+
+ /**
+ * Constants
+ */
+ this.UNSENT = 0;
+ this.OPENED = 1;
+ this.HEADERS_RECEIVED = 2;
+ this.LOADING = 3;
+ this.DONE = 4;
+
+ /**
+ * Public vars
+ */
+ // Current state
+ this.readyState = this.UNSENT;
+
+ // default ready state change handler in case one is not set or is set late
+ this.onreadystatechange = function() {};
+
+ // Result & response
+ this.responseText = "";
+ this.responseXML = "";
+ this.status = null;
+ this.statusText = null;
+
+ /**
+ * Open the connection. Currently supports local server requests.
+ *
+ * @param string method Connection method (eg GET, POST)
+ * @param string url URL for the connection.
+ * @param boolean async Asynchronous connection. Default is true.
+ * @param string user Username for basic authentication (optional)
+ * @param string password Password for basic authentication (optional)
+ */
+ this.open = function(method, url, async, user, password) {
+ settings = {
+ "method": method,
+ "url": url,
+ "async": async || null,
+ "user": user || null,
+ "password": password || null
+ };
+
+ this.abort();
+
+ setState(this.OPENED);
+ };
+
+ /**
+ * Sets a header for the request.
+ *
+ * @param string header Header name
+ * @param string value Header value
+ */
+ this.setRequestHeader = function(header, value) {
+ headers[header] = value;
+ };
+
+ /**
+ * Gets a header from the server response.
+ *
+ * @param string header Name of header to get.
+ * @return string Text of the header or null if it doesn't exist.
+ */
+ this.getResponseHeader = function(header) {
+ if (this.readyState > this.OPENED && response.headers[header]) {
+ return header + ": " + response.headers[header];
+ }
+
+ return null;
+ };
+
+ /**
+ * Gets all the response headers.
+ *
+ * @return string
+ */
+ this.getAllResponseHeaders = function() {
+ if (this.readyState < this.HEADERS_RECEIVED) {
+ throw "INVALID_STATE_ERR: Headers have not been received.";
+ }
+ var result = "";
+
+ for (var i in response.headers) {
+ result += i + ": " + response.headers[i] + "\r\n";
+ }
+ return result.substr(0, result.length - 2);
+ };
+
+ /**
+ * Sends the request to the server.
+ *
+ * @param string data Optional data to send as request body.
+ */
+ this.send = function(data) {
+ if (this.readyState != this.OPENED) {
+ throw "INVALID_STATE_ERR: connection must be opened before send() is called";
+ }
+
+ var ssl = false;
+ var url = Url.parse(settings.url);
+
+ // Determine the server
+ switch (url.protocol) {
+ case 'https:':
+ ssl = true;
+ // SSL & non-SSL both need host, no break here.
+ case 'http:':
+ var host = url.hostname;
+ break;
+
+ case undefined:
+ case '':
+ var host = "localhost";
+ break;
+
+ default:
+ throw "Protocol not supported.";
+ }
+
+ // Default to port 80. If accessing localhost on another port be sure
+ // to use http://localhost:port/path
+ var port = url.port || (ssl ? 443 : 80);
+ // Add query string if one is used
+ var uri = url.pathname + (url.search ? url.search : '');
+
+ // Set the Host header or the server may reject the request
+ this.setRequestHeader("Host", host);
+
+ // Set content length header
+ if (settings.method == "GET" || settings.method == "HEAD") {
+ data = null;
+ } else if (data) {
+ this.setRequestHeader("Content-Length", Buffer.byteLength(data));
+
+ if (!headers["Content-Type"]) {
+ this.setRequestHeader("Content-Type", "text/plain;charset=UTF-8");
+ }
+ }
+
+ // Use the proper protocol
+ var doRequest = ssl ? https.request : http.request;
+
+ var options = {
+ host: host,
+ port: port,
+ path: uri,
+ method: settings.method,
+ headers: headers,
+ agent: false
+ };
+
+ var req = doRequest(options, function(res) {
+ response = res;
+ response.setEncoding("utf8");
+
+ setState(self.HEADERS_RECEIVED);
+ self.status = response.statusCode;
+
+ response.on('data', function(chunk) {
+ // Make sure there's some data
+ if (chunk) {
+ self.responseText += chunk;
+ }
+ setState(self.LOADING);
+ });
+
+ response.on('end', function() {
+ setState(self.DONE);
+ });
+
+ response.on('error', function(error) {
+ self.handleError(error);
+ });
+ }).on('error', function(error) {
+ self.handleError(error);
+ });
+
+ req.setHeader("Connection", "Close");
+
+ // Node 0.4 and later won't accept empty data, so only write a body when one exists.
+ if (data) {
+ req.write(data);
+ }
+
+ req.end();
+ };
+
+ this.handleError = function(error) {
+ this.status = 503;
+ this.statusText = error;
+ this.responseText = error.stack;
+ setState(this.DONE);
+ };
+
+ /**
+ * Aborts a request.
+ */
+ this.abort = function() {
+ headers = defaultHeaders;
+ this.readyState = this.UNSENT;
+ this.responseText = "";
+ this.responseXML = "";
+ };
+
+ /**
+ * Changes readyState and calls onreadystatechange.
+ *
+ * @param int state New state
+ */
+ var setState = function(state) {
+ self.readyState = state;
+ self.onreadystatechange();
+ };
+};
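+
+/* Usage sketch (hypothetical URL): the wrapper mirrors the W3C interface,
+ * so browser-style code ports over unchanged.
+ *
+ * var XMLHttpRequest = require('./XMLHttpRequest.js').XMLHttpRequest;
+ * var xhr = new XMLHttpRequest();
+ * xhr.onreadystatechange = function() {
+ * if (xhr.readyState === xhr.DONE) {
+ * console.log(xhr.status, xhr.responseText);
+ * }
+ * };
+ * xhr.open('GET', 'http://localhost:8080/', true);
+ * xhr.send(null);
+ */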
diff --git a/contrib/jitsimeetbridge/unjingle/strophe/base64.js b/contrib/jitsimeetbridge/unjingle/strophe/base64.js
new file mode 100644
index 00000000..418caac0
--- /dev/null
+++ b/contrib/jitsimeetbridge/unjingle/strophe/base64.js
@@ -0,0 +1,83 @@
+// This code was written by Tyler Akins and has been placed in the
+// public domain. It would be nice if you left this header intact.
+// Base64 code from Tyler Akins -- http://rumkin.com
+
+var Base64 = (function () {
+ var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
+
+ var obj = {
+ /**
+ * Encodes a string in base64
+ * @param {String} input The string to encode in base64.
+ */
+ encode: function (input) {
+ var output = "";
+ var chr1, chr2, chr3;
+ var enc1, enc2, enc3, enc4;
+ var i = 0;
+
+ do {
+ chr1 = input.charCodeAt(i++);
+ chr2 = input.charCodeAt(i++);
+ chr3 = input.charCodeAt(i++);
+
+ enc1 = chr1 >> 2;
+ enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
+ enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
+ enc4 = chr3 & 63;
+
+ if (isNaN(chr2)) {
+ enc3 = enc4 = 64;
+ } else if (isNaN(chr3)) {
+ enc4 = 64;
+ }
+
+ output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) +
+ keyStr.charAt(enc3) + keyStr.charAt(enc4);
+ } while (i < input.length);
+
+ return output;
+ },
+
+ /**
+ * Decodes a base64 string.
+ * @param {String} input The string to decode.
+ */
+ decode: function (input) {
+ var output = "";
+ var chr1, chr2, chr3;
+ var enc1, enc2, enc3, enc4;
+ var i = 0;
+
+ // remove all characters that are not A-Z, a-z, 0-9, +, /, or =
+ input = input.replace(/[^A-Za-z0-9\+\/\=]/g, '');
+
+ do {
+ enc1 = keyStr.indexOf(input.charAt(i++));
+ enc2 = keyStr.indexOf(input.charAt(i++));
+ enc3 = keyStr.indexOf(input.charAt(i++));
+ enc4 = keyStr.indexOf(input.charAt(i++));
+
+ chr1 = (enc1 << 2) | (enc2 >> 4);
+ chr2 = ((enc2 & 15) << 4) | (enc3 >> 2);
+ chr3 = ((enc3 & 3) << 6) | enc4;
+
+ output = output + String.fromCharCode(chr1);
+
+ if (enc3 != 64) {
+ output = output + String.fromCharCode(chr2);
+ }
+ if (enc4 != 64) {
+ output = output + String.fromCharCode(chr3);
+ }
+ } while (i < input.length);
+
+ return output;
+ }
+ };
+
+ return obj;
+})();
+
+// Nodify
+exports.Base64 = Base64;
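+
+/* Example: Base64.encode('abc') === 'YWJj' and Base64.decode('YWJj') === 'abc'. */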
diff --git a/contrib/jitsimeetbridge/unjingle/strophe/md5.js b/contrib/jitsimeetbridge/unjingle/strophe/md5.js
new file mode 100644
index 00000000..5334325e
--- /dev/null
+++ b/contrib/jitsimeetbridge/unjingle/strophe/md5.js
@@ -0,0 +1,279 @@
+/*
+ * A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
+ * Digest Algorithm, as defined in RFC 1321.
+ * Version 2.1 Copyright (C) Paul Johnston 1999 - 2002.
+ * Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
+ * Distributed under the BSD License
+ * See http://pajhome.org.uk/crypt/md5 for more info.
+ */
+
+var MD5 = (function () {
+ /*
+ * Configurable variables. You may need to tweak these to be compatible with
+ * the server-side, but the defaults work in most cases.
+ */
+ var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
+ var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */
+ var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
+
+ /*
+ * Add integers, wrapping at 2^32. This uses 16-bit operations internally
+ * to work around bugs in some JS interpreters.
+ */
+ var safe_add = function (x, y) {
+ var lsw = (x & 0xFFFF) + (y & 0xFFFF);
+ var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
+ return (msw << 16) | (lsw & 0xFFFF);
+ };
+
+ /*
+ * Bitwise rotate a 32-bit number to the left.
+ */
+ var bit_rol = function (num, cnt) {
+ return (num << cnt) | (num >>> (32 - cnt));
+ };
+
+ /*
+ * Convert a string to an array of little-endian words
+ * If chrsz is ASCII, characters >255 have their hi-byte silently ignored.
+ */
+ var str2binl = function (str) {
+ var bin = [];
+ var mask = (1 << chrsz) - 1;
+ for(var i = 0; i < str.length * chrsz; i += chrsz)
+ {
+ bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (i%32);
+ }
+ return bin;
+ };
+
+ /*
+ * Convert an array of little-endian words to a string
+ */
+ var binl2str = function (bin) {
+ var str = "";
+ var mask = (1 << chrsz) - 1;
+ for(var i = 0; i < bin.length * 32; i += chrsz)
+ {
+ str += String.fromCharCode((bin[i>>5] >>> (i % 32)) & mask);
+ }
+ return str;
+ };
+
+ /*
+ * Convert an array of little-endian words to a hex string.
+ */
+ var binl2hex = function (binarray) {
+ var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
+ var str = "";
+ for(var i = 0; i < binarray.length * 4; i++)
+ {
+ str += hex_tab.charAt((binarray[i>>2] >> ((i%4)*8+4)) & 0xF) +
+ hex_tab.charAt((binarray[i>>2] >> ((i%4)*8 )) & 0xF);
+ }
+ return str;
+ };
+
+ /*
+ * Convert an array of little-endian words to a base-64 string
+ */
+ var binl2b64 = function (binarray) {
+ var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ var str = "";
+ var triplet, j;
+ for(var i = 0; i < binarray.length * 4; i += 3)
+ {
+ triplet = (((binarray[i >> 2] >> 8 * ( i %4)) & 0xFF) << 16) |
+ (((binarray[i+1 >> 2] >> 8 * ((i+1)%4)) & 0xFF) << 8 ) |
+ ((binarray[i+2 >> 2] >> 8 * ((i+2)%4)) & 0xFF);
+ for(j = 0; j < 4; j++)
+ {
+ if(i * 8 + j * 6 > binarray.length * 32) { str += b64pad; }
+ else { str += tab.charAt((triplet >> 6*(3-j)) & 0x3F); }
+ }
+ }
+ return str;
+ };
+
+ /*
+ * These functions implement the four basic operations the algorithm uses.
+ */
+ var md5_cmn = function (q, a, b, x, s, t) {
+ return safe_add(bit_rol(safe_add(safe_add(a, q),safe_add(x, t)), s),b);
+ };
+
+ var md5_ff = function (a, b, c, d, x, s, t) {
+ return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t);
+ };
+
+ var md5_gg = function (a, b, c, d, x, s, t) {
+ return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t);
+ };
+
+ var md5_hh = function (a, b, c, d, x, s, t) {
+ return md5_cmn(b ^ c ^ d, a, b, x, s, t);
+ };
+
+ var md5_ii = function (a, b, c, d, x, s, t) {
+ return md5_cmn(c ^ (b | (~d)), a, b, x, s, t);
+ };
+
+ /*
+ * Calculate the MD5 of an array of little-endian words, and a bit length
+ */
+ var core_md5 = function (x, len) {
+ /* append padding */
+ x[len >> 5] |= 0x80 << ((len) % 32);
+ x[(((len + 64) >>> 9) << 4) + 14] = len;
+
+ var a = 1732584193;
+ var b = -271733879;
+ var c = -1732584194;
+ var d = 271733878;
+
+ var olda, oldb, oldc, oldd;
+ for (var i = 0; i < x.length; i += 16)
+ {
+ olda = a;
+ oldb = b;
+ oldc = c;
+ oldd = d;
+
+ a = md5_ff(a, b, c, d, x[i+ 0], 7 , -680876936);
+ d = md5_ff(d, a, b, c, x[i+ 1], 12, -389564586);
+ c = md5_ff(c, d, a, b, x[i+ 2], 17, 606105819);
+ b = md5_ff(b, c, d, a, x[i+ 3], 22, -1044525330);
+ a = md5_ff(a, b, c, d, x[i+ 4], 7 , -176418897);
+ d = md5_ff(d, a, b, c, x[i+ 5], 12, 1200080426);
+ c = md5_ff(c, d, a, b, x[i+ 6], 17, -1473231341);
+ b = md5_ff(b, c, d, a, x[i+ 7], 22, -45705983);
+ a = md5_ff(a, b, c, d, x[i+ 8], 7 , 1770035416);
+ d = md5_ff(d, a, b, c, x[i+ 9], 12, -1958414417);
+ c = md5_ff(c, d, a, b, x[i+10], 17, -42063);
+ b = md5_ff(b, c, d, a, x[i+11], 22, -1990404162);
+ a = md5_ff(a, b, c, d, x[i+12], 7 , 1804603682);
+ d = md5_ff(d, a, b, c, x[i+13], 12, -40341101);
+ c = md5_ff(c, d, a, b, x[i+14], 17, -1502002290);
+ b = md5_ff(b, c, d, a, x[i+15], 22, 1236535329);
+
+ a = md5_gg(a, b, c, d, x[i+ 1], 5 , -165796510);
+ d = md5_gg(d, a, b, c, x[i+ 6], 9 , -1069501632);
+ c = md5_gg(c, d, a, b, x[i+11], 14, 643717713);
+ b = md5_gg(b, c, d, a, x[i+ 0], 20, -373897302);
+ a = md5_gg(a, b, c, d, x[i+ 5], 5 , -701558691);
+ d = md5_gg(d, a, b, c, x[i+10], 9 , 38016083);
+ c = md5_gg(c, d, a, b, x[i+15], 14, -660478335);
+ b = md5_gg(b, c, d, a, x[i+ 4], 20, -405537848);
+ a = md5_gg(a, b, c, d, x[i+ 9], 5 , 568446438);
+ d = md5_gg(d, a, b, c, x[i+14], 9 , -1019803690);
+ c = md5_gg(c, d, a, b, x[i+ 3], 14, -187363961);
+ b = md5_gg(b, c, d, a, x[i+ 8], 20, 1163531501);
+ a = md5_gg(a, b, c, d, x[i+13], 5 , -1444681467);
+ d = md5_gg(d, a, b, c, x[i+ 2], 9 , -51403784);
+ c = md5_gg(c, d, a, b, x[i+ 7], 14, 1735328473);
+ b = md5_gg(b, c, d, a, x[i+12], 20, -1926607734);
+
+ a = md5_hh(a, b, c, d, x[i+ 5], 4 , -378558);
+ d = md5_hh(d, a, b, c, x[i+ 8], 11, -2022574463);
+ c = md5_hh(c, d, a, b, x[i+11], 16, 1839030562);
+ b = md5_hh(b, c, d, a, x[i+14], 23, -35309556);
+ a = md5_hh(a, b, c, d, x[i+ 1], 4 , -1530992060);
+ d = md5_hh(d, a, b, c, x[i+ 4], 11, 1272893353);
+ c = md5_hh(c, d, a, b, x[i+ 7], 16, -155497632);
+ b = md5_hh(b, c, d, a, x[i+10], 23, -1094730640);
+ a = md5_hh(a, b, c, d, x[i+13], 4 , 681279174);
+ d = md5_hh(d, a, b, c, x[i+ 0], 11, -358537222);
+ c = md5_hh(c, d, a, b, x[i+ 3], 16, -722521979);
+ b = md5_hh(b, c, d, a, x[i+ 6], 23, 76029189);
+ a = md5_hh(a, b, c, d, x[i+ 9], 4 , -640364487);
+ d = md5_hh(d, a, b, c, x[i+12], 11, -421815835);
+ c = md5_hh(c, d, a, b, x[i+15], 16, 530742520);
+ b = md5_hh(b, c, d, a, x[i+ 2], 23, -995338651);
+
+ a = md5_ii(a, b, c, d, x[i+ 0], 6 , -198630844);
+ d = md5_ii(d, a, b, c, x[i+ 7], 10, 1126891415);
+ c = md5_ii(c, d, a, b, x[i+14], 15, -1416354905);
+ b = md5_ii(b, c, d, a, x[i+ 5], 21, -57434055);
+ a = md5_ii(a, b, c, d, x[i+12], 6 , 1700485571);
+ d = md5_ii(d, a, b, c, x[i+ 3], 10, -1894986606);
+ c = md5_ii(c, d, a, b, x[i+10], 15, -1051523);
+ b = md5_ii(b, c, d, a, x[i+ 1], 21, -2054922799);
+ a = md5_ii(a, b, c, d, x[i+ 8], 6 , 1873313359);
+ d = md5_ii(d, a, b, c, x[i+15], 10, -30611744);
+ c = md5_ii(c, d, a, b, x[i+ 6], 15, -1560198380);
+ b = md5_ii(b, c, d, a, x[i+13], 21, 1309151649);
+ a = md5_ii(a, b, c, d, x[i+ 4], 6 , -145523070);
+ d = md5_ii(d, a, b, c, x[i+11], 10, -1120210379);
+ c = md5_ii(c, d, a, b, x[i+ 2], 15, 718787259);
+ b = md5_ii(b, c, d, a, x[i+ 9], 21, -343485551);
+
+ a = safe_add(a, olda);
+ b = safe_add(b, oldb);
+ c = safe_add(c, oldc);
+ d = safe_add(d, oldd);
+ }
+ return [a, b, c, d];
+ };
+
+
+ /*
+ * Calculate the HMAC-MD5, of a key and some data
+ */
+ var core_hmac_md5 = function (key, data) {
+ var bkey = str2binl(key);
+ if(bkey.length > 16) { bkey = core_md5(bkey, key.length * chrsz); }
+
+ var ipad = new Array(16), opad = new Array(16);
+ for(var i = 0; i < 16; i++)
+ {
+ ipad[i] = bkey[i] ^ 0x36363636;
+ opad[i] = bkey[i] ^ 0x5C5C5C5C;
+ }
+
+ var hash = core_md5(ipad.concat(str2binl(data)), 512 + data.length * chrsz);
+ return core_md5(opad.concat(hash), 512 + 128);
+ };
+
+ var obj = {
+ /*
+ * These are the functions you'll usually want to call.
+ * They take string arguments and return either hex or base-64 encoded
+ * strings.
+ */
+ hexdigest: function (s) {
+ return binl2hex(core_md5(str2binl(s), s.length * chrsz));
+ },
+
+ b64digest: function (s) {
+ return binl2b64(core_md5(str2binl(s), s.length * chrsz));
+ },
+
+ hash: function (s) {
+ return binl2str(core_md5(str2binl(s), s.length * chrsz));
+ },
+
+ hmac_hexdigest: function (key, data) {
+ return binl2hex(core_hmac_md5(key, data));
+ },
+
+ hmac_b64digest: function (key, data) {
+ return binl2b64(core_hmac_md5(key, data));
+ },
+
+ hmac_hash: function (key, data) {
+ return binl2str(core_hmac_md5(key, data));
+ },
+
+ /*
+ * Perform a simple self-test to see if the VM is working
+ */
+ test: function () {
+ return MD5.hexdigest("abc") === "900150983cd24fb0d6963f7d28e17f72";
+ }
+ };
+
+ return obj;
+})();
+
+// Nodify
+exports.MD5 = MD5;
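+
+/* Example (digest value taken from the self-test above):
+ * MD5.hexdigest('abc') === '900150983cd24fb0d6963f7d28e17f72'
+ */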
diff --git a/contrib/jitsimeetbridge/unjingle/strophe/strophe.js b/contrib/jitsimeetbridge/unjingle/strophe/strophe.js
new file mode 100644
index 00000000..06d426cd
--- /dev/null
+++ b/contrib/jitsimeetbridge/unjingle/strophe/strophe.js
@@ -0,0 +1,3256 @@
+/*
+ This program is distributed under the terms of the MIT license.
+ Please see the LICENSE file for details.
+
+ Copyright 2006-2008, OGG, LLC
+*/
+
+/* jslint configuration: */
+/*global document, window, setTimeout, clearTimeout, console,
+ XMLHttpRequest, ActiveXObject,
+ Base64, MD5,
+ Strophe, $build, $msg, $iq, $pres */
+
+/** File: strophe.js
+ * A JavaScript library for XMPP BOSH.
+ *
+ * This is the JavaScript version of the Strophe library. Since JavaScript
+ * has no facilities for persistent TCP connections, this library uses
+ * Bidirectional-streams Over Synchronous HTTP (BOSH) to emulate
+ * a persistent, stateful, two-way connection to an XMPP server. More
+ * information on BOSH can be found in XEP 124.
+ */
+
+/** PrivateFunction: Function.prototype.bind
+ * Bind a function to an instance.
+ *
+ * This Function object extension method creates a bound method similar
+ * to those in Python. This means that the 'this' object will point
+ * to the instance you want. See
+ * <a href='https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Function/bind'>MDC's bind() documentation</a> and
+ * <a href='http://benjamin.smedbergs.us/blog/2007-01-03/bound-functions-and-function-imports-in-javascript/'>Bound Functions and Function Imports in JavaScript</a>
+ * for a complete explanation.
+ *
+ * This extension already exists in some browsers (namely, Firefox 3), but
+ * we provide it to support those that don't.
+ *
+ * Parameters:
+ * (Object) obj - The object that will become 'this' in the bound function.
+ * (Object) argN - An optional argument that will be prepended to the
+ * arguments given for the function call
+ *
+ * Returns:
+ * The bound function.
+ */
+
+/* Make it work on node.js: Nodify
+ *
+ * Steps:
+ * 1. Create the global objects: window, document, Base64, MD5 and XMLHttpRequest
+ * 2. Use the node-XMLHttpRequest module.
+ * 3. Use jsdom for the document object - since it supports DOM functions.
+ * 4. Replace all calls to childNodes with _childNodes (since the former doesn't
+ * seem to work on jsdom).
+ * 5. While getting the response from XMLHttpRequest, manually convert the text
+ * data to XML.
+ * 6. All calls to nodeName should be replaced by nodeName.toLowerCase() since jsdom
+ * seems to always convert node names to upper case.
+ *
+ */
+var XMLHttpRequest = require('./XMLHttpRequest.js').XMLHttpRequest;
+var Base64 = require('./base64.js').Base64;
+var MD5 = require('./md5.js').MD5;
+var jsdom = require("jsdom").jsdom;
+
+document = jsdom("<html><head></head><body></body></html>");
+
+window = {
+ XMLHttpRequest: XMLHttpRequest,
+ Base64: Base64,
+ MD5: MD5
+};
+
+exports.Strophe = window;
+
+if (!Function.prototype.bind) {
+ Function.prototype.bind = function (obj /*, arg1, arg2, ... */)
+ {
+ var func = this;
+ var _slice = Array.prototype.slice;
+ var _concat = Array.prototype.concat;
+ var _args = _slice.call(arguments, 1);
+
+ return function () {
+ return func.apply(obj ? obj : this,
+ _concat.call(_args,
+ _slice.call(arguments, 0)));
+ };
+ };
+}
+
+/** PrivateFunction: Array.prototype.indexOf
+ * Return the index of an object in an array.
+ *
+ * This function is not supplied by some JavaScript implementations, so
+ * we provide it if it is missing. This code is from:
+ * http://developer.mozilla.org/En/Core_JavaScript_1.5_Reference:Objects:Array:indexOf
+ *
+ * Parameters:
+ * (Object) elt - The object to look for.
+ * (Integer) from - The index from which to start looking. (optional).
+ *
+ * Returns:
+ * The index of elt in the array or -1 if not found.
+ */
+if (!Array.prototype.indexOf)
+{
+ Array.prototype.indexOf = function(elt /*, from*/)
+ {
+ var len = this.length;
+
+ var from = Number(arguments[1]) || 0;
+ from = (from < 0) ? Math.ceil(from) : Math.floor(from);
+ if (from < 0) {
+ from += len;
+ }
+
+ for (; from < len; from++) {
+ if (from in this && this[from] === elt) {
+ return from;
+ }
+ }
+
+ return -1;
+ };
+}
+
+/* All of the Strophe globals are defined in this special function below so
+ * that references to the globals become closures. This will ensure that
+ * on page reload, these references will still be available to callbacks
+ * that are still executing.
+ */
+
+(function (callback) {
+var Strophe;
+
+/** Function: $build
+ * Create a Strophe.Builder.
+ * This is an alias for 'new Strophe.Builder(name, attrs)'.
+ *
+ * Parameters:
+ * (String) name - The root element name.
+ * (Object) attrs - The attributes for the root element in object notation.
+ *
+ * Returns:
+ * A new Strophe.Builder object.
+ */
+function $build(name, attrs) { return new Strophe.Builder(name, attrs); }
+/** Function: $msg
+ * Create a Strophe.Builder with a <message/> element as the root.
+ *
+ * Parameters:
+ * (Object) attrs - The <message/> element attributes in object notation.
+ *
+ * Returns:
+ * A new Strophe.Builder object.
+ */
+function $msg(attrs) { return new Strophe.Builder("message", attrs); }
+/** Function: $iq
+ * Create a Strophe.Builder with an <iq/> element as the root.
+ *
+ * Parameters:
+ * (Object) attrs - The <iq/> element attributes in object notation.
+ *
+ * Returns:
+ * A new Strophe.Builder object.
+ */
+function $iq(attrs) { return new Strophe.Builder("iq", attrs); }
+/** Function: $pres
+ * Create a Strophe.Builder with a <presence/> element as the root.
+ *
+ * Parameters:
+ * (Object) attrs - The <presence/> element attributes in object notation.
+ *
+ * Returns:
+ * A new Strophe.Builder object.
+ */
+function $pres(attrs) { return new Strophe.Builder("presence", attrs); }
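+
+/* Builder usage sketch (hypothetical JID): each helper returns a
+ * Strophe.Builder, and c()/t()/up() grow the stanza tree:
+ * var stanza = $msg({to: 'user@example.com', type: 'chat'})
+ * .c('body').t('hello');
+ */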
+
+/** Class: Strophe
+ * An object container for all Strophe library functions.
+ *
+ * This class is just a container for all the objects and constants
+ * used in the library. It is not meant to be instantiated, but to
+ * provide a namespace for library objects, constants, and functions.
+ */
+Strophe = {
+ /** Constant: VERSION
+ * The version of the Strophe library. Unreleased builds will have
+ * a version of head-HASH where HASH is a partial revision.
+ */
+ VERSION: "@VERSION@",
+
+ /** Constants: XMPP Namespace Constants
+ * Common namespace constants from the XMPP RFCs and XEPs.
+ *
+ * NS.HTTPBIND - HTTP BIND namespace from XEP 124.
+ * NS.BOSH - BOSH namespace from XEP 206.
+ * NS.CLIENT - Main XMPP client namespace.
+ * NS.AUTH - Legacy authentication namespace.
+ * NS.ROSTER - Roster operations namespace.
+ * NS.PROFILE - Profile namespace.
+ * NS.DISCO_INFO - Service discovery info namespace from XEP 30.
+ * NS.DISCO_ITEMS - Service discovery items namespace from XEP 30.
+ * NS.MUC - Multi-User Chat namespace from XEP 45.
+ * NS.SASL - XMPP SASL namespace from RFC 3920.
+ * NS.STREAM - XMPP Streams namespace from RFC 3920.
+ * NS.BIND - XMPP Binding namespace from RFC 3920.
+ * NS.SESSION - XMPP Session namespace from RFC 3920.
+ */
+ NS: {
+ HTTPBIND: "http://jabber.org/protocol/httpbind",
+ BOSH: "urn:xmpp:xbosh",
+ CLIENT: "jabber:client",
+ AUTH: "jabber:iq:auth",
+ ROSTER: "jabber:iq:roster",
+ PROFILE: "jabber:iq:profile",
+ DISCO_INFO: "http://jabber.org/protocol/disco#info",
+ DISCO_ITEMS: "http://jabber.org/protocol/disco#items",
+ MUC: "http://jabber.org/protocol/muc",
+ SASL: "urn:ietf:params:xml:ns:xmpp-sasl",
+ STREAM: "http://etherx.jabber.org/streams",
+ BIND: "urn:ietf:params:xml:ns:xmpp-bind",
+ SESSION: "urn:ietf:params:xml:ns:xmpp-session",
+ VERSION: "jabber:iq:version",
+ STANZAS: "urn:ietf:params:xml:ns:xmpp-stanzas"
+ },
+
+ /** Function: addNamespace
+ * This function is used to extend the current namespaces in
+ * Strophe.NS. It takes a key and a value with the key being the
+ * name of the new namespace, with its actual value.
+ * For example:
+ * Strophe.addNamespace('PUBSUB', "http://jabber.org/protocol/pubsub");
+ *
+ * Parameters:
+ * (String) name - The name under which the namespace will be
+ * referenced under Strophe.NS
+ * (String) value - The actual namespace.
+ */
+ addNamespace: function (name, value)
+ {
+ Strophe.NS[name] = value;
+ },
+
+ /** Constants: Connection Status Constants
+ * Connection status constants for use by the connection handler
+ * callback.
+ *
+ * Status.ERROR - An error has occurred
+ * Status.CONNECTING - The connection is currently being made
+ * Status.CONNFAIL - The connection attempt failed
+ * Status.AUTHENTICATING - The connection is authenticating
+ * Status.AUTHFAIL - The authentication attempt failed
+ * Status.CONNECTED - The connection has succeeded
+ * Status.DISCONNECTED - The connection has been terminated
+ * Status.DISCONNECTING - The connection is currently being terminated
+ * Status.ATTACHED - The connection has been attached
+ */
+ Status: {
+ ERROR: 0,
+ CONNECTING: 1,
+ CONNFAIL: 2,
+ AUTHENTICATING: 3,
+ AUTHFAIL: 4,
+ CONNECTED: 5,
+ DISCONNECTED: 6,
+ DISCONNECTING: 7,
+ ATTACHED: 8
+ },
+
+ /** Constants: Log Level Constants
+ * Logging level indicators.
+ *
+ * LogLevel.DEBUG - Debug output
+ * LogLevel.INFO - Informational output
+ * LogLevel.WARN - Warnings
+ * LogLevel.ERROR - Errors
+ * LogLevel.FATAL - Fatal errors
+ */
+ LogLevel: {
+ DEBUG: 0,
+ INFO: 1,
+ WARN: 2,
+ ERROR: 3,
+ FATAL: 4
+ },
+
+ /** PrivateConstants: DOM Element Type Constants
+ * DOM element types.
+ *
+ * ElementType.NORMAL - Normal element.
+ * ElementType.TEXT - Text data element.
+ */
+ ElementType: {
+ NORMAL: 1,
+ TEXT: 3
+ },
+
+ /** PrivateConstants: Timeout Values
+ * Timeout values for error states. These values are in seconds.
+ * These should not be changed unless you know exactly what you are
+ * doing.
+ *
+ * TIMEOUT - Timeout multiplier. A waiting request will be considered
+ * failed after Math.floor(TIMEOUT * wait) seconds have elapsed.
+ * This defaults to 1.1, and with default wait, 66 seconds.
+ * SECONDARY_TIMEOUT - Secondary timeout multiplier. In cases where
+ * Strophe can detect early failure, it will consider the request
+ * failed if it doesn't return after
+ * Math.floor(SECONDARY_TIMEOUT * wait) seconds have elapsed.
+ * This defaults to 0.1, and with default wait, 6 seconds.
+ */
+ TIMEOUT: 1.1,
+ SECONDARY_TIMEOUT: 0.1,
+
+ /** Function: forEachChild
+ * Map a function over some or all child elements of a given element.
+ *
+ * This is a small convenience function for mapping a function over
+ * some or all of the children of an element. If elemName is null, all
+ * children will be passed to the function, otherwise only children
+ * whose tag names match elemName will be passed.
+ *
+ * Parameters:
+ * (XMLElement) elem - The element to operate on.
+ * (String) elemName - The child element tag name filter.
+ * (Function) func - The function to apply to each child. This
+ * function should take a single argument, a DOM element.
+ */
+ forEachChild: function (elem, elemName, func)
+ {
+ var i, childNode;
+
+ for (i = 0; i < elem._childNodes.length; i++) {
+ childNode = elem._childNodes[i];
+ if (childNode.nodeType == Strophe.ElementType.NORMAL &&
+ (!elemName || this.isTagEqual(childNode, elemName))) {
+ func(childNode);
+ }
+ }
+ },
+
+ /** Function: isTagEqual
+ * Compare an element's tag name with a string.
+ *
+ * This function is case insensitive.
+ *
+ * Parameters:
+ * (XMLElement) el - A DOM element.
+ * (String) name - The element name.
+ *
+ * Returns:
+ * true if the element's tag name matches _name_, and false
+ * otherwise.
+ */
+ isTagEqual: function (el, name)
+ {
+ return el.tagName.toLowerCase() == name.toLowerCase();
+ },
+
+ /** PrivateVariable: _xmlGenerator
+ * _Private_ variable that caches a DOM document to
+ * generate elements.
+ */
+ _xmlGenerator: null,
+
+ /** PrivateFunction: _makeGenerator
+ * _Private_ function that creates a dummy XML DOM document to serve as
+ * an element and text node generator.
+ */
+ _makeGenerator: function () {
+ var doc;
+
+ if (window.ActiveXObject) {
+ doc = this._getIEXmlDom();
+ doc.appendChild(doc.createElement('strophe'));
+ } else {
+ doc = document.implementation
+ .createDocument('jabber:client', 'strophe', null);
+ }
+
+ return doc;
+ },
+
+ /** Function: xmlGenerator
+ * Get the DOM document to generate elements.
+ *
+ * Returns:
+ * The currently used DOM document.
+ */
+ xmlGenerator: function () {
+ if (!Strophe._xmlGenerator) {
+ Strophe._xmlGenerator = Strophe._makeGenerator();
+ }
+ return Strophe._xmlGenerator;
+ },
+
+ /** PrivateFunction: _getIEXmlDom
+ * Gets IE xml doc object
+ *
+ * Returns:
+ * A Microsoft XML DOM Object
+ * See Also:
+ * http://msdn.microsoft.com/en-us/library/ms757837%28VS.85%29.aspx
+ */
+ _getIEXmlDom : function() {
+ var doc = null;
+ var docStrings = [
+ "Msxml2.DOMDocument.6.0",
+ "Msxml2.DOMDocument.5.0",
+ "Msxml2.DOMDocument.4.0",
+ "MSXML2.DOMDocument.3.0",
+ "MSXML2.DOMDocument",
+ "MSXML.DOMDocument",
+ "Microsoft.XMLDOM"
+ ];
+
+ for (var d = 0; d < docStrings.length; d++) {
+ if (doc === null) {
+ try {
+ doc = new ActiveXObject(docStrings[d]);
+ } catch (e) {
+ doc = null;
+ }
+ } else {
+ break;
+ }
+ }
+
+ return doc;
+ },
+
+ /** Function: xmlElement
+ * Create an XML DOM element.
+ *
+ * This function creates an XML DOM element correctly across all
+ * implementations. Note that these are not HTML DOM elements, which
+ * aren't appropriate for XMPP stanzas.
+ *
+ * Parameters:
+ * (String) name - The name for the element.
+ * (Array|Object) attrs - An optional array or object containing
+ * key/value pairs to use as element attributes. The object should
+ * be in the format {'key': 'value'} or {key: 'value'}. The array
+ * should have the format [['key1', 'value1'], ['key2', 'value2']].
+ * (String) text - The text child data for the element.
+ *
+ * Returns:
+ * A new XML DOM element.
+ */
+ xmlElement: function (name)
+ {
+ if (!name) { return null; }
+
+ var node = Strophe.xmlGenerator().createElement(name);
+
+ // FIXME: this should throw errors if args are the wrong type or
+ // there are more than two optional args
+ var a, i, k;
+ for (a = 1; a < arguments.length; a++) {
+ if (!arguments[a]) { continue; }
+ if (typeof(arguments[a]) == "string" ||
+ typeof(arguments[a]) == "number") {
+ node.appendChild(Strophe.xmlTextNode(arguments[a]));
+ } else if (typeof(arguments[a]) == "object" &&
+ typeof(arguments[a].sort) == "function") {
+ for (i = 0; i < arguments[a].length; i++) {
+ if (typeof(arguments[a][i]) == "object" &&
+ typeof(arguments[a][i].sort) == "function") {
+ node.setAttribute(arguments[a][i][0],
+ arguments[a][i][1]);
+ }
+ }
+ } else if (typeof(arguments[a]) == "object") {
+ for (k in arguments[a]) {
+ if (arguments[a].hasOwnProperty(k)) {
+ node.setAttribute(k, arguments[a][k]);
+ }
+ }
+ }
+ }
+
+ return node;
+ },
+
+ /** Function: xmlescape
+ * Escapes invalid XML characters.
+ *
+ * Parameters:
+ * (String) text - text to escape.
+ *
+ * Returns:
+ * Escaped text.
+ */
+ xmlescape: function(text)
+ {
+ text = text.replace(/\&/g, "&amp;");
+ text = text.replace(/</g, "&lt;");
+ text = text.replace(/>/g, "&gt;");
+ return text;
+ },
+
+ /** Function: xmlTextNode
+ * Creates an XML DOM text node.
+ *
+ * Provides a cross implementation version of document.createTextNode.
+ *
+ * Parameters:
+ * (String) text - The content of the text node.
+ *
+ * Returns:
+ * A new XML DOM text node.
+ */
+ xmlTextNode: function (text)
+ {
+ //ensure text is escaped
+ text = Strophe.xmlescape(text);
+
+ return Strophe.xmlGenerator().createTextNode(text);
+ },
+
+ /** Function: getText
+ * Get the concatenation of all text children of an element.
+ *
+ * Parameters:
+ * (XMLElement) elem - A DOM element.
+ *
+ * Returns:
+ * A String with the concatenated text of all text element children.
+ */
+ getText: function (elem)
+ {
+ if (!elem) { return null; }
+
+ var str = "";
+ if (elem._childNodes.length === 0 && elem.nodeType ==
+ Strophe.ElementType.TEXT) {
+ str += elem.nodeValue;
+ }
+
+ for (var i = 0; i < elem._childNodes.length; i++) {
+ if (elem._childNodes[i].nodeType == Strophe.ElementType.TEXT) {
+ str += elem._childNodes[i].nodeValue;
+ }
+ }
+
+ return str;
+ },
+
+ /** Function: copyElement
+ * Copy an XML DOM element.
+ *
+ * This function copies a DOM element and all its descendants and returns
+ * the new copy.
+ *
+ * Parameters:
+ * (XMLElement) elem - A DOM element.
+ *
+ * Returns:
+ * A new, copied DOM element tree.
+ */
+ copyElement: function (elem)
+ {
+ var i, el;
+ if (elem.nodeType == Strophe.ElementType.NORMAL) {
+ el = Strophe.xmlElement(elem.tagName);
+
+ for (i = 0; i < elem.attributes.length; i++) {
+ el.setAttribute(elem.attributes[i].nodeName.toLowerCase(),
+ elem.attributes[i].value);
+ }
+
+ for (i = 0; i < elem._childNodes.length; i++) {
+ el.appendChild(Strophe.copyElement(elem._childNodes[i]));
+ }
+ } else if (elem.nodeType == Strophe.ElementType.TEXT) {
+ el = Strophe.xmlTextNode(elem.nodeValue);
+ }
+
+ return el;
+ },
+
+ /** Function: escapeNode
+ * Escape the node part (also called local part) of a JID.
+ *
+ * Parameters:
+ * (String) node - A node (or local part).
+ *
+ * Returns:
+ * An escaped node (or local part).
+ */
+ escapeNode: function (node)
+ {
+ return node.replace(/^\s+|\s+$/g, '')
+ .replace(/\\/g, "\\5c")
+ .replace(/ /g, "\\20")
+ .replace(/\"/g, "\\22")
+ .replace(/\&/g, "\\26")
+ .replace(/\'/g, "\\27")
+ .replace(/\//g, "\\2f")
+ .replace(/:/g, "\\3a")
+ .replace(/</g, "\\3c")
+ .replace(/>/g, "\\3e")
+ .replace(/@/g, "\\40");
+ },
+
+ /** Function: unescapeNode
+ * Unescape a node part (also called local part) of a JID.
+ *
+ * Parameters:
+ * (String) node - A node (or local part).
+ *
+ * Returns:
+ * An unescaped node (or local part).
+ */
+ unescapeNode: function (node)
+ {
+ return node.replace(/\\20/g, " ")
+ .replace(/\\22/g, '"')
+ .replace(/\\26/g, "&")
+ .replace(/\\27/g, "'")
+ .replace(/\\2f/g, "/")
+ .replace(/\\3a/g, ":")
+ .replace(/\\3c/g, "<")
+ .replace(/\\3e/g, ">")
+ .replace(/\\40/g, "@")
+ .replace(/\\5c/g, "\\");
+ },
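+
+ /* Usage sketch (illustrative): escapeNode() and unescapeNode() are
+ * inverses for the characters listed above.
+ * > Strophe.escapeNode("d'artagnan@musketeers");
+ * >   // => "d\27artagnan\40musketeers"
+ * > Strophe.unescapeNode("d\27artagnan\40musketeers");
+ * >   // => "d'artagnan@musketeers"
+ */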
+
+ /** Function: getNodeFromJid
+ * Get the node portion of a JID String.
+ *
+ * Parameters:
+ * (String) jid - A JID.
+ *
+ * Returns:
+ * A String containing the node.
+ */
+ getNodeFromJid: function (jid)
+ {
+ if (jid.indexOf("@") < 0) { return null; }
+ return jid.split("@")[0];
+ },
+
+ /** Function: getDomainFromJid
+ * Get the domain portion of a JID String.
+ *
+ * Parameters:
+ * (String) jid - A JID.
+ *
+ * Returns:
+ * A String containing the domain.
+ */
+ getDomainFromJid: function (jid)
+ {
+ var bare = Strophe.getBareJidFromJid(jid);
+ if (bare.indexOf("@") < 0) {
+ return bare;
+ } else {
+ var parts = bare.split("@");
+ parts.splice(0, 1);
+ return parts.join('@');
+ }
+ },
+
+ /** Function: getResourceFromJid
+ * Get the resource portion of a JID String.
+ *
+ * Parameters:
+ * (String) jid - A JID.
+ *
+ * Returns:
+ * A String containing the resource.
+ */
+ getResourceFromJid: function (jid)
+ {
+ var s = jid.split("/");
+ if (s.length < 2) { return null; }
+ s.splice(0, 1);
+ return s.join('/');
+ },
+
+ /** Function: getBareJidFromJid
+ * Get the bare JID from a JID String.
+ *
+ * Parameters:
+ * (String) jid - A JID.
+ *
+ * Returns:
+ * A String containing the bare JID.
+ */
+ getBareJidFromJid: function (jid)
+ {
+ return jid ? jid.split("/")[0] : null;
+ },
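+
+ /* Usage sketch (illustrative): the JID below is hypothetical.
+ * > var jid = "romeo@example.com/orchard";
+ * > Strophe.getNodeFromJid(jid);      // "romeo"
+ * > Strophe.getDomainFromJid(jid);    // "example.com"
+ * > Strophe.getResourceFromJid(jid);  // "orchard"
+ * > Strophe.getBareJidFromJid(jid);   // "romeo@example.com"
+ */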
+
+ /** Function: log
+ * User overrideable logging function.
+ *
+ * This function is called whenever the Strophe library calls any
+ * of the logging functions. The default implementation of this
+ * function does nothing. If client code wishes to handle the logging
+ * messages, it should override this with
+ * > Strophe.log = function (level, msg) {
+ * > (user code here)
+ * > };
+ *
+ * Please note that data sent and received over the wire is logged
+ * via Strophe.Connection.rawInput() and Strophe.Connection.rawOutput().
+ *
+ * The different levels and their meanings are
+ *
+ * DEBUG - Messages useful for debugging purposes.
+ * INFO - Informational messages. This is mostly information like
+ * 'disconnect was called' or 'SASL auth succeeded'.
+ * WARN - Warnings about potential problems. This is mostly used
+ * to report transient connection errors like request timeouts.
+ * ERROR - Some error occurred.
+ * FATAL - A non-recoverable fatal error occurred.
+ *
+ * Parameters:
+ * (Integer) level - The log level of the log message. This will
+ * be one of the values in Strophe.LogLevel.
+ * (String) msg - The log message.
+ */
+ log: function (level, msg)
+ {
+ return;
+ },
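+
+ /* Usage sketch (illustrative): route warnings and errors to the
+ * browser console, assuming one is available.
+ * > Strophe.log = function (level, msg) {
+ * >     if (level >= Strophe.LogLevel.WARN && window.console) {
+ * >         console.log("strophe [" + level + "] " + msg);
+ * >     }
+ * > };
+ */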
+
+ /** Function: debug
+ * Log a message at the Strophe.LogLevel.DEBUG level.
+ *
+ * Parameters:
+ * (String) msg - The log message.
+ */
+ debug: function(msg)
+ {
+ this.log(this.LogLevel.DEBUG, msg);
+ },
+
+ /** Function: info
+ * Log a message at the Strophe.LogLevel.INFO level.
+ *
+ * Parameters:
+ * (String) msg - The log message.
+ */
+ info: function (msg)
+ {
+ this.log(this.LogLevel.INFO, msg);
+ },
+
+ /** Function: warn
+ * Log a message at the Strophe.LogLevel.WARN level.
+ *
+ * Parameters:
+ * (String) msg - The log message.
+ */
+ warn: function (msg)
+ {
+ this.log(this.LogLevel.WARN, msg);
+ },
+
+ /** Function: error
+ * Log a message at the Strophe.LogLevel.ERROR level.
+ *
+ * Parameters:
+ * (String) msg - The log message.
+ */
+ error: function (msg)
+ {
+ this.log(this.LogLevel.ERROR, msg);
+ },
+
+ /** Function: fatal
+ * Log a message at the Strophe.LogLevel.FATAL level.
+ *
+ * Parameters:
+ * (String) msg - The log message.
+ */
+ fatal: function (msg)
+ {
+ this.log(this.LogLevel.FATAL, msg);
+ },
+
+ /** Function: serialize
+ * Render a DOM element and all descendants to a String.
+ *
+ * Parameters:
+ * (XMLElement) elem - A DOM element.
+ *
+ * Returns:
+ * The serialized element tree as a String.
+ */
+ serialize: function (elem)
+ {
+ var result;
+
+ if (!elem) { return null; }
+
+ if (typeof(elem.tree) === "function") {
+ elem = elem.tree();
+ }
+
+ var nodeName = elem.nodeName.toLowerCase();
+ var i, child;
+
+ if (elem.getAttribute("_realname")) {
+ nodeName = elem.getAttribute("_realname").toLowerCase();
+ }
+
+ result = "<" + nodeName.toLowerCase();
+ for (i = 0; i < elem.attributes.length; i++) {
+ if(elem.attributes[i].nodeName.toLowerCase() != "_realname") {
+ result += " " + elem.attributes[i].nodeName.toLowerCase() +
+ "='" + elem.attributes[i].value
+ .replace(/&/g, "&amp;")
+ .replace(/\'/g, "&apos;")
+ .replace(/</g, "&lt;") + "'";
+ }
+ }
+
+ if (elem._childNodes.length > 0) {
+ result += ">";
+ for (i = 0; i < elem._childNodes.length; i++) {
+ child = elem._childNodes[i];
+ if (child.nodeType == Strophe.ElementType.NORMAL) {
+ // normal element, so recurse
+ result += Strophe.serialize(child);
+ } else if (child.nodeType == Strophe.ElementType.TEXT) {
+ // text element
+ result += child.nodeValue;
+ }
+ }
+ result += "</" + nodeName.toLowerCase() + ">";
+ } else {
+ result += "/>";
+ }
+
+ return result;
+ },
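+
+ /* Usage sketch (illustrative): serialize() accepts either a DOM
+ * element or a Strophe.Builder.
+ * > Strophe.serialize($pres());
+ * >   // => "<presence xmlns='jabber:client'/>"
+ */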
+
+ /** PrivateVariable: _requestId
+ * _Private_ variable that keeps track of the request ids for
+ * connections.
+ */
+ _requestId: 0,
+
+ /** PrivateVariable: Strophe._connectionPlugins
+ * _Private_ variable used to store plugin names that need
+ * initialization on Strophe.Connection construction.
+ */
+ _connectionPlugins: {},
+
+ /** Function: addConnectionPlugin
+ * Extends the Strophe.Connection object with the given plugin.
+ *
+ * Parameters:
+ * (String) name - The name of the extension.
+ * (Object) ptype - The plugin's prototype.
+ */
+ addConnectionPlugin: function (name, ptype)
+ {
+ Strophe._connectionPlugins[name] = ptype;
+ }
+};
+
+/** Class: Strophe.Builder
+ * XML DOM builder.
+ *
+ * This object provides an interface similar to jQuery but for building
+ * DOM elements easily and rapidly. All the functions except for toString()
+ * and tree() return the object, so calls can be chained. Here's an
+ * example using the $iq() builder helper.
+ * > $iq({to: 'you', from: 'me', type: 'get', id: '1'})
+ * > .c('query', {xmlns: 'strophe:example'})
+ * > .c('example')
+ * > .toString()
+ * The above generates this XML fragment
+ * > <iq to='you' from='me' type='get' id='1'>
+ * > <query xmlns='strophe:example'>
+ * > <example/>
+ * > </query>
+ * > </iq>
+ * The corresponding DOM manipulations to get a similar fragment would be
+ * a lot more tedious and probably involve several helper variables.
+ *
+ * Since adding children makes new operations operate on the child, up()
+ * is provided to traverse up the tree. To add two children, do
+ * > builder.c('child1', ...).up().c('child2', ...)
+ * The next operation on the Builder will be relative to the second child.
+ */
+
+/** Constructor: Strophe.Builder
+ * Create a Strophe.Builder object.
+ *
+ * The attributes should be passed in object notation. For example
+ * > var b = new Strophe.Builder('message', {to: 'you', from: 'me'});
+ * or
+ * > var b = new Strophe.Builder('message', {'xml:lang': 'en'});
+ *
+ * Parameters:
+ * (String) name - The name of the root element.
+ * (Object) attrs - The attributes for the root element in object notation.
+ *
+ * Returns:
+ * A new Strophe.Builder.
+ */
+Strophe.Builder = function (name, attrs)
+{
+ // Set correct namespace for jabber:client elements
+ if (name == "presence" || name == "message" || name == "iq") {
+ if (attrs && !attrs.xmlns) {
+ attrs.xmlns = Strophe.NS.CLIENT;
+ } else if (!attrs) {
+ attrs = {xmlns: Strophe.NS.CLIENT};
+ }
+ }
+
+ // Holds the tree being built.
+ this.nodeTree = Strophe.xmlElement(name, attrs);
+
+ // Points to the current operation node.
+ this.node = this.nodeTree;
+};
+
+Strophe.Builder.prototype = {
+ /** Function: tree
+ * Return the DOM tree.
+ *
+ * This function returns the current DOM tree as an element object. This
+ * is suitable for passing to functions like Strophe.Connection.send().
+ *
+ * Returns:
+ * The DOM tree as a element object.
+ */
+ tree: function ()
+ {
+ return this.nodeTree;
+ },
+
+ /** Function: toString
+ * Serialize the DOM tree to a String.
+ *
+ * This function returns a string serialization of the current DOM
+ * tree. It is often used internally to pass data to a
+ * Strophe.Request object.
+ *
+ * Returns:
+ * The serialized DOM tree in a String.
+ */
+ toString: function ()
+ {
+ return Strophe.serialize(this.nodeTree);
+ },
+
+ /** Function: up
+ * Make the current parent element the new current element.
+ *
+ * This function is often used after c() to traverse back up the tree.
+ * For example, to add two children to the same element
+ * > builder.c('child1', {}).up().c('child2', {});
+ *
+ * Returns:
+ * The Strophe.Builder object.
+ */
+ up: function ()
+ {
+ this.node = this.node.parentNode;
+ return this;
+ },
+
+ /** Function: attrs
+ * Add or modify attributes of the current element.
+ *
+ * The attributes should be passed in object notation. This function
+ * does not move the current element pointer.
+ *
+ * Parameters:
+ * (Object) moreattrs - The attributes to add/modify in object notation.
+ *
+ * Returns:
+ * The Strophe.Builder object.
+ */
+ attrs: function (moreattrs)
+ {
+ for (var k in moreattrs) {
+ if (moreattrs.hasOwnProperty(k)) {
+ this.node.setAttribute(k, moreattrs[k]);
+ }
+ }
+ return this;
+ },
+
+ /** Function: c
+ * Add a child to the current element and make it the new current
+ * element.
+ *
+ * This function moves the current element pointer to the child. If you
+ * need to add another child, it is necessary to use up() to go back
+ * to the parent in the tree.
+ *
+ * Parameters:
+ * (String) name - The name of the child.
+ * (Object) attrs - The attributes of the child in object notation.
+ *
+ * Returns:
+ * The Strophe.Builder object.
+ */
+ c: function (name, attrs)
+ {
+ var child = Strophe.xmlElement(name, attrs);
+ this.node.appendChild(child);
+ this.node = child;
+ return this;
+ },
+
+ /** Function: cnode
+ * Add a child to the current element and make it the new current
+ * element.
+ *
+ * This function is the same as c() except that instead of using a
+ * name and an attributes object to create the child it uses an
+ * existing DOM element object.
+ *
+ * Parameters:
+ * (XMLElement) elem - A DOM element.
+ *
+ * Returns:
+ * The Strophe.Builder object.
+ */
+ cnode: function (elem)
+ {
+ var xmlGen = Strophe.xmlGenerator();
+ var newElem = xmlGen.importNode ? xmlGen.importNode(elem, true) : Strophe.copyElement(elem);
+ this.node.appendChild(newElem);
+ this.node = newElem;
+ return this;
+ },
+
+ /** Function: t
+ * Add a child text element.
+ *
+ * This *does not* make the child the new current element since there
+ * are no children of text elements.
+ *
+ * Parameters:
+ * (String) text - The text data to append to the current element.
+ *
+ * Returns:
+ * The Strophe.Builder object.
+ */
+ t: function (text)
+ {
+ var child = Strophe.xmlTextNode(text);
+ this.node.appendChild(child);
+ return this;
+ }
+};
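+
+/* Usage sketch (illustrative): combining c(), up(), attrs(), and t().
+ * The room JID is hypothetical.
+ * > $pres({to: "room@muc.example.com/nick"})
+ * >     .c("x", {xmlns: Strophe.NS.MUC})
+ * >     .up()
+ * >     .attrs({id: "join1"})
+ * >     .toString();
+ */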
+
+
+/** PrivateClass: Strophe.Handler
+ * _Private_ helper class for managing stanza handlers.
+ *
+ * A Strophe.Handler encapsulates a user provided callback function to be
+ * executed when matching stanzas are received by the connection.
+ * Handlers can be either one-off or persistent depending on their
+ * return value. Returning true will cause a Handler to remain active, and
+ * returning false will remove the Handler.
+ *
+ * Users will not use Strophe.Handler objects directly, but instead they
+ * will use Strophe.Connection.addHandler() and
+ * Strophe.Connection.deleteHandler().
+ */
+
+/** PrivateConstructor: Strophe.Handler
+ * Create and initialize a new Strophe.Handler.
+ *
+ * Parameters:
+ * (Function) handler - A function to be executed when the handler is run.
+ * (String) ns - The namespace to match.
+ * (String) name - The element name to match.
+ * (String) type - The element type to match.
+ * (String) id - The element id attribute to match.
+ * (String) from - The element from attribute to match.
+ * (Object) options - Handler options
+ *
+ * Returns:
+ * A new Strophe.Handler object.
+ */
+Strophe.Handler = function (handler, ns, name, type, id, from, options)
+{
+ this.handler = handler;
+ this.ns = ns;
+ this.name = name;
+ this.type = type;
+ this.id = id;
+ this.options = options || {matchBare: false};
+
+ // default matchBare to false if undefined
+ if (!this.options.matchBare) {
+ this.options.matchBare = false;
+ }
+
+ if (this.options.matchBare) {
+ this.from = from ? Strophe.getBareJidFromJid(from) : null;
+ } else {
+ this.from = from;
+ }
+
+ // whether the handler is a user handler or a system handler
+ this.user = true;
+};
+
+Strophe.Handler.prototype = {
+ /** PrivateFunction: isMatch
+ * Tests if a stanza matches the Strophe.Handler.
+ *
+ * Parameters:
+ * (XMLElement) elem - The XML element to test.
+ *
+ * Returns:
+ * true if the stanza matches and false otherwise.
+ */
+ isMatch: function (elem)
+ {
+ var nsMatch;
+ var from = null;
+
+ if (this.options.matchBare) {
+ from = Strophe.getBareJidFromJid(elem.getAttribute('from'));
+ } else {
+ from = elem.getAttribute('from');
+ }
+
+ nsMatch = false;
+ if (!this.ns) {
+ nsMatch = true;
+ } else {
+ var that = this;
+ Strophe.forEachChild(elem, null, function (elem) {
+ if (elem.getAttribute("xmlns") == that.ns) {
+ nsMatch = true;
+ }
+ });
+
+ nsMatch = nsMatch || elem.getAttribute("xmlns") == this.ns;
+ }
+
+ if (nsMatch &&
+ (!this.name || Strophe.isTagEqual(elem, this.name)) &&
+ (!this.type || elem.getAttribute("type") == this.type) &&
+ (!this.id || elem.getAttribute("id") == this.id) &&
+ (!this.from || from == this.from)) {
+ return true;
+ }
+
+ return false;
+ },
+
+ /** PrivateFunction: run
+ * Run the callback on a matching stanza.
+ *
+ * Parameters:
+ * (XMLElement) elem - The DOM element that triggered the
+ * Strophe.Handler.
+ *
+ * Returns:
+ * A boolean indicating if the handler should remain active.
+ */
+ run: function (elem)
+ {
+ var result = null;
+ try {
+ result = this.handler(elem);
+ } catch (e) {
+ if (e.sourceURL) {
+ Strophe.fatal("error: " + this.handler +
+ " " + e.sourceURL + ":" +
+ e.line + " - " + e.name + ": " + e.message);
+ } else if (e.fileName) {
+ if (typeof(console) != "undefined") {
+ console.trace();
+ console.error(this.handler, " - error - ", e, e.message);
+ }
+ Strophe.fatal("error: " + this.handler + " " +
+ e.fileName + ":" + e.lineNumber + " - " +
+ e.name + ": " + e.message);
+ } else {
+ Strophe.fatal("error: " + this.handler);
+ }
+
+ throw e;
+ }
+
+ return result;
+ },
+
+ /** PrivateFunction: toString
+ * Get a String representation of the Strophe.Handler object.
+ *
+ * Returns:
+ * A String.
+ */
+ toString: function ()
+ {
+ return "{Handler: " + this.handler + "(" + this.name + "," +
+ this.id + "," + this.ns + ")}";
+ }
+};
+
+/** PrivateClass: Strophe.TimedHandler
+ * _Private_ helper class for managing timed handlers.
+ *
+ * A Strophe.TimedHandler encapsulates a user provided callback that
+ * should be called after a certain period of time or at regular
+ * intervals. The return value of the callback determines whether the
+ * Strophe.TimedHandler will continue to fire.
+ *
+ * Users will not use Strophe.TimedHandler objects directly, but instead
+ * they will use Strophe.Connection.addTimedHandler() and
+ * Strophe.Connection.deleteTimedHandler().
+ */
+
+/** PrivateConstructor: Strophe.TimedHandler
+ * Create and initialize a new Strophe.TimedHandler object.
+ *
+ * Parameters:
+ * (Integer) period - The number of milliseconds to wait before the
+ * handler is called.
+ * (Function) handler - The callback to run when the handler fires. This
+ * function should take no arguments.
+ *
+ * Returns:
+ * A new Strophe.TimedHandler object.
+ */
+Strophe.TimedHandler = function (period, handler)
+{
+ this.period = period;
+ this.handler = handler;
+
+ this.lastCalled = new Date().getTime();
+ this.user = true;
+};
+
+Strophe.TimedHandler.prototype = {
+ /** PrivateFunction: run
+ * Run the callback for the Strophe.TimedHandler.
+ *
+ * Returns:
+ * true if the Strophe.TimedHandler should be called again, and false
+ * otherwise.
+ */
+ run: function ()
+ {
+ this.lastCalled = new Date().getTime();
+ return this.handler();
+ },
+
+ /** PrivateFunction: reset
+ * Reset the last called time for the Strophe.TimedHandler.
+ */
+ reset: function ()
+ {
+ this.lastCalled = new Date().getTime();
+ },
+
+ /** PrivateFunction: toString
+ * Get a string representation of the Strophe.TimedHandler object.
+ *
+ * Returns:
+ * The string representation.
+ */
+ toString: function ()
+ {
+ return "{TimedHandler: " + this.handler + "(" + this.period +")}";
+ }
+};
+
+/** PrivateClass: Strophe.Request
+ * _Private_ helper class that provides a cross implementation abstraction
+ * for a BOSH related XMLHttpRequest.
+ *
+ * The Strophe.Request class is used internally to encapsulate BOSH request
+ * information. It is not meant to be used from user code.
+ */
+
+/** PrivateConstructor: Strophe.Request
+ * Create and initialize a new Strophe.Request object.
+ *
+ * Parameters:
+ * (XMLElement) elem - The XML data to be sent in the request.
+ * (Function) func - The function that will be called when the
+ * XMLHttpRequest readyState changes.
+ * (Integer) rid - The BOSH rid attribute associated with this request.
+ * (Integer) sends - The number of times this same request has been
+ * sent.
+ */
+Strophe.Request = function (elem, func, rid, sends)
+{
+ this.id = ++Strophe._requestId;
+ this.xmlData = elem;
+ this.data = Strophe.serialize(elem);
+ // save original function in case we need to make a new request
+ // from this one.
+ this.origFunc = func;
+ this.func = func;
+ this.rid = rid;
+ this.date = NaN;
+ this.sends = sends || 0;
+ this.abort = false;
+ this.dead = null;
+ this.age = function () {
+ if (!this.date) { return 0; }
+ var now = new Date();
+ return (now - this.date) / 1000;
+ };
+ this.timeDead = function () {
+ if (!this.dead) { return 0; }
+ var now = new Date();
+ return (now - this.dead) / 1000;
+ };
+ this.xhr = this._newXHR();
+};
+
+Strophe.Request.prototype = {
+ /** PrivateFunction: getResponse
+ * Get a response from the underlying XMLHttpRequest.
+ *
+ * This function attempts to get a response from the request and checks
+ * for errors.
+ *
+ * Throws:
+ * "parsererror" - A parser error occured.
+ *
+ * Returns:
+ * The DOM element tree of the response.
+ */
+ getResponse: function ()
+ {
+ // console.log("getResponse:", this.xhr.responseXML, ":", this.xhr.responseText);
+
+ var node = null;
+ if (this.xhr.responseXML && this.xhr.responseXML.documentElement) {
+ node = this.xhr.responseXML.documentElement;
+ if (node.tagName == "parsererror") {
+ Strophe.error("invalid response received");
+ Strophe.error("responseText: " + this.xhr.responseText);
+ Strophe.error("responseXML: " +
+ Strophe.serialize(this.xhr.responseXML));
+ throw "parsererror";
+ }
+ } else if (this.xhr.responseText) {
+ // Hack for node.
+ var _div = document.createElement("div");
+ _div.innerHTML = this.xhr.responseText;
+ node = _div._childNodes[0];
+
+ Strophe.error("invalid response received");
+ Strophe.error("responseText: " + this.xhr.responseText);
+ Strophe.error("responseXML: " +
+ Strophe.serialize(this.xhr.responseXML));
+ }
+
+ return node;
+ },
+
+ /** PrivateFunction: _newXHR
+ * _Private_ helper function to create XMLHttpRequests.
+ *
+ * This function creates XMLHttpRequests across all implementations.
+ *
+ * Returns:
+ * A new XMLHttpRequest.
+ */
+ _newXHR: function ()
+ {
+ var xhr = null;
+ if (window.XMLHttpRequest) {
+ xhr = new XMLHttpRequest();
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType("text/xml");
+ }
+ } else if (window.ActiveXObject) {
+ xhr = new ActiveXObject("Microsoft.XMLHTTP");
+ }
+
+ // use Function.bind() to prepend ourselves as an argument
+ xhr.onreadystatechange = this.func.bind(null, this);
+
+ return xhr;
+ }
+};
+
+/** Class: Strophe.Connection
+ * XMPP Connection manager.
+ *
+ * This class is the main part of Strophe. It manages a BOSH connection
+ * to an XMPP server and dispatches events to the user callbacks as
+ * data arrives. It supports SASL PLAIN, SASL DIGEST-MD5, and legacy
+ * authentication.
+ *
+ * After creating a Strophe.Connection object, the user will typically
+ * call connect() with a user supplied callback to handle connection level
+ * events like authentication failure, disconnection, or connection
+ * complete.
+ *
+ * The user will also have several event handlers defined by using
+ * addHandler() and addTimedHandler(). These will allow the user code to
+ * respond to interesting stanzas or do something periodically with the
+ * connection. These handlers will be active once authentication is
+ * finished.
+ *
+ * To send data to the connection, use send().
+ */
+
+/** Constructor: Strophe.Connection
+ * Create and initialize a Strophe.Connection object.
+ *
+ * Parameters:
+ * (String) service - The BOSH service URL.
+ *
+ * Returns:
+ * A new Strophe.Connection object.
+ */
+Strophe.Connection = function (service)
+{
+ /* The path to the httpbind service. */
+ this.service = service;
+ /* The connected JID. */
+ this.jid = "";
+ /* request id for body tags */
+ this.rid = Math.floor(Math.random() * 4294967295);
+ /* The current session ID. */
+ this.sid = null;
+ this.streamId = null;
+ /* stream:features */
+ this.features = null;
+
+ // SASL
+ this.do_session = false;
+ this.do_bind = false;
+
+ // handler lists
+ this.timedHandlers = [];
+ this.handlers = [];
+ this.removeTimeds = [];
+ this.removeHandlers = [];
+ this.addTimeds = [];
+ this.addHandlers = [];
+
+ this._idleTimeout = null;
+ this._disconnectTimeout = null;
+
+ this.authenticated = false;
+ this.disconnecting = false;
+ this.connected = false;
+
+ this.errors = 0;
+
+ this.paused = false;
+
+ // default BOSH values
+ this.hold = 1;
+ this.wait = 60;
+ this.window = 5;
+
+ this._data = [];
+ this._requests = [];
+ this._uniqueId = Math.round(Math.random() * 10000);
+
+ this._sasl_success_handler = null;
+ this._sasl_failure_handler = null;
+ this._sasl_challenge_handler = null;
+
+ // setup onIdle callback every 1/10th of a second
+ this._idleTimeout = setTimeout(this._onIdle.bind(this), 100);
+
+ // initialize plugins
+ for (var k in Strophe._connectionPlugins) {
+ if (Strophe._connectionPlugins.hasOwnProperty(k)) {
+ var ptype = Strophe._connectionPlugins[k];
+ // jslint complains about the line below, but this is fine
+ var F = function () {};
+ F.prototype = ptype;
+ this[k] = new F();
+ this[k].init(this);
+ }
+ }
+};
+
+Strophe.Connection.prototype = {
+ /** Function: reset
+ * Reset the connection.
+ *
+ * This function should be called after a connection is disconnected
+ * before that connection is reused.
+ */
+ reset: function ()
+ {
+ this.rid = Math.floor(Math.random() * 4294967295);
+
+ this.sid = null;
+ this.streamId = null;
+
+ // SASL
+ this.do_session = false;
+ this.do_bind = false;
+
+ // handler lists
+ this.timedHandlers = [];
+ this.handlers = [];
+ this.removeTimeds = [];
+ this.removeHandlers = [];
+ this.addTimeds = [];
+ this.addHandlers = [];
+
+ this.authenticated = false;
+ this.disconnecting = false;
+ this.connected = false;
+
+ this.errors = 0;
+
+ this._requests = [];
+ this._uniqueId = Math.round(Math.random()*10000);
+ },
+
+ /** Function: pause
+ * Pause the request manager.
+ *
+ * This will prevent Strophe from sending any more requests to the
+ * server. This is very useful when many send() calls happen in quick
+ * succession; pausing causes Strophe to send the queued data in a
+ * single request, saving many round trips.
+ */
+ pause: function ()
+ {
+ this.paused = true;
+ },
+
+ /** Function: resume
+ * Resume the request manager.
+ *
+ * This resumes after pause() has been called.
+ */
+ resume: function ()
+ {
+ this.paused = false;
+ },
+
+ /** Function: getUniqueId
+ * Generate a unique ID for use in <iq/> elements.
+ *
+ * All <iq/> stanzas are required to have unique id attributes. This
+ * function makes creating these easy. Each connection instance has
+ * a counter which starts from zero, and the value of this counter
+ * plus a colon followed by the suffix becomes the unique id. If no
+ * suffix is supplied, the counter is used as the unique id.
+ *
+ * Suffixes are used to make debugging easier when reading the stream
+ * data, and their use is recommended. The counter resets to 0 for
+ * every new connection for the same reason. For connections to the
+ * same server that authenticate the same way, all the ids should be
+ * the same, which makes it easy to see changes. This is useful for
+ * automated testing as well.
+ *
+ * Parameters:
+ * (String) suffix - An optional suffix to append to the id.
+ *
+ * Returns:
+ * A unique string to be used for the id attribute.
+ */
+ getUniqueId: function (suffix)
+ {
+ if (typeof(suffix) == "string" || typeof(suffix) == "number") {
+ return ++this._uniqueId + ":" + suffix;
+ } else {
+ return ++this._uniqueId + "";
+ }
+ },
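+
+ /* Usage sketch (illustrative): conn is a hypothetical
+ * Strophe.Connection instance.
+ * > conn.getUniqueId();          // e.g. "1"
+ * > conn.getUniqueId("roster");  // e.g. "2:roster"
+ */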
+
+ /** Function: connect
+ * Starts the connection process.
+ *
+ * As the connection process proceeds, the user supplied callback will
+ * be triggered multiple times with status updates. The callback
+ * should take two arguments - the status code and the error condition.
+ *
+ * The status code will be one of the values in the Strophe.Status
+ * constants. The error condition will be one of the conditions
+ * defined in RFC 3920 or the condition 'strophe-parsererror'.
+ *
+ * Please see XEP 124 for a more detailed explanation of the optional
+ * parameters below.
+ *
+ * Parameters:
+ * (String) jid - The user's JID. This may be a bare JID,
+ * or a full JID. If a node is not supplied, SASL ANONYMOUS
+ * authentication will be attempted.
+ * (String) pass - The user's password.
+ * (Function) callback - The connect callback function.
+ * (Integer) wait - The optional HTTPBIND wait value. This is the
+ * time the server will wait before returning an empty result for
+ * a request. The default setting of 60 seconds is recommended.
+ * Other settings will require tweaks to the Strophe.TIMEOUT value.
+ * (Integer) hold - The optional HTTPBIND hold value. This is the
+ * number of connections the server will hold at one time. This
+ * should almost always be set to 1 (the default).
+ * (String) route - The optional connection route, added to the BOSH
+ * <body/> as the route attribute if given.
+ */
+ connect: function (jid, pass, callback, wait, hold, route)
+ {
+ this.jid = jid;
+ this.pass = pass;
+ this.connect_callback = callback;
+ this.disconnecting = false;
+ this.connected = false;
+ this.authenticated = false;
+ this.errors = 0;
+
+ this.wait = wait || this.wait;
+ this.hold = hold || this.hold;
+
+ // parse jid for domain and resource
+ this.domain = Strophe.getDomainFromJid(this.jid);
+
+ // build the body tag
+ var body_attrs = {
+ to: this.domain,
+ "xml:lang": "en",
+ wait: this.wait,
+ hold: this.hold,
+ content: "text/xml; charset=utf-8",
+ ver: "1.6",
+ "xmpp:version": "1.0",
+ "xmlns:xmpp": Strophe.NS.BOSH
+ };
+ if (route) {
+ body_attrs.route = route;
+ }
+
+ var body = this._buildBody().attrs(body_attrs);
+
+ this._changeConnectStatus(Strophe.Status.CONNECTING, null);
+
+ this._requests.push(
+ new Strophe.Request(body.tree(),
+ this._onRequestStateChange.bind(
+ this, this._connect_cb.bind(this)),
+ body.tree().getAttribute("rid")));
+ this._throttledRequestHandler();
+ },
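+
+ /* Usage sketch (illustrative): the service URL, JID, and password
+ * below are hypothetical.
+ * > var conn = new Strophe.Connection("/http-bind");
+ * > conn.connect("romeo@example.com", "secret", function (status) {
+ * >     if (status === Strophe.Status.CONNECTED) {
+ * >         conn.send($pres());
+ * >     } else if (status === Strophe.Status.DISCONNECTED) {
+ * >         // clean up user state here
+ * >     }
+ * > });
+ */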
+
+ /** Function: attach
+ * Attach to an already created and authenticated BOSH session.
+ *
+ * This function is provided to allow Strophe to attach to BOSH
+ * sessions which have been created externally, perhaps by a Web
+ * application. This is often used to support auto-login type features
+ * without putting user credentials into the page.
+ *
+ * Parameters:
+ * (String) jid - The full JID that is bound by the session.
+ * (String) sid - The SID of the BOSH session.
+ * (String) rid - The current RID of the BOSH session. This RID
+ * will be used by the next request.
+ * (Function) callback - The connect callback function.
+ * (Integer) wait - The optional HTTPBIND wait value. This is the
+ * time the server will wait before returning an empty result for
+ * a request. The default setting of 60 seconds is recommended.
+ * Other settings will require tweaks to the Strophe.TIMEOUT value.
+ * (Integer) hold - The optional HTTPBIND hold value. This is the
+ * number of connections the server will hold at one time. This
+ * should almost always be set to 1 (the default).
+ * (Integer) wind - The optional HTTPBIND window value. This is the
+ * allowed range of request ids that are valid. The default is 5.
+ */
+ attach: function (jid, sid, rid, callback, wait, hold, wind)
+ {
+ this.jid = jid;
+ this.sid = sid;
+ this.rid = rid;
+ this.connect_callback = callback;
+
+ this.domain = Strophe.getDomainFromJid(this.jid);
+
+ this.authenticated = true;
+ this.connected = true;
+
+ this.wait = wait || this.wait;
+ this.hold = hold || this.hold;
+ this.window = wind || this.window;
+
+ this._changeConnectStatus(Strophe.Status.ATTACHED, null);
+ },
+
+ /** Function: xmlInput
+ * User overrideable function that receives XML data coming into the
+ * connection.
+ *
+ * The default function does nothing. User code can override this with
+ * > Strophe.Connection.xmlInput = function (elem) {
+ * > (user code)
+ * > };
+ *
+ * Parameters:
+ * (XMLElement) elem - The XML data received by the connection.
+ */
+ xmlInput: function (elem)
+ {
+ return;
+ },
+
+ /** Function: xmlOutput
+ * User overrideable function that receives XML data sent to the
+ * connection.
+ *
+ * The default function does nothing. User code can override this with
+ * > Strophe.Connection.xmlOutput = function (elem) {
+ * > (user code)
+ * > };
+ *
+ * Parameters:
+ * (XMLElement) elem - The XML data sent by the connection.
+ */
+ xmlOutput: function (elem)
+ {
+ return;
+ },
+
+ /** Function: rawInput
+ * User overrideable function that receives raw data coming into the
+ * connection.
+ *
+ * The default function does nothing. User code can override this with
+ * > Strophe.Connection.rawInput = function (data) {
+ * > (user code)
+ * > };
+ *
+ * Parameters:
+ * (String) data - The data received by the connection.
+ */
+ rawInput: function (data)
+ {
+ return;
+ },
+
+ /** Function: rawOutput
+ * User overrideable function that receives raw data sent to the
+ * connection.
+ *
+ * The default function does nothing. User code can override this with
+ * > Strophe.Connection.rawOutput = function (data) {
+ * > (user code)
+ * > };
+ *
+ * Parameters:
+ * (String) data - The data sent by the connection.
+ */
+ rawOutput: function (data)
+ {
+ return;
+ },
+
+ /** Function: send
+ * Send a stanza.
+ *
+ * This function is called to push data onto the send queue to
+ * go out over the wire. Whenever a request is sent to the BOSH
+ * server, all pending data is sent and the queue is flushed.
+ *
+ * Parameters:
+ * (XMLElement |
+ * [XMLElement] |
+ * Strophe.Builder) elem - The stanza to send.
+ */
+ send: function (elem)
+ {
+ if (elem === null) { return; }
+ if (typeof(elem.sort) === "function") {
+ for (var i = 0; i < elem.length; i++) {
+ this._queueData(elem[i]);
+ }
+ } else if (typeof(elem.tree) === "function") {
+ this._queueData(elem.tree());
+ } else {
+ this._queueData(elem);
+ }
+
+ this._throttledRequestHandler();
+ clearTimeout(this._idleTimeout);
+ this._idleTimeout = setTimeout(this._onIdle.bind(this), 100);
+ },
+
+ /** Function: flush
+ * Immediately send any pending outgoing data.
+ *
+ * Normally send() queues outgoing data until the next idle period
+ * (100ms), which optimizes network use in the common cases when
+ * several send()s are called in succession. flush() can be used to
+ * immediately send all pending data.
+ */
+ flush: function ()
+ {
+ // cancel the pending idle period and run the idle function
+ // immediately
+ clearTimeout(this._idleTimeout);
+ this._onIdle();
+ },
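+
+ /* Usage sketch (illustrative): batch several stanzas into a single
+ * BOSH request; conn is a hypothetical connected Strophe.Connection.
+ * > conn.pause();
+ * > conn.send($msg({to: "a@example.com"}).c("body").t("hi"));
+ * > conn.send($msg({to: "b@example.com"}).c("body").t("hi"));
+ * > conn.resume();
+ * > conn.flush();  // send the queued stanzas immediately
+ */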
+
+ /** Function: sendIQ
+ * Helper function to send IQ stanzas.
+ *
+ * Parameters:
+ * (XMLElement) elem - The stanza to send.
+ * (Function) callback - The callback function for a successful request.
+ * (Function) errback - The callback function for a failed or timed
+ * out request. On timeout, the stanza will be null.
+ * (Integer) timeout - The time specified in milliseconds for a
+ * timeout to occur.
+ *
+ * Returns:
+ * The id used to send the IQ.
+ */
+ sendIQ: function(elem, callback, errback, timeout) {
+ var timeoutHandler = null;
+ var that = this;
+
+ if (typeof(elem.tree) === "function") {
+ elem = elem.tree();
+ }
+ var id = elem.getAttribute('id');
+
+ // inject id if not found
+ if (!id) {
+ id = this.getUniqueId("sendIQ");
+ elem.setAttribute("id", id);
+ }
+
+ var handler = this.addHandler(function (stanza) {
+ // remove timeout handler if there is one
+ if (timeoutHandler) {
+ that.deleteTimedHandler(timeoutHandler);
+ }
+
+ var iqtype = stanza.getAttribute('type');
+ if (iqtype == 'result') {
+ if (callback) {
+ callback(stanza);
+ }
+ } else if (iqtype == 'error') {
+ if (errback) {
+ errback(stanza);
+ }
+ } else {
+ throw {
+ name: "StropheError",
+ message: "Got bad IQ type of " + iqtype
+ };
+ }
+ }, null, 'iq', null, id);
+
+ // if timeout specified, setup timeout handler.
+ if (timeout) {
+ timeoutHandler = this.addTimedHandler(timeout, function () {
+ // get rid of normal handler
+ that.deleteHandler(handler);
+
+ // call errback on timeout with null stanza
+ if (errback) {
+ errback(null);
+ }
+ return false;
+ });
+ }
+
+ this.send(elem);
+
+ return id;
+ },
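+
+ /* Usage sketch (illustrative): request the roster and handle the
+ * result, an error, or a 5 second timeout (null stanza).
+ * > var iq = $iq({type: "get"}).c("query", {xmlns: Strophe.NS.ROSTER});
+ * > conn.sendIQ(iq,
+ * >             function (stanza) { Strophe.info("got roster"); },
+ * >             function (stanza) { Strophe.warn("roster failed"); },
+ * >             5000);
+ */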
+
+ /** PrivateFunction: _queueData
+ * Queue outgoing data for later sending. Also ensures that the data
+ * is a DOMElement.
+ */
+ _queueData: function (element) {
+ if (element === null ||
+ !element.tagName ||
+ !element._childNodes) {
+ throw {
+ name: "StropheError",
+ message: "Cannot queue non-DOMElement."
+ };
+ }
+
+ this._data.push(element);
+ },
+
+ /** PrivateFunction: _sendRestart
+ * Send an xmpp:restart stanza.
+ */
+ _sendRestart: function ()
+ {
+ this._data.push("restart");
+
+ this._throttledRequestHandler();
+ clearTimeout(this._idleTimeout);
+ this._idleTimeout = setTimeout(this._onIdle.bind(this), 100);
+ },
+
+ /** Function: addTimedHandler
+ * Add a timed handler to the connection.
+ *
+ * This function adds a timed handler. The provided handler will
+ * be called every period milliseconds until it returns false,
+ * the connection is terminated, or the handler is removed. Handlers
+ * that wish to continue being invoked should return true.
+ *
+ * Because of method binding it is necessary to save the result of
+ * this function if you wish to remove a handler with
+ * deleteTimedHandler().
+ *
+ * Note that user handlers are not active until authentication is
+ * successful.
+ *
+ * Parameters:
+ * (Integer) period - The period of the handler.
+ * (Function) handler - The callback function.
+ *
+ * Returns:
+ * A reference to the handler that can be used to remove it.
+ */
+ addTimedHandler: function (period, handler)
+ {
+ var thand = new Strophe.TimedHandler(period, handler);
+ this.addTimeds.push(thand);
+ return thand;
+ },
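+
+ /* Usage sketch (illustrative): an XMPP ping (XEP-0199) every 60
+ * seconds; returning true keeps the handler active.
+ * > var keepalive = conn.addTimedHandler(60000, function () {
+ * >     conn.send($iq({type: "get", id: conn.getUniqueId("ping")})
+ * >         .c("ping", {xmlns: "urn:xmpp:ping"}));
+ * >     return true;
+ * > });
+ */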
+
+ /** Function: deleteTimedHandler
+ * Delete a timed handler for a connection.
+ *
+ * This function removes a timed handler from the connection. The
+ * handRef parameter is *not* the function passed to addTimedHandler(),
+ * but is the reference returned from addTimedHandler().
+ *
+ * Parameters:
+ * (Strophe.TimedHandler) handRef - The handler reference.
+ */
+ deleteTimedHandler: function (handRef)
+ {
+ // this must be done in the Idle loop so that we don't change
+ // the handlers during iteration
+ this.removeTimeds.push(handRef);
+ },
+
+ /** Function: addHandler
+ * Add a stanza handler for the connection.
+ *
+ * This function adds a stanza handler to the connection. The
+ * handler callback will be called for any stanza that matches
+ * the parameters. Note that if multiple parameters are supplied,
+ * they must all match for the handler to be invoked.
+ *
+ * The handler will receive the stanza that triggered it as its argument.
+ * The handler should return true if it is to be invoked again;
+ * returning false will remove the handler after it returns.
+ *
+ * As a convenience, the ns parameter applies to the top level element
+ * and also any of its immediate children. This is primarily to make
+ * matching /iq/query elements easy.
+ *
+ * The options argument contains handler matching flags that affect how
+ * matches are determined. Currently the only flag is matchBare (a
+ * boolean). When matchBare is true, the from parameter and the from
+ * attribute on the stanza will be matched as bare JIDs instead of
+ * full JIDs. To use this, pass {matchBare: true} as the value of
+ * options. The default value for matchBare is false.
+ *
+ * The return value should be saved if you wish to remove the handler
+ * with deleteHandler().
+ *
+ * Parameters:
+ * (Function) handler - The user callback.
+ * (String) ns - The namespace to match.
+ * (String) name - The stanza name to match.
+ * (String) type - The stanza type attribute to match.
+ * (String) id - The stanza id attribute to match.
+ * (String) from - The stanza from attribute to match.
+ * (Object) options - The handler options.
+ *
+ * Returns:
+ * A reference to the handler that can be used to remove it.
+ */
+ addHandler: function (handler, ns, name, type, id, from, options)
+ {
+ var hand = new Strophe.Handler(handler, ns, name, type, id, from, options);
+ this.addHandlers.push(hand);
+ return hand;
+ },
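+
+ /* Usage sketch (illustrative): log the body of incoming chat
+ * messages; returning true keeps the handler installed.
+ * > conn.addHandler(function (msg) {
+ * >     var body = msg.getElementsByTagName("body")[0];
+ * >     if (body) { Strophe.info("chat: " + Strophe.getText(body)); }
+ * >     return true;
+ * > }, null, "message", "chat");
+ */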
+
+ /** Function: deleteHandler
+ * Delete a stanza handler for a connection.
+ *
+ * This function removes a stanza handler from the connection. The
+ * handRef parameter is *not* the function passed to addHandler(),
+ * but is the reference returned from addHandler().
+ *
+ * Parameters:
+ * (Strophe.Handler) handRef - The handler reference.
+ */
+ deleteHandler: function (handRef)
+ {
+ // this must be done in the Idle loop so that we don't change
+ // the handlers during iteration
+ this.removeHandlers.push(handRef);
+ },
+
+ /** Function: disconnect
+ * Start the graceful disconnection process.
+ *
+ * This function starts the disconnection process. This process starts
+ * by sending unavailable presence and sending BOSH body of type
+ * terminate. A timeout handler makes sure that disconnection happens
+ * even if the BOSH server does not respond.
+ *
+ * The user supplied connection callback will be notified of the
+ * progress as this process happens.
+ *
+ * Parameters:
+ * (String) reason - The reason the disconnect is occurring.
+ */
+ disconnect: function (reason)
+ {
+ this._changeConnectStatus(Strophe.Status.DISCONNECTING, reason);
+
+ Strophe.info("Disconnect was called because: " + reason);
+ if (this.connected) {
+ // setup timeout handler
+ this._disconnectTimeout = this._addSysTimedHandler(
+ 3000, this._onDisconnectTimeout.bind(this));
+ this._sendTerminate();
+ }
+ },
+
+ /** PrivateFunction: _changeConnectStatus
+ * _Private_ helper function that makes sure plugins and the user's
+ * callback are notified of connection status changes.
+ *
+ * Parameters:
+ * (Integer) status - the new connection status, one of the values
+ * in Strophe.Status
+ * (String) condition - the error condition or null
+ */
+ _changeConnectStatus: function (status, condition)
+ {
+ // notify all plugins listening for status changes
+ for (var k in Strophe._connectionPlugins) {
+ if (Strophe._connectionPlugins.hasOwnProperty(k)) {
+ var plugin = this[k];
+ if (plugin.statusChanged) {
+ try {
+ plugin.statusChanged(status, condition);
+ } catch (err) {
+ Strophe.error("" + k + " plugin caused an exception " +
+ "changing status: " + err);
+ }
+ }
+ }
+ }
+
+ // notify the user's callback
+ if (this.connect_callback) {
+ try {
+ this.connect_callback(status, condition);
+ } catch (e) {
+ Strophe.error("User connection callback caused an " +
+ "exception: " + e);
+ }
+ }
+ },
+
+ /** PrivateFunction: _buildBody
+ * _Private_ helper function to generate the <body/> wrapper for BOSH.
+ *
+ * Returns:
+ * A Strophe.Builder with a <body/> element.
+ */
+ _buildBody: function ()
+ {
+ var bodyWrap = $build('body', {
+ rid: this.rid++,
+ xmlns: Strophe.NS.HTTPBIND
+ });
+
+ if (this.sid !== null) {
+ bodyWrap.attrs({sid: this.sid});
+ }
+
+ return bodyWrap;
+ },
+
+ /** PrivateFunction: _removeRequest
+ * _Private_ function to remove a request from the queue.
+ *
+ * Parameters:
+ * (Strophe.Request) req - The request to remove.
+ */
+ _removeRequest: function (req)
+ {
+ Strophe.debug("removing request");
+
+ var i;
+ for (i = this._requests.length - 1; i >= 0; i--) {
+ if (req == this._requests[i]) {
+ this._requests.splice(i, 1);
+ }
+ }
+
+ // IE6 fails on setting to null, so set to empty function
+ req.xhr.onreadystatechange = function () {};
+
+ this._throttledRequestHandler();
+ },
+
+ /** PrivateFunction: _restartRequest
+ * _Private_ function to restart a request that is presumed dead.
+ *
+ * Parameters:
+ * (Integer) i - The index of the request in the queue.
+ */
+ _restartRequest: function (i)
+ {
+ var req = this._requests[i];
+ if (req.dead === null) {
+ req.dead = new Date();
+ }
+
+ this._processRequest(i);
+ },
+
+ /** PrivateFunction: _processRequest
+ * _Private_ function to process a request in the queue.
+ *
+ * This function takes requests off the queue and sends them and
+ * restarts dead requests.
+ *
+ * Parameters:
+ * (Integer) i - The index of the request in the queue.
+ */
+ _processRequest: function (i)
+ {
+ var req = this._requests[i];
+ var reqStatus = -1;
+
+ try {
+ if (req.xhr.readyState == 4) {
+ reqStatus = req.xhr.status;
+ }
+ } catch (e) {
+ Strophe.error("caught an error in _requests[" + i +
+ "], reqStatus: " + reqStatus);
+ }
+
+ if (typeof(reqStatus) == "undefined") {
+ reqStatus = -1;
+ }
+
+ // make sure we limit the number of retries
+ if (req.sends > 5) {
+ this._onDisconnectTimeout();
+ return;
+ }
+
+ var time_elapsed = req.age();
+ var primaryTimeout = (!isNaN(time_elapsed) &&
+ time_elapsed > Math.floor(Strophe.TIMEOUT * this.wait));
+ var secondaryTimeout = (req.dead !== null &&
+ req.timeDead() > Math.floor(Strophe.SECONDARY_TIMEOUT * this.wait));
+ var requestCompletedWithServerError = (req.xhr.readyState == 4 &&
+ (reqStatus < 1 ||
+ reqStatus >= 500));
+ if (primaryTimeout || secondaryTimeout ||
+ requestCompletedWithServerError) {
+ if (secondaryTimeout) {
+ Strophe.error("Request " +
+ this._requests[i].id +
+ " timed out (secondary), restarting");
+ }
+ req.abort = true;
+ req.xhr.abort();
+ // setting to null fails on IE6, so set to empty function
+ req.xhr.onreadystatechange = function () {};
+ this._requests[i] = new Strophe.Request(req.xmlData,
+ req.origFunc,
+ req.rid,
+ req.sends);
+ req = this._requests[i];
+ }
+
+ if (req.xhr.readyState === 0) {
+ Strophe.debug("request id " + req.id +
+ "." + req.sends + " posting");
+
+ req.date = new Date();
+ try {
+ req.xhr.open("POST", this.service, true);
+ } catch (e2) {
+ Strophe.error("XHR open failed.");
+ if (!this.connected) {
+ this._changeConnectStatus(Strophe.Status.CONNFAIL,
+ "bad-service");
+ }
+ this.disconnect();
+ return;
+ }
+
+ // Fires the XHR request -- may be invoked immediately
+ // or on a gradually expanding retry window for reconnects
+ var sendFunc = function () {
+ req.xhr.send(req.data);
+ };
+
+ // Implement progressive backoff for reconnects --
+ // First retry (send == 1) should also be instantaneous
+ if (req.sends > 1) {
+ // Using a cube of the retry number creates a nicely
+ // expanding retry window
+ var backoff = Math.pow(req.sends, 3) * 1000;
+ setTimeout(sendFunc, backoff);
+ } else {
+ sendFunc();
+ }
+
+ req.sends++;
+
+ this.xmlOutput(req.xmlData);
+ this.rawOutput(req.data);
+ } else {
+ Strophe.debug("_processRequest: " +
+ (i === 0 ? "first" : "second") +
+ " request has readyState of " +
+ req.xhr.readyState);
+ }
+ },
+
+ /** PrivateFunction: _throttledRequestHandler
+ * _Private_ function to throttle requests to the connection window.
+ *
+ * This function makes sure we don't send requests so fast that the
+ * request ids overflow the connection window in the case that one
+ * request died.
+ */
+ _throttledRequestHandler: function ()
+ {
+ if (!this._requests) {
+ Strophe.debug("_throttledRequestHandler called with " +
+ "undefined requests");
+ } else {
+ Strophe.debug("_throttledRequestHandler called with " +
+ this._requests.length + " requests");
+ }
+
+ if (!this._requests || this._requests.length === 0) {
+ return;
+ }
+
+ if (this._requests.length > 0) {
+ this._processRequest(0);
+ }
+
+ if (this._requests.length > 1 &&
+ Math.abs(this._requests[0].rid -
+ this._requests[1].rid) < this.window) {
+ this._processRequest(1);
+ }
+ },
+
+ /** PrivateFunction: _onRequestStateChange
+ * _Private_ handler for Strophe.Request state changes.
+ *
+ * This function is called when the XMLHttpRequest readyState changes.
+ * It contains a lot of error handling logic for the many ways that
+ * requests can fail, and calls the request callback when requests
+ * succeed.
+ *
+ * Parameters:
+ * (Function) func - The handler for the request.
+ * (Strophe.Request) req - The request that is changing readyState.
+ */
+ _onRequestStateChange: function (func, req)
+ {
+ Strophe.debug("request id " + req.id +
+ "." + req.sends + " state changed to " +
+ req.xhr.readyState);
+
+ if (req.abort) {
+ req.abort = false;
+ return;
+ }
+
+ // request complete
+ var reqStatus;
+ if (req.xhr.readyState == 4) {
+ reqStatus = 0;
+ try {
+ reqStatus = req.xhr.status;
+ } catch (e) {
+ // ignore errors from undefined status attribute. works
+ // around a browser bug
+ }
+
+ if (typeof(reqStatus) == "undefined") {
+ reqStatus = 0;
+ }
+
+ if (this.disconnecting) {
+ if (reqStatus >= 400) {
+ this._hitError(reqStatus);
+ return;
+ }
+ }
+
+ var reqIs0 = (this._requests[0] == req);
+ var reqIs1 = (this._requests[1] == req);
+
+ if ((reqStatus > 0 && reqStatus < 500) || req.sends > 5) {
+ // remove from internal queue
+ this._removeRequest(req);
+ Strophe.debug("request id " +
+ req.id +
+ " should now be removed");
+ }
+
+ // request succeeded
+ if (reqStatus == 200) {
+ // if request 1 finished, or request 0 finished and request
+ // 1 is over Strophe.SECONDARY_TIMEOUT seconds old, we need to
+ // restart the other - both will be in the first spot, as the
+ // completed request has been removed from the queue already
+ if (reqIs1 ||
+ (reqIs0 && this._requests.length > 0 &&
+ this._requests[0].age() > Math.floor(Strophe.SECONDARY_TIMEOUT * this.wait))) {
+ this._restartRequest(0);
+ }
+ // call handler
+ Strophe.debug("request id " +
+ req.id + "." +
+ req.sends + " got 200");
+ func(req);
+ this.errors = 0;
+ } else {
+ Strophe.error("request id " +
+ req.id + "." +
+ req.sends + " error " + reqStatus +
+ " happened");
+ if (reqStatus === 0 ||
+ (reqStatus >= 400 && reqStatus < 600) ||
+ reqStatus >= 12000) {
+ this._hitError(reqStatus);
+ if (reqStatus >= 400 && reqStatus < 500) {
+ this._changeConnectStatus(Strophe.Status.DISCONNECTING,
+ null);
+ this._doDisconnect();
+ }
+ }
+ }
+
+ if (!((reqStatus > 0 && reqStatus < 500) ||
+ req.sends > 5)) {
+ this._throttledRequestHandler();
+ }
+ }
+ },
+
+ /** PrivateFunction: _hitError
+ * _Private_ function to handle the error count.
+ *
+ * Requests are resent automatically until their error count reaches
+ * 5. Each time an error is encountered, this function is called to
+ * increment the count and disconnect if the count is too high.
+ *
+ * Parameters:
+ * (Integer) reqStatus - The request status.
+ */
+ _hitError: function (reqStatus)
+ {
+ this.errors++;
+ Strophe.warn("request errored, status: " + reqStatus +
+ ", number of errors: " + this.errors);
+ if (this.errors > 4) {
+ this._onDisconnectTimeout();
+ }
+ },
+
+ /** PrivateFunction: _doDisconnect
+ * _Private_ function to disconnect.
+ *
+ * This is the last piece of the disconnection logic. This resets the
+ * connection and alerts the user's connection callback.
+ */
+ _doDisconnect: function ()
+ {
+ Strophe.info("_doDisconnect was called");
+ this.authenticated = false;
+ this.disconnecting = false;
+ this.sid = null;
+ this.streamId = null;
+ this.rid = Math.floor(Math.random() * 4294967295);
+
+ // tell the parent we disconnected
+ if (this.connected) {
+ this._changeConnectStatus(Strophe.Status.DISCONNECTED, null);
+ this.connected = false;
+ }
+
+ // delete handlers
+ this.handlers = [];
+ this.timedHandlers = [];
+ this.removeTimeds = [];
+ this.removeHandlers = [];
+ this.addTimeds = [];
+ this.addHandlers = [];
+ },
+
+ /** PrivateFunction: _dataRecv
+     * _Private_ handler that processes incoming data from the connection.
+ *
+ * Except for _connect_cb handling the initial connection request,
+ * this function handles the incoming data for all requests. This
+ * function also fires stanza handlers that match each incoming
+ * stanza.
+ *
+ * Parameters:
+ * (Strophe.Request) req - The request that has data ready.
+ */
+ _dataRecv: function (req)
+ {
+ try {
+ var elem = req.getResponse();
+        } catch (e) {
+            if (e != "parsererror") { throw e; }
+            this.disconnect("strophe-parsererror");
+            // elem was never assigned, so stop here rather than fall
+            // through to xmlInput() with an undefined element
+            return;
+        }
+        if (elem === null) { return; }
+
+ this.xmlInput(elem);
+ this.rawInput(Strophe.serialize(elem));
+
+ // remove handlers scheduled for deletion
+ var i, hand;
+ while (this.removeHandlers.length > 0) {
+ hand = this.removeHandlers.pop();
+ i = this.handlers.indexOf(hand);
+ if (i >= 0) {
+ this.handlers.splice(i, 1);
+ }
+ }
+
+ // add handlers scheduled for addition
+ while (this.addHandlers.length > 0) {
+ this.handlers.push(this.addHandlers.pop());
+ }
+
+ // handle graceful disconnect
+ if (this.disconnecting && this._requests.length === 0) {
+ this.deleteTimedHandler(this._disconnectTimeout);
+ this._disconnectTimeout = null;
+ this._doDisconnect();
+ return;
+ }
+
+ var typ = elem.getAttribute("type");
+ var cond, conflict;
+ if (typ !== null && typ == "terminate") {
+ // Don't process stanzas that come in after disconnect
+ if (this.disconnecting) {
+ return;
+ }
+
+ // an error occurred
+ cond = elem.getAttribute("condition");
+ conflict = elem.getElementsByTagName("conflict");
+ if (cond !== null) {
+ if (cond == "remote-stream-error" && conflict.length > 0) {
+ cond = "conflict";
+ }
+ this._changeConnectStatus(Strophe.Status.CONNFAIL, cond);
+ } else {
+ this._changeConnectStatus(Strophe.Status.CONNFAIL, "unknown");
+ }
+ this.disconnect();
+ return;
+ }
+
+ // send each incoming stanza through the handler chain
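+        // (a handler whose run() returns truthy is pushed back onto the
+        // list and persists; returning false drops it after one invocation)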
+ var that = this;
+ Strophe.forEachChild(elem, null, function (child) {
+ var i, newList;
+ // process handlers
+ newList = that.handlers;
+ that.handlers = [];
+ for (i = 0; i < newList.length; i++) {
+ var hand = newList[i];
+ if (hand.isMatch(child) &&
+ (that.authenticated || !hand.user)) {
+ if (hand.run(child)) {
+ that.handlers.push(hand);
+ }
+ } else {
+ that.handlers.push(hand);
+ }
+ }
+ });
+ },
+
+ /** PrivateFunction: _sendTerminate
+ * _Private_ function to send initial disconnect sequence.
+ *
+ * This is the first step in a graceful disconnect. It sends
+ * the BOSH server a terminate body and includes an unavailable
+ * presence if authentication has completed.
+ */
+ _sendTerminate: function ()
+ {
+ Strophe.info("_sendTerminate was called");
+ var body = this._buildBody().attrs({type: "terminate"});
+
+ if (this.authenticated) {
+ body.c('presence', {
+ xmlns: Strophe.NS.CLIENT,
+ type: 'unavailable'
+ });
+ }
+
+ this.disconnecting = true;
+
+ var req = new Strophe.Request(body.tree(),
+ this._onRequestStateChange.bind(
+ this, this._dataRecv.bind(this)),
+ body.tree().getAttribute("rid"));
+
+ this._requests.push(req);
+ this._throttledRequestHandler();
+ },
+
+ /** PrivateFunction: _connect_cb
+ * _Private_ handler for initial connection request.
+ *
+ * This handler is used to process the initial connection request
+ * response from the BOSH server. It is used to set up authentication
+ * handlers and start the authentication process.
+ *
+ * SASL authentication will be attempted if available, otherwise
+ * the code will fall back to legacy authentication.
+ *
+ * Parameters:
+ * (Strophe.Request) req - The current request.
+ */
+ _connect_cb: function (req)
+ {
+ Strophe.info("_connect_cb was called");
+
+ this.connected = true;
+ var bodyWrap = req.getResponse();
+ if (!bodyWrap) { return; }
+
+ this.xmlInput(bodyWrap);
+ this.rawInput(Strophe.serialize(bodyWrap));
+
+ var typ = bodyWrap.getAttribute("type");
+ var cond, conflict;
+ if (typ !== null && typ == "terminate") {
+ // an error occurred
+ cond = bodyWrap.getAttribute("condition");
+ conflict = bodyWrap.getElementsByTagName("conflict");
+ if (cond !== null) {
+ if (cond == "remote-stream-error" && conflict.length > 0) {
+ cond = "conflict";
+ }
+ this._changeConnectStatus(Strophe.Status.CONNFAIL, cond);
+ } else {
+ this._changeConnectStatus(Strophe.Status.CONNFAIL, "unknown");
+ }
+ return;
+ }
+
+ // check to make sure we don't overwrite these if _connect_cb is
+ // called multiple times in the case of missing stream:features
+ if (!this.sid) {
+ this.sid = bodyWrap.getAttribute("sid");
+ }
+ if (!this.stream_id) {
+ this.stream_id = bodyWrap.getAttribute("authid");
+ }
+ var wind = bodyWrap.getAttribute('requests');
+ if (wind) { this.window = parseInt(wind, 10); }
+ var hold = bodyWrap.getAttribute('hold');
+ if (hold) { this.hold = parseInt(hold, 10); }
+ var wait = bodyWrap.getAttribute('wait');
+ if (wait) { this.wait = parseInt(wait, 10); }
+
+
+ var do_sasl_plain = false;
+ var do_sasl_digest_md5 = false;
+ var do_sasl_anonymous = false;
+
+ var mechanisms = bodyWrap.getElementsByTagName("mechanism");
+ var i, mech, auth_str, hashed_auth_str;
+ if (mechanisms.length > 0) {
+ for (i = 0; i < mechanisms.length; i++) {
+ mech = Strophe.getText(mechanisms[i]);
+ if (mech == 'DIGEST-MD5') {
+ do_sasl_digest_md5 = true;
+ } else if (mech == 'PLAIN') {
+ do_sasl_plain = true;
+ } else if (mech == 'ANONYMOUS') {
+ do_sasl_anonymous = true;
+ }
+ }
+ } else {
+            // we didn't get stream:features yet, so we need to wait for it
+ // by sending a blank poll request
+ var body = this._buildBody();
+ this._requests.push(
+ new Strophe.Request(body.tree(),
+ this._onRequestStateChange.bind(
+ this, this._connect_cb.bind(this)),
+ body.tree().getAttribute("rid")));
+ this._throttledRequestHandler();
+ return;
+ }
+
+ if (Strophe.getNodeFromJid(this.jid) === null &&
+ do_sasl_anonymous) {
+ this._changeConnectStatus(Strophe.Status.AUTHENTICATING, null);
+ this._sasl_success_handler = this._addSysHandler(
+ this._sasl_success_cb.bind(this), null,
+ "success", null, null);
+ this._sasl_failure_handler = this._addSysHandler(
+ this._sasl_failure_cb.bind(this), null,
+ "failure", null, null);
+
+ this.send($build("auth", {
+ xmlns: Strophe.NS.SASL,
+ mechanism: "ANONYMOUS"
+ }).tree());
+ } else if (Strophe.getNodeFromJid(this.jid) === null) {
+ // we don't have a node, which is required for non-anonymous
+ // client connections
+ this._changeConnectStatus(Strophe.Status.CONNFAIL,
+ 'x-strophe-bad-non-anon-jid');
+ this.disconnect();
+ } else if (do_sasl_digest_md5) {
+ this._changeConnectStatus(Strophe.Status.AUTHENTICATING, null);
+ this._sasl_challenge_handler = this._addSysHandler(
+ this._sasl_challenge1_cb.bind(this), null,
+ "challenge", null, null);
+ this._sasl_failure_handler = this._addSysHandler(
+ this._sasl_failure_cb.bind(this), null,
+ "failure", null, null);
+
+ this.send($build("auth", {
+ xmlns: Strophe.NS.SASL,
+ mechanism: "DIGEST-MD5"
+ }).tree());
+ } else if (do_sasl_plain) {
+            // Build the PLAIN auth string (authzid NUL authcid NUL
+            // password, per RFC 4616) and base64 encode it.
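+            // e.g. for jid "user@example.com" with password "secret" this
+            // yields "user@example.com\u0000user\u0000secret" before the
+            // Base64.encode() below.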
+ auth_str = Strophe.getBareJidFromJid(this.jid);
+ auth_str = auth_str + "\u0000";
+ auth_str = auth_str + Strophe.getNodeFromJid(this.jid);
+ auth_str = auth_str + "\u0000";
+ auth_str = auth_str + this.pass;
+
+ this._changeConnectStatus(Strophe.Status.AUTHENTICATING, null);
+ this._sasl_success_handler = this._addSysHandler(
+ this._sasl_success_cb.bind(this), null,
+ "success", null, null);
+ this._sasl_failure_handler = this._addSysHandler(
+ this._sasl_failure_cb.bind(this), null,
+ "failure", null, null);
+
+ hashed_auth_str = Base64.encode(auth_str);
+ this.send($build("auth", {
+ xmlns: Strophe.NS.SASL,
+ mechanism: "PLAIN"
+ }).t(hashed_auth_str).tree());
+ } else {
+ this._changeConnectStatus(Strophe.Status.AUTHENTICATING, null);
+ this._addSysHandler(this._auth1_cb.bind(this), null, null,
+ null, "_auth_1");
+
+ this.send($iq({
+ type: "get",
+ to: this.domain,
+ id: "_auth_1"
+ }).c("query", {
+ xmlns: Strophe.NS.AUTH
+ }).c("username", {}).t(Strophe.getNodeFromJid(this.jid)).tree());
+ }
+ },
+
+ /** PrivateFunction: _sasl_challenge1_cb
+ * _Private_ handler for DIGEST-MD5 SASL authentication.
+ *
+ * Parameters:
+ * (XMLElement) elem - The challenge stanza.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _sasl_challenge1_cb: function (elem)
+ {
+ var attribMatch = /([a-z]+)=("[^"]+"|[^,"]+)(?:,|$)/;
+
+ var challenge = Base64.decode(Strophe.getText(elem));
+ var cnonce = MD5.hexdigest(Math.random() * 1234567890);
+ var realm = "";
+ var host = null;
+ var nonce = "";
+ var qop = "";
+ var matches;
+
+ // remove unneeded handlers
+ this.deleteHandler(this._sasl_failure_handler);
+
+ while (challenge.match(attribMatch)) {
+ matches = challenge.match(attribMatch);
+ challenge = challenge.replace(matches[0], "");
+ matches[2] = matches[2].replace(/^"(.+)"$/, "$1");
+ switch (matches[1]) {
+ case "realm":
+ realm = matches[2];
+ break;
+ case "nonce":
+ nonce = matches[2];
+ break;
+ case "qop":
+ qop = matches[2];
+ break;
+ case "host":
+ host = matches[2];
+ break;
+ }
+ }
+
+ var digest_uri = "xmpp/" + this.domain;
+ if (host !== null) {
+ digest_uri = digest_uri + "/" + host;
+ }
+
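+        // The response computed below follows RFC 2831:
+        //   response = HEX(KD(HEX(H(A1)), nonce ":" nc ":" cnonce ":" qop ":" HEX(H(A2))))
+        // with KD(k, s) = H(k ":" s); A1 and A2 are assembled next.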
+ var A1 = MD5.hash(Strophe.getNodeFromJid(this.jid) +
+ ":" + realm + ":" + this.pass) +
+ ":" + nonce + ":" + cnonce;
+ var A2 = 'AUTHENTICATE:' + digest_uri;
+
+ var responseText = "";
+ responseText += 'username=' +
+ this._quote(Strophe.getNodeFromJid(this.jid)) + ',';
+ responseText += 'realm=' + this._quote(realm) + ',';
+ responseText += 'nonce=' + this._quote(nonce) + ',';
+ responseText += 'cnonce=' + this._quote(cnonce) + ',';
+ responseText += 'nc="00000001",';
+ responseText += 'qop="auth",';
+ responseText += 'digest-uri=' + this._quote(digest_uri) + ',';
+ responseText += 'response=' + this._quote(
+ MD5.hexdigest(MD5.hexdigest(A1) + ":" +
+ nonce + ":00000001:" +
+ cnonce + ":auth:" +
+ MD5.hexdigest(A2))) + ',';
+ responseText += 'charset="utf-8"';
+
+ this._sasl_challenge_handler = this._addSysHandler(
+ this._sasl_challenge2_cb.bind(this), null,
+ "challenge", null, null);
+ this._sasl_success_handler = this._addSysHandler(
+ this._sasl_success_cb.bind(this), null,
+ "success", null, null);
+ this._sasl_failure_handler = this._addSysHandler(
+ this._sasl_failure_cb.bind(this), null,
+ "failure", null, null);
+
+ this.send($build('response', {
+ xmlns: Strophe.NS.SASL
+ }).t(Base64.encode(responseText)).tree());
+
+ return false;
+ },
+
+ /** PrivateFunction: _quote
+ * _Private_ utility function to backslash escape and quote strings.
+ *
+ * Parameters:
+ * (String) str - The string to be quoted.
+ *
+ * Returns:
+ * quoted string
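+     *
+     *   e.g. _quote('he said "hi"') returns '"he said \"hi\""'.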
+ */
+ _quote: function (str)
+ {
+ return '"' + str.replace(/\\/g, "\\\\").replace(/"/g, '\\"') + '"';
+ //" end string workaround for emacs
+ },
+
+
+ /** PrivateFunction: _sasl_challenge2_cb
+ * _Private_ handler for second step of DIGEST-MD5 SASL authentication.
+ *
+ * Parameters:
+ * (XMLElement) elem - The challenge stanza.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _sasl_challenge2_cb: function (elem)
+ {
+ // remove unneeded handlers
+ this.deleteHandler(this._sasl_success_handler);
+ this.deleteHandler(this._sasl_failure_handler);
+
+ this._sasl_success_handler = this._addSysHandler(
+ this._sasl_success_cb.bind(this), null,
+ "success", null, null);
+ this._sasl_failure_handler = this._addSysHandler(
+ this._sasl_failure_cb.bind(this), null,
+ "failure", null, null);
+ this.send($build('response', {xmlns: Strophe.NS.SASL}).tree());
+ return false;
+ },
+
+ /** PrivateFunction: _auth1_cb
+ * _Private_ handler for legacy authentication.
+ *
+ * This handler is called in response to the initial <iq type='get'/>
+ * for legacy authentication. It builds an authentication <iq/> and
+ * sends it, creating a handler (calling back to _auth2_cb()) to
+     * handle the result.
+ *
+ * Parameters:
+ * (XMLElement) elem - The stanza that triggered the callback.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _auth1_cb: function (elem)
+ {
+ // build plaintext auth iq
+ var iq = $iq({type: "set", id: "_auth_2"})
+ .c('query', {xmlns: Strophe.NS.AUTH})
+ .c('username', {}).t(Strophe.getNodeFromJid(this.jid))
+ .up()
+ .c('password').t(this.pass);
+
+ if (!Strophe.getResourceFromJid(this.jid)) {
+ // since the user has not supplied a resource, we pick
+ // a default one here. unlike other auth methods, the server
+ // cannot do this for us.
+ this.jid = Strophe.getBareJidFromJid(this.jid) + '/strophe';
+ }
+ iq.up().c('resource', {}).t(Strophe.getResourceFromJid(this.jid));
+
+ this._addSysHandler(this._auth2_cb.bind(this), null,
+ null, null, "_auth_2");
+
+ this.send(iq.tree());
+
+ return false;
+ },
+
+ /** PrivateFunction: _sasl_success_cb
+     * _Private_ handler for successful SASL authentication.
+ *
+ * Parameters:
+ * (XMLElement) elem - The matching stanza.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _sasl_success_cb: function (elem)
+ {
+ Strophe.info("SASL authentication succeeded.");
+
+ // remove old handlers
+ this.deleteHandler(this._sasl_failure_handler);
+ this._sasl_failure_handler = null;
+ if (this._sasl_challenge_handler) {
+ this.deleteHandler(this._sasl_challenge_handler);
+ this._sasl_challenge_handler = null;
+ }
+
+ this._addSysHandler(this._sasl_auth1_cb.bind(this), null,
+ "stream:features", null, null);
+
+ // we must send an xmpp:restart now
+ this._sendRestart();
+
+ return false;
+ },
+
+ /** PrivateFunction: _sasl_auth1_cb
+ * _Private_ handler to start stream binding.
+ *
+ * Parameters:
+ * (XMLElement) elem - The matching stanza.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _sasl_auth1_cb: function (elem)
+ {
+ // save stream:features for future usage
+ this.features = elem;
+
+ var i, child;
+
+ for (i = 0; i < elem._childNodes.length; i++) {
+ child = elem._childNodes[i];
+ if (child.nodeName.toLowerCase() == 'bind') {
+ this.do_bind = true;
+ }
+
+ if (child.nodeName.toLowerCase() == 'session') {
+ this.do_session = true;
+ }
+ }
+
+ if (!this.do_bind) {
+ this._changeConnectStatus(Strophe.Status.AUTHFAIL, null);
+ return false;
+ } else {
+ this._addSysHandler(this._sasl_bind_cb.bind(this), null, null,
+ null, "_bind_auth_2");
+
+ var resource = Strophe.getResourceFromJid(this.jid);
+ if (resource) {
+ this.send($iq({type: "set", id: "_bind_auth_2"})
+ .c('bind', {xmlns: Strophe.NS.BIND})
+ .c('resource', {}).t(resource).tree());
+ } else {
+ this.send($iq({type: "set", id: "_bind_auth_2"})
+ .c('bind', {xmlns: Strophe.NS.BIND})
+ .tree());
+ }
+ }
+
+ return false;
+ },
+
+ /** PrivateFunction: _sasl_bind_cb
+ * _Private_ handler for binding result and session start.
+ *
+ * Parameters:
+ * (XMLElement) elem - The matching stanza.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _sasl_bind_cb: function (elem)
+ {
+ if (elem.getAttribute("type") == "error") {
+ Strophe.info("SASL binding failed.");
+ this._changeConnectStatus(Strophe.Status.AUTHFAIL, null);
+ return false;
+ }
+
+ // TODO - need to grab errors
+ var bind = elem.getElementsByTagName("bind");
+ var jidNode;
+ if (bind.length > 0) {
+ // Grab jid
+ jidNode = bind[0].getElementsByTagName("jid");
+ if (jidNode.length > 0) {
+ this.jid = Strophe.getText(jidNode[0]);
+
+ if (this.do_session) {
+ this._addSysHandler(this._sasl_session_cb.bind(this),
+ null, null, null, "_session_auth_2");
+
+ this.send($iq({type: "set", id: "_session_auth_2"})
+ .c('session', {xmlns: Strophe.NS.SESSION})
+ .tree());
+ } else {
+ this.authenticated = true;
+ this._changeConnectStatus(Strophe.Status.CONNECTED, null);
+ }
+ }
+ } else {
+ Strophe.info("SASL binding failed.");
+ this._changeConnectStatus(Strophe.Status.AUTHFAIL, null);
+ return false;
+ }
+ },
+
+ /** PrivateFunction: _sasl_session_cb
+ * _Private_ handler to finish successful SASL connection.
+ *
+ * This sets Connection.authenticated to true on success, which
+ * starts the processing of user handlers.
+ *
+ * Parameters:
+ * (XMLElement) elem - The matching stanza.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _sasl_session_cb: function (elem)
+ {
+ if (elem.getAttribute("type") == "result") {
+ this.authenticated = true;
+ this._changeConnectStatus(Strophe.Status.CONNECTED, null);
+ } else if (elem.getAttribute("type") == "error") {
+ Strophe.info("Session creation failed.");
+ this._changeConnectStatus(Strophe.Status.AUTHFAIL, null);
+ return false;
+ }
+
+ return false;
+ },
+
+ /** PrivateFunction: _sasl_failure_cb
+ * _Private_ handler for SASL authentication failure.
+ *
+ * Parameters:
+ * (XMLElement) elem - The matching stanza.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _sasl_failure_cb: function (elem)
+ {
+ // delete unneeded handlers
+ if (this._sasl_success_handler) {
+ this.deleteHandler(this._sasl_success_handler);
+ this._sasl_success_handler = null;
+ }
+ if (this._sasl_challenge_handler) {
+ this.deleteHandler(this._sasl_challenge_handler);
+ this._sasl_challenge_handler = null;
+ }
+
+ this._changeConnectStatus(Strophe.Status.AUTHFAIL, null);
+ return false;
+ },
+
+ /** PrivateFunction: _auth2_cb
+ * _Private_ handler to finish legacy authentication.
+ *
+ * This handler is called when the result from the jabber:iq:auth
+ * <iq/> stanza is returned.
+ *
+ * Parameters:
+ * (XMLElement) elem - The stanza that triggered the callback.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _auth2_cb: function (elem)
+ {
+ if (elem.getAttribute("type") == "result") {
+ this.authenticated = true;
+ this._changeConnectStatus(Strophe.Status.CONNECTED, null);
+ } else if (elem.getAttribute("type") == "error") {
+ this._changeConnectStatus(Strophe.Status.AUTHFAIL, null);
+ this.disconnect();
+ }
+
+ return false;
+ },
+
+ /** PrivateFunction: _addSysTimedHandler
+ * _Private_ function to add a system level timed handler.
+ *
+ * This function is used to add a Strophe.TimedHandler for the
+ * library code. System timed handlers are allowed to run before
+ * authentication is complete.
+ *
+ * Parameters:
+ * (Integer) period - The period of the handler.
+ * (Function) handler - The callback function.
+ */
+ _addSysTimedHandler: function (period, handler)
+ {
+ var thand = new Strophe.TimedHandler(period, handler);
+ thand.user = false;
+ this.addTimeds.push(thand);
+ return thand;
+ },
+
+ /** PrivateFunction: _addSysHandler
+ * _Private_ function to add a system level stanza handler.
+ *
+ * This function is used to add a Strophe.Handler for the
+ * library code. System stanza handlers are allowed to run before
+ * authentication is complete.
+ *
+ * Parameters:
+ * (Function) handler - The callback function.
+ * (String) ns - The namespace to match.
+ * (String) name - The stanza name to match.
+ * (String) type - The stanza type attribute to match.
+ * (String) id - The stanza id attribute to match.
+ */
+ _addSysHandler: function (handler, ns, name, type, id)
+ {
+ var hand = new Strophe.Handler(handler, ns, name, type, id);
+ hand.user = false;
+ this.addHandlers.push(hand);
+ return hand;
+ },
+
+ /** PrivateFunction: _onDisconnectTimeout
+ * _Private_ timeout handler for handling non-graceful disconnection.
+ *
+ * If the graceful disconnect process does not complete within the
+ * time allotted, this handler finishes the disconnect anyway.
+ *
+ * Returns:
+ * false to remove the handler.
+ */
+ _onDisconnectTimeout: function ()
+ {
+ Strophe.info("_onDisconnectTimeout was called");
+
+ // cancel all remaining requests and clear the queue
+ var req;
+ while (this._requests.length > 0) {
+ req = this._requests.pop();
+ req.abort = true;
+ req.xhr.abort();
+ // jslint complains, but this is fine. setting to empty func
+ // is necessary for IE6
+ req.xhr.onreadystatechange = function () {};
+ }
+
+ // actually disconnect
+ this._doDisconnect();
+
+ return false;
+ },
+
+ /** PrivateFunction: _onIdle
+ * _Private_ handler to process events during idle cycle.
+ *
+ * This handler is called every 100ms to fire timed handlers that
+ * are ready and keep poll requests going.
+ */
+ _onIdle: function ()
+ {
+ var i, thand, since, newList;
+
+ // add timed handlers scheduled for addition
+ // NOTE: we add before remove in the case a timed handler is
+ // added and then deleted before the next _onIdle() call.
+ while (this.addTimeds.length > 0) {
+ this.timedHandlers.push(this.addTimeds.pop());
+ }
+
+ // remove timed handlers that have been scheduled for deletion
+ while (this.removeTimeds.length > 0) {
+ thand = this.removeTimeds.pop();
+ i = this.timedHandlers.indexOf(thand);
+ if (i >= 0) {
+ this.timedHandlers.splice(i, 1);
+ }
+ }
+
+ // call ready timed handlers
+ var now = new Date().getTime();
+ newList = [];
+ for (i = 0; i < this.timedHandlers.length; i++) {
+ thand = this.timedHandlers[i];
+ if (this.authenticated || !thand.user) {
+ since = thand.lastCalled + thand.period;
+ if (since - now <= 0) {
+ if (thand.run()) {
+ newList.push(thand);
+ }
+ } else {
+ newList.push(thand);
+ }
+ }
+ }
+ this.timedHandlers = newList;
+
+ var body, time_elapsed;
+
+ // if no requests are in progress, poll
+ if (this.authenticated && this._requests.length === 0 &&
+ this._data.length === 0 && !this.disconnecting) {
+ Strophe.info("no requests during idle cycle, sending " +
+ "blank request");
+ this._data.push(null);
+ }
+
+ if (this._requests.length < 2 && this._data.length > 0 &&
+ !this.paused) {
+ body = this._buildBody();
+ for (i = 0; i < this._data.length; i++) {
+ if (this._data[i] !== null) {
+ if (this._data[i] === "restart") {
+ body.attrs({
+ to: this.domain,
+ "xml:lang": "en",
+ "xmpp:restart": "true",
+ "xmlns:xmpp": Strophe.NS.BOSH
+ });
+ } else {
+ body.cnode(this._data[i]).up();
+ }
+ }
+ }
+ delete this._data;
+ this._data = [];
+ this._requests.push(
+ new Strophe.Request(body.tree(),
+ this._onRequestStateChange.bind(
+ this, this._dataRecv.bind(this)),
+ body.tree().getAttribute("rid")));
+ this._processRequest(this._requests.length - 1);
+ }
+
+ if (this._requests.length > 0) {
+ time_elapsed = this._requests[0].age();
+ if (this._requests[0].dead !== null) {
+ if (this._requests[0].timeDead() >
+ Math.floor(Strophe.SECONDARY_TIMEOUT * this.wait)) {
+ this._throttledRequestHandler();
+ }
+ }
+
+ if (time_elapsed > Math.floor(Strophe.TIMEOUT * this.wait)) {
+ Strophe.warn("Request " +
+ this._requests[0].id +
+ " timed out, over " + Math.floor(Strophe.TIMEOUT * this.wait) +
+ " seconds since last activity");
+ this._throttledRequestHandler();
+ }
+ }
+
+ // reactivate the timer
+ clearTimeout(this._idleTimeout);
+ this._idleTimeout = setTimeout(this._onIdle.bind(this), 100);
+ }
+};
+
+if (callback) {
+ callback(Strophe, $build, $msg, $iq, $pres);
+}
+
+})(function () {
+ window.Strophe = arguments[0];
+ window.$build = arguments[1];
+ window.$msg = arguments[2];
+ window.$iq = arguments[3];
+ window.$pres = arguments[4];
+});
diff --git a/contrib/jitsimeetbridge/unjingle/unjingle.js b/contrib/jitsimeetbridge/unjingle/unjingle.js
new file mode 100644
index 00000000..3dfe7599
--- /dev/null
+++ b/contrib/jitsimeetbridge/unjingle/unjingle.js
@@ -0,0 +1,48 @@
+var strophe = require("./strophe/strophe.js").Strophe;
+
+var Strophe = strophe.Strophe;
+var $iq = strophe.$iq;
+var $msg = strophe.$msg;
+var $build = strophe.$build;
+var $pres = strophe.$pres;
+
+var jsdom = require("jsdom");
+var window = jsdom.jsdom().parentWindow;
+var $ = require('jquery')(window);
+
+var stropheJingle = require("./strophe.jingle.sdp.js");
+
+
+var input = '';
+
+process.stdin.on('readable', function() {
+ var chunk = process.stdin.read();
+ if (chunk !== null) {
+ input += chunk;
+ }
+});
+
+process.stdin.on('end', function() {
+ if (process.argv[2] == '--jingle') {
+ var elem = $(input);
+ // app does:
+ // sess.setRemoteDescription($(iq).find('>jingle'), 'offer');
+ //console.log(elem.find('>content'));
+ var sdp = new stropheJingle.SDP('');
+ sdp.fromJingle(elem);
+ console.log(sdp.raw);
+ } else if (process.argv[2] == '--sdp') {
+ var sdp = new stropheJingle.SDP(input);
+ var accept = $iq({to: '%(tojid)s',
+ type: 'set'})
+ .c('jingle', {xmlns: 'urn:xmpp:jingle:1',
+ //action: 'session-accept',
+ action: '%(action)s',
+ initiator: '%(initiator)s',
+ responder: '%(responder)s',
+ sid: '%(sid)s' });
+ sdp.toJingle(accept, 'responder');
+ console.log(Strophe.serialize(accept));
+ }
+});
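+
+// Usage (inferred from the argument handling above, not documented upstream):
+//   cat jingle.xml | node unjingle.js --jingle   # prints the SDP translation
+//   cat offer.sdp  | node unjingle.js --sdp      # prints a Jingle IQ; the
+//   %(...)s fields are Python-style placeholders filled in by the caller.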
+
diff --git a/contrib/scripts/kick_users.py b/contrib/scripts/kick_users.py
new file mode 100755
index 00000000..5dfaec3a
--- /dev/null
+++ b/contrib/scripts/kick_users.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+from argparse import ArgumentParser
+import json
+import requests
+import sys
+import urllib
+
+def _mkurl(template, kws):
+ for key in kws:
+ template = template.replace(key, kws[key])
+ return template
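+
+# e.g. _mkurl("$HS/_matrix/client/api/v1/rooms/$ROOM/state",
+#             {"$HS": "https://matrix.org", "$ROOM": "!abc:matrix.org"})
+# -> "https://matrix.org/_matrix/client/api/v1/rooms/!abc:matrix.org/state"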
+
+def main(hs, room_id, access_token, user_id_prefix, why):
+ if not why:
+ why = "Automated kick."
+ print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
+ room_state_url = _mkurl(
+ "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
+ {
+ "$HS": hs,
+ "$ROOM": room_id,
+ "$TOKEN": access_token
+ }
+ )
+ print "Getting room state => %s" % room_state_url
+ res = requests.get(room_state_url)
+ print "HTTP %s" % res.status_code
+ state_events = res.json()
+ if "error" in state_events:
+ print "FATAL"
+ print state_events
+ return
+
+ kick_list = []
+ room_name = room_id
+ for event in state_events:
+ if not event["type"] == "m.room.member":
+ if event["type"] == "m.room.name":
+ room_name = event["content"].get("name")
+ continue
+ if not event["content"].get("membership") == "join":
+ continue
+ if event["state_key"].startswith(user_id_prefix):
+ kick_list.append(event["state_key"])
+
+ if len(kick_list) == 0:
+ print "No user IDs match the prefix '%s'" % user_id_prefix
+ return
+
+ print "The following user IDs will be kicked from %s" % room_name
+ for uid in kick_list:
+ print uid
+ doit = raw_input("Continue? [Y]es\n")
+ if len(doit) > 0 and doit.lower() == 'y':
+ print "Kicking members..."
+ # encode them all
+ kick_list = [urllib.quote(uid) for uid in kick_list]
+ for uid in kick_list:
+ kick_url = _mkurl(
+ "$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
+ {
+ "$HS": hs,
+ "$UID": uid,
+ "$ROOM": room_id,
+ "$TOKEN": access_token
+ }
+ )
+ kick_body = {
+ "membership": "leave",
+ "reason": why
+ }
+ print "Kicking %s" % uid
+ res = requests.put(kick_url, data=json.dumps(kick_body))
+ if res.status_code != 200:
+ print "ERROR: HTTP %s" % res.status_code
+ if res.json().get("error"):
+ print "ERROR: JSON %s" % res.json()
+
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
+ parser.add_argument("-u","--user-id",help="The user ID prefix e.g. '@irc_'")
+ parser.add_argument("-t","--token",help="Your access_token")
+ parser.add_argument("-r","--room",help="The room ID to kick members in")
+ parser.add_argument("-s","--homeserver",help="The base HS url e.g. http://matrix.org")
+ parser.add_argument("-w","--why",help="Reason for the kick. Optional.")
+ args = parser.parse_args()
+ if not args.room or not args.token or not args.user_id or not args.homeserver:
+ parser.print_help()
+ sys.exit(1)
+ else:
+ main(args.homeserver, args.room, args.token, args.user_id, args.why)
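+
+# Example invocation (hypothetical values):
+#   ./kick_users.py -s https://matrix.org -r '!room:matrix.org' -t ACCESS_TOKEN -u '@irc_'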
diff --git a/contrib/systemd/log_config.yaml b/contrib/systemd/log_config.yaml
new file mode 100644
index 00000000..d85bdd12
--- /dev/null
+++ b/contrib/systemd/log_config.yaml
@@ -0,0 +1,25 @@
+version: 1
+
+# In systemd's journal, loglevel is implicitly stored, so let's omit it
+# from the message text.
+formatters:
+ journal_fmt:
+ format: '%(name)s: [%(request)s] %(message)s'
+
+filters:
+ context:
+ (): synapse.util.logcontext.LoggingContextFilter
+ request: ""
+
+handlers:
+ journal:
+ class: systemd.journal.JournalHandler
+ formatter: journal_fmt
+ filters: [context]
+ SYSLOG_IDENTIFIER: synapse
+
+root:
+ level: INFO
+ handlers: [journal]
+
+disable_existing_loggers: False
diff --git a/contrib/systemd/synapse.service b/contrib/systemd/synapse.service
new file mode 100644
index 00000000..2e8cd21c
--- /dev/null
+++ b/contrib/systemd/synapse.service
@@ -0,0 +1,16 @@
+# This assumes that Synapse has been installed as a system package
+# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
+# rather than in a user home directory or similar under virtualenv.
+
+[Unit]
+Description=Synapse Matrix homeserver
+
+[Service]
+Type=simple
+User=synapse
+Group=synapse
+WorkingDirectory=/var/lib/synapse
+ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
+
+[Install]
+WantedBy=multi-user.target
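+
+# To start on boot (standard systemd workflow, assuming the unit is installed
+# as /etc/systemd/system/synapse.service):
+#   systemctl enable synapse.service && systemctl start synapse.service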
diff --git a/contrib/vertobot/.gitignore b/contrib/vertobot/.gitignore
new file mode 100644
index 00000000..071a7805
--- /dev/null
+++ b/contrib/vertobot/.gitignore
@@ -0,0 +1,2 @@
+vucbot.yaml
+vertobot.yaml
diff --git a/contrib/vertobot/bot.pl b/contrib/vertobot/bot.pl
new file mode 100755
index 00000000..31eed409
--- /dev/null
+++ b/contrib/vertobot/bot.pl
@@ -0,0 +1,309 @@
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+use 5.010; # //
+use IO::Socket::SSL qw(SSL_VERIFY_NONE);
+use IO::Async::Loop;
+use Net::Async::WebSocket::Client;
+use Net::Async::Matrix 0.11_002;
+use JSON;
+use YAML;
+use Data::UUID;
+use Getopt::Long;
+use Data::Dumper;
+
+binmode STDOUT, ":encoding(UTF-8)";
+binmode STDERR, ":encoding(UTF-8)";
+
+my $loop = IO::Async::Loop->new;
+# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
+# https://rt.cpan.org/Ticket/Display.html?id=93107
+ref $loop eq "IO::Async::Loop::Poll" and
+ warn "Using SSL with IO::Poll causes known memory-leaks!!\n";
+
+GetOptions(
+ 'C|config=s' => \my $CONFIG,
+ 'eval-from=s' => \my $EVAL_FROM,
+) or exit 1;
+
+if( defined $EVAL_FROM ) {
+ # An emergency 'eval() this file' hack
+ $SIG{HUP} = sub {
+ my $code = do {
+ open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
+ local $/; <$fh>
+ };
+
+ eval $code or warn "Cannot eval() - $@";
+ };
+}
+
+defined $CONFIG or die "Must supply --config\n";
+
+my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };
+
+my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
+# No harm in always applying this
+$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;
+
+# Track every Room object, so we can ->leave them all on shutdown
+my %bot_matrix_rooms;
+
+my $bridgestate = {};
+my $roomid_by_callid = {};
+
+my $bot_verto = Net::Async::WebSocket::Client->new(
+ on_frame => sub {
+ my ( $self, $frame ) = @_;
+ warn "[Verto] receiving $frame";
+ on_verto_json($frame);
+ },
+);
+$loop->add( $bot_verto );
+
+my $sessid = lc Data::UUID->new->create_str();
+
+my $bot_matrix = Net::Async::Matrix->new(
+ %MATRIX_CONFIG,
+ on_log => sub { warn "log: @_\n" },
+ on_invite => sub {
+ my ($matrix, $invite) = @_;
+ warn "[Matrix] invited to: " . $invite->{room_id} . " by " . $invite->{inviter} . "\n";
+
+ $matrix->join_room( $invite->{room_id} )->get;
+ },
+ on_room_new => sub {
+ my ($matrix, $room) = @_;
+
+ warn "[Matrix] have a room ID: " . $room->room_id . "\n";
+
+ $bot_matrix_rooms{$room->room_id} = $room;
+
+ # log in to verto on behalf of this room
+ $bridgestate->{$room->room_id}->{sessid} = $sessid;
+
+ $room->configure(
+ on_message => \&on_room_message,
+ );
+
+ my $f = send_verto_json_request("login", {
+ 'login' => $CONFIG{'verto-dialog-params'}{'login'},
+ 'passwd' => $CONFIG{'verto-config'}{'passwd'},
+ 'sessid' => $sessid,
+ });
+ $matrix->adopt_future($f);
+
+ # we deliberately don't paginate the room, as we only care about
+ # new calls
+ },
+ on_unknown_event => \&on_unknown_event,
+ on_error => sub {
+ print STDERR "Matrix failure: @_\n";
+ },
+);
+$loop->add( $bot_matrix );
+
+sub on_unknown_event
+{
+ my ($matrix, $event) = @_;
+ print Dumper($event);
+
+ my $room_id = $event->{room_id};
+ my %dp = %{$CONFIG{'verto-dialog-params'}};
+ $dp{callID} = $bridgestate->{$room_id}->{callid};
+
+ if ($event->{type} eq 'm.call.invite') {
+ $bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
+        $bridgestate->{$room_id}->{callid} = lc Data::UUID->new->create_str();
+ $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
+ $bridgestate->{$room_id}->{gathered_candidates} = 0;
+ $roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
+ # no trickle ICE in verto apparently
+ }
+ elsif ($event->{type} eq 'm.call.candidates') {
+ # XXX: compare call IDs
+ if (!$bridgestate->{$room_id}->{gathered_candidates}) {
+ $bridgestate->{$room_id}->{gathered_candidates} = 1;
+ my $offer = $bridgestate->{$room_id}->{offer};
+ my $candidate_block = {
+ audio => '',
+ video => '',
+ };
+ foreach (@{$event->{content}->{candidates}}) {
+ if ($_->{sdpMid}) {
+ $candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n";
+ }
+ else {
+ $candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n";
+ $candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n";
+ }
+ }
+
+ # XXX: assumes audio comes first
+ #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/;
+ #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/;
+
+ $offer =~ s/(m=video)/$candidate_block->{audio}$1/;
+ $offer =~ s/(.$)/$1\n$candidate_block->{video}$1/;
+
+ my $f = send_verto_json_request("verto.invite", {
+ "sdp" => $offer,
+ "dialogParams" => \%dp,
+ "sessid" => $bridgestate->{$room_id}->{sessid},
+ });
+ $matrix->adopt_future($f);
+ }
+ else {
+ # ignore them, as no trickle ICE, although we might as well
+ # batch them up
+ # foreach (@{$event->{content}->{candidates}}) {
+ # push @{$bridgestate->{$room_id}->{candidates}}, $_;
+ # }
+ }
+ }
+ elsif ($event->{type} eq 'm.call.hangup') {
+ if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
+ my $f = send_verto_json_request("verto.bye", {
+ "dialogParams" => \%dp,
+ "sessid" => $bridgestate->{$room_id}->{sessid},
+ });
+ $matrix->adopt_future($f);
+ }
+ else {
+ warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
+ }
+ }
+ else {
+ warn "Unhandled event: $event->{type}";
+ }
+}
+
+sub on_room_message
+{
+ my ($room, $from, $content) = @_;
+ my $room_id = $room->room_id;
+ warn "[Matrix] in $room_id: $from: " . $content->{body} . "\n";
+}
+
+Future->needs_all(
+ $bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
+ $bot_matrix->start;
+ }),
+
+ $bot_verto->connect(
+ %{ $CONFIG{"verto-bot"} },
+ on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
+ on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
+ )->on_done( sub {
+ warn("[Verto] connected to websocket");
+ }),
+)->get;
+
+$loop->attach_signal(
+ PIPE => sub { warn "pipe\n" }
+);
+$loop->attach_signal(
+ INT => sub { $loop->stop },
+);
+$loop->attach_signal(
+ TERM => sub { $loop->stop },
+);
+
+eval {
+ $loop->run;
+} or my $e = $@;
+
+# When the bot gets shut down, have it leave the rooms so it's clear to observers
+# that it is no longer running.
+# if( $CONFIG{"leave-on-shutdown"} // 1 ) {
+# print STDERR "Removing bot from Matrix rooms...\n";
+# Future->wait_all( map { $_->leave->else_done() } values %bot_matrix_rooms )->get;
+# }
+# else {
+# print STDERR "Leaving bot users in Matrix rooms.\n";
+# }
+
+die $e if $e;
+
+exit 0;
+
+{
+ my $json_id;
+ my $requests;
+
+ sub send_verto_json_request
+ {
+ $json_id ||= 1;
+
+ my ($method, $params) = @_;
+ my $json = {
+ jsonrpc => "2.0",
+ method => $method,
+ params => $params,
+ id => $json_id,
+ };
+ my $text = JSON->new->encode( $json );
+ warn "[Verto] sending $text";
+ $bot_verto->send_frame ( $text );
+ my $request = $loop->new_future;
+ $requests->{$json_id} = $request;
+ $json_id++;
+ return $request;
+ }
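+
+    # Note: ids are allocated sequentially and each pending id maps to an
+    # IO::Async future; on_verto_json() below completes or fails that future
+    # when a result/error frame carrying the same id arrives.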
+
+ sub send_verto_json_response
+ {
+ my ($result, $id) = @_;
+ my $json = {
+ jsonrpc => "2.0",
+ result => $result,
+ id => $id,
+ };
+ my $text = JSON->new->encode( $json );
+ warn "[Verto] sending $text";
+ $bot_verto->send_frame ( $text );
+ }
+
+ sub on_verto_json
+ {
+ my $json = JSON->new->decode( $_[0] );
+ if ($json->{method}) {
+ if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
+ $json->{method} eq 'verto.media') {
+
+ my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
+ my $room = $bot_matrix_rooms{$room_id};
+
+ if ($json->{params}->{sdp}) {
+ # HACK HACK HACK HACK
+ $room->_do_POST_json( "/send/m.call.answer", {
+ call_id => $bridgestate->{$room_id}->{matrix_callid},
+ version => 0,
+ answer => {
+ sdp => $json->{params}->{sdp},
+ type => "answer",
+ },
+ })->then( sub {
+ send_verto_json_response( {
+ method => $json->{method},
+ }, $json->{id});
+ })->get;
+ }
+ }
+ else {
+ warn ("[Verto] unhandled method: " . $json->{method});
+ send_verto_json_response( {
+ method => $json->{method},
+ }, $json->{id});
+ }
+ }
+ elsif ($json->{result}) {
+ $requests->{$json->{id}}->done($json->{result});
+ }
+ elsif ($json->{error}) {
+ $requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
+ }
+ }
+}
+
diff --git a/contrib/vertobot/bridge.pl b/contrib/vertobot/bridge.pl
new file mode 100755
index 00000000..a551850f
--- /dev/null
+++ b/contrib/vertobot/bridge.pl
@@ -0,0 +1,493 @@
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+use 5.010; # //
+use IO::Socket::SSL qw(SSL_VERIFY_NONE);
+use IO::Async::Loop;
+use Net::Async::WebSocket::Client;
+use Net::Async::HTTP;
+use Net::Async::HTTP::Server;
+use JSON;
+use YAML;
+use Data::UUID;
+use Getopt::Long;
+use Data::Dumper;
+use URI::Encode qw(uri_encode uri_decode);
+
+binmode STDOUT, ":encoding(UTF-8)";
+binmode STDERR, ":encoding(UTF-8)";
+
+my $msisdn_to_matrix = {
+ '447417892400' => '@matthew:matrix.org',
+};
+
+my $matrix_to_msisdn = {};
+foreach (keys %$msisdn_to_matrix) {
+ $matrix_to_msisdn->{$msisdn_to_matrix->{$_}} = $_;
+}
+
+
+my $loop = IO::Async::Loop->new;
+# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
+# https://rt.cpan.org/Ticket/Display.html?id=93107
+# ref $loop eq "IO::Async::Loop::Poll" and
+# warn "Using SSL with IO::Poll causes known memory-leaks!!\n";
+
+GetOptions(
+ 'C|config=s' => \my $CONFIG,
+ 'eval-from=s' => \my $EVAL_FROM,
+) or exit 1;
+
+if( defined $EVAL_FROM ) {
+ # An emergency 'eval() this file' hack
+ $SIG{HUP} = sub {
+ my $code = do {
+ open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
+ local $/; <$fh>
+ };
+
+ eval $code or warn "Cannot eval() - $@";
+ };
+}
+
+defined $CONFIG or die "Must supply --config\n";
+
+my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };
+
+my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
+# No harm in always applying this
+$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;
+
+my $bridgestate = {};
+my $roomid_by_callid = {};
+
+my $sessid = lc Data::UUID->new->create_str();
+my $as_token = $CONFIG{"matrix-bot"}->{as_token};
+my $hs_domain = $CONFIG{"matrix-bot"}->{domain};
+
+my $http = Net::Async::HTTP->new();
+$loop->add( $http );
+
+sub create_virtual_user
+{
+ my ($localpart) = @_;
+ my ( $response ) = $http->do_request(
+ method => "POST",
+ uri => URI->new(
+ $CONFIG{"matrix"}->{server}.
+ "/_matrix/client/api/v1/register?".
+ "access_token=$as_token&user_id=$localpart"
+ ),
+ content_type => "application/json",
+ content => <<EOT
+{
+ "type": "m.login.application_service",
+ "user": "$localpart"
+}
+EOT
+ )->get;
+ warn $response->as_string if ($response->code != 200);
+}
+
+my $http_server = Net::Async::HTTP::Server->new(
+ on_request => sub {
+ my $self = shift;
+ my ( $req ) = @_;
+
+ my $response;
+ my $path = uri_decode($req->path);
+ warn("request: $path");
+ if ($path =~ m#/users/\@(\+.*)#) {
+ # when queried about virtual users, auto-create them in the HS
+ my $localpart = $1;
+ create_virtual_user($localpart);
+ $response = HTTP::Response->new( 200 );
+ $response->add_content('{}');
+ $response->content_type( "application/json" );
+ }
+ elsif ($path =~ m#/transactions/(.*)#) {
+ my $event = JSON->new->decode($req->body);
+ print Dumper($event);
+
+ my $room_id = $event->{room_id};
+ my %dp = %{$CONFIG{'verto-dialog-params'}};
+ $dp{callID} = $bridgestate->{$room_id}->{callid};
+
+ if ($event->{type} eq 'm.room.membership') {
+ my $membership = $event->{content}->{membership};
+ my $state_key = $event->{state_key};
+                my $room_id = $event->{room_id};
+
+ if ($membership eq 'invite') {
+ # autojoin invites
+ my ( $response ) = $http->do_request(
+ method => "POST",
+ uri => URI->new(
+ $CONFIG{"matrix"}->{server}.
+ "/_matrix/client/api/v1/rooms/$room_id/join?".
+ "access_token=$as_token&user_id=$state_key"
+ ),
+ content_type => "application/json",
+ content => "{}",
+ )->get;
+ warn $response->as_string if ($response->code != 200);
+ }
+ }
+ elsif ($event->{type} eq 'm.call.invite') {
+ my $room_id = $event->{room_id};
+ $bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
+            $bridgestate->{$room_id}->{callid} = lc Data::UUID->new->create_str();
+ $bridgestate->{$room_id}->{sessid} = $sessid;
+ # $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
+ my $offer = $event->{content}->{offer}->{sdp};
+ # $bridgestate->{$room_id}->{gathered_candidates} = 0;
+ $roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
+ # no trickle ICE in verto apparently
+
+ my $f = send_verto_json_request("verto.invite", {
+ "sdp" => $offer,
+ "dialogParams" => \%dp,
+ "sessid" => $bridgestate->{$room_id}->{sessid},
+ });
+ $self->adopt_future($f);
+ }
+ # elsif ($event->{type} eq 'm.call.candidates') {
+ # # XXX: this could fire for both matrix->verto and verto->matrix calls
+ # # and races as it collects candidates. much better to just turn off
+ # # candidate gathering in the webclient entirely for now
+ #
+ # my $room_id = $event->{room_id};
+ # # XXX: compare call IDs
+ # if (!$bridgestate->{$room_id}->{gathered_candidates}) {
+ # $bridgestate->{$room_id}->{gathered_candidates} = 1;
+ # my $offer = $bridgestate->{$room_id}->{offer};
+ # my $candidate_block = "";
+ # foreach (@{$event->{content}->{candidates}}) {
+ # $candidate_block .= "a=" . $_->{candidate} . "\r\n";
+ # }
+ # # XXX: collate using the right m= line - for now assume audio call
+ # $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;
+ #
+ # my $f = send_verto_json_request("verto.invite", {
+ # "sdp" => $offer,
+ # "dialogParams" => \%dp,
+ # "sessid" => $bridgestate->{$room_id}->{sessid},
+ # });
+ # $self->adopt_future($f);
+ # }
+ # else {
+ # # ignore them, as no trickle ICE, although we might as well
+ # # batch them up
+ # # foreach (@{$event->{content}->{candidates}}) {
+ # # push @{$bridgestate->{$room_id}->{candidates}}, $_;
+ # # }
+ # }
+ # }
+ elsif ($event->{type} eq 'm.call.answer') {
+ # grab the answer and relay it to verto as a verto.answer
+ my $room_id = $event->{room_id};
+
+ my $answer = $event->{content}->{answer}->{sdp};
+ my $f = send_verto_json_request("verto.answer", {
+ "sdp" => $answer,
+ "dialogParams" => \%dp,
+ "sessid" => $bridgestate->{$room_id}->{sessid},
+ });
+ $self->adopt_future($f);
+ }
+ elsif ($event->{type} eq 'm.call.hangup') {
+ my $room_id = $event->{room_id};
+ if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
+ my $f = send_verto_json_request("verto.bye", {
+ "dialogParams" => \%dp,
+ "sessid" => $bridgestate->{$room_id}->{sessid},
+ });
+ $self->adopt_future($f);
+ }
+ else {
+ warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
+ }
+ }
+ else {
+ warn "Unhandled event: $event->{type}";
+ }
+
+ $response = HTTP::Response->new( 200 );
+ $response->add_content('{}');
+ $response->content_type( "application/json" );
+ }
+ else {
+ warn "Unhandled path: $path";
+ $response = HTTP::Response->new( 404 );
+ }
+
+ $req->respond( $response );
+ },
+);
+$loop->add( $http_server );
+
+$http_server->listen(
+ addr => { family => "inet", socktype => "stream", port => 8009 },
+ on_listen_error => sub { die "Cannot listen - $_[-1]\n" },
+);
+
+my $bot_verto = Net::Async::WebSocket::Client->new(
+ on_frame => sub {
+ my ( $self, $frame ) = @_;
+ warn "[Verto] receiving $frame";
+ on_verto_json($frame);
+ },
+);
+$loop->add( $bot_verto );
+
+my $verto_connecting = $loop->new_future;
+$bot_verto->connect(
+ %{ $CONFIG{"verto-bot"} },
+ on_connected => sub {
+ warn("[Verto] connected to websocket");
+ if (not $verto_connecting->is_done) {
+ $verto_connecting->done($bot_verto);
+
+ send_verto_json_request("login", {
+ 'login' => $CONFIG{'verto-dialog-params'}{'login'},
+ 'passwd' => $CONFIG{'verto-config'}{'passwd'},
+ 'sessid' => $sessid,
+ });
+ }
+ },
+ on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
+ on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
+);
+
+# die Dumper($verto_connecting);
+
+my $as_url = $CONFIG{"matrix-bot"}->{as_url};
+
+Future->needs_all(
+ $http->do_request(
+ method => "POST",
+ uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
+ content_type => "application/json",
+ content => <<EOT
+{
+ "as_token": "$as_token",
+ "url": "$as_url",
+ "namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] }
+}
+EOT
+ )->then( sub{
+ my ($response) = (@_);
+ warn $response->as_string if ($response->code != 200);
+ return Future->done;
+ }),
+ $verto_connecting,
+)->get;
+
+$loop->attach_signal(
+ PIPE => sub { warn "pipe\n" }
+);
+$loop->attach_signal(
+ INT => sub { $loop->stop },
+);
+$loop->attach_signal(
+ TERM => sub { $loop->stop },
+);
+
+eval {
+ $loop->run;
+} or my $e = $@;
+
+die $e if $e;
+
+exit 0;
+
+{
+ my $json_id;
+ my $requests;
+
+ sub send_verto_json_request
+ {
+ $json_id ||= 1;
+
+ my ($method, $params) = @_;
+ my $json = {
+ jsonrpc => "2.0",
+ method => $method,
+ params => $params,
+ id => $json_id,
+ };
+ my $text = JSON->new->encode( $json );
+ warn "[Verto] sending $text";
+ $bot_verto->send_frame ( $text );
+ my $request = $loop->new_future;
+ $requests->{$json_id} = $request;
+ $json_id++;
+ return $request;
+ }
+
+ sub send_verto_json_response
+ {
+ my ($result, $id) = @_;
+ my $json = {
+ jsonrpc => "2.0",
+ result => $result,
+ id => $id,
+ };
+ my $text = JSON->new->encode( $json );
+ warn "[Verto] sending $text";
+ $bot_verto->send_frame ( $text );
+ }
+
+ sub on_verto_json
+ {
+ my $json = JSON->new->decode( $_[0] );
+ if ($json->{method}) {
+ if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
+ $json->{method} eq 'verto.media') {
+
+ my $caller = $json->{dialogParams}->{caller_id_number};
+ my $callee = $json->{dialogParams}->{destination_number};
+ my $caller_user = '@+' . $caller . ':' . $hs_domain;
+ my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee";
+ my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
+
+ if ($json->{params}->{sdp}) {
+ $http->do_request(
+ method => "POST",
+ uri => URI->new(
+ $CONFIG{"matrix"}->{server}.
+ "/_matrix/client/api/v1/send/m.call.answer?".
+ "access_token=$as_token&user_id=$caller_user"
+ ),
+ content_type => "application/json",
+ content => JSON->new->encode({
+ call_id => $bridgestate->{$room_id}->{matrix_callid},
+ version => 0,
+ answer => {
+ sdp => $json->{params}->{sdp},
+ type => "answer",
+ },
+ }),
+ )->then( sub {
+ send_verto_json_response( {
+ method => $json->{method},
+ }, $json->{id});
+ })->get;
+ }
+ }
+ elsif ($json->{method} eq 'verto.invite') {
+ my $caller = $json->{dialogParams}->{caller_id_number};
+ my $callee = $json->{dialogParams}->{destination_number};
+ my $caller_user = '@+' . $caller . ':' . $hs_domain;
+ my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee";
+
+ my $alias = ($caller lt $callee) ? ($caller.'-'.$callee) : ($callee.'-'.$caller);
+ my $room_id;
+
+ # create a virtual user for the caller if needed.
+ create_virtual_user($caller);
+
+ # create a room of form #peer-peer and invite the callee
+ $http->do_request(
+ method => "POST",
+ uri => URI->new(
+ $CONFIG{"matrix"}->{server}.
+ "/_matrix/client/api/v1/createRoom?".
+ "access_token=$as_token&user_id=$caller_user"
+ ),
+ content_type => "application/json",
+ content => JSON->new->encode({
+ room_alias_name => $alias,
+ invite => [ $callee_user ],
+ }),
+ )->then( sub {
+ my ( $response ) = @_;
+ my $resp = JSON->new->decode($response->content);
+ $room_id = $resp->{room_id};
+ $roomid_by_callid->{$json->{params}->{callID}} = $room_id;
+ })->get;
+
+ # join it
+ my ($response) = $http->do_request(
+ method => "POST",
+ uri => URI->new(
+ $CONFIG{"matrix"}->{server}.
+ "/_matrix/client/api/v1/join/$room_id?".
+ "access_token=$as_token&user_id=$caller_user"
+ ),
+ content_type => "application/json",
+ content => '{}',
+ )->get;
+
+                $bridgestate->{$room_id}->{matrix_callid} = lc Data::UUID->new->create_str();
+ $bridgestate->{$room_id}->{callid} = $json->{dialogParams}->{callID};
+ $bridgestate->{$room_id}->{sessid} = $sessid;
+
+ # put the m.call.invite in there
+ $http->do_request(
+ method => "POST",
+ uri => URI->new(
+ $CONFIG{"matrix"}->{server}.
+ "/_matrix/client/api/v1/send/m.call.invite?".
+ "access_token=$as_token&user_id=$caller_user"
+ ),
+ content_type => "application/json",
+ content => JSON->new->encode({
+ call_id => $bridgestate->{$room_id}->{matrix_callid},
+ version => 0,
+ answer => {
+ sdp => $json->{params}->{sdp},
+ type => "offer",
+ },
+ }),
+ )->then( sub {
+ # acknowledge the verto
+ send_verto_json_response( {
+ method => $json->{method},
+ }, $json->{id});
+ })->get;
+ }
+ elsif ($json->{method} eq 'verto.bye') {
+ my $caller = $json->{dialogParams}->{caller_id_number};
+ my $callee = $json->{dialogParams}->{destination_number};
+ my $caller_user = '@+' . $caller . ':' . $hs_domain;
+ my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee";
+ my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
+
+ # put the m.call.hangup into the room
+ $http->do_request(
+ method => "POST",
+ uri => URI->new(
+ $CONFIG{"matrix"}->{server}.
+ "/_matrix/client/api/v1/send/m.call.hangup?".
+ "access_token=$as_token&user_id=$caller_user"
+ ),
+ content_type => "application/json",
+ content => JSON->new->encode({
+ call_id => $bridgestate->{$room_id}->{matrix_callid},
+ version => 0,
+ }),
+ )->then( sub {
+ # acknowledge the verto
+ send_verto_json_response( {
+ method => $json->{method},
+ }, $json->{id});
+ })->get;
+ }
+ else {
+ warn ("[Verto] unhandled method: " . $json->{method});
+ send_verto_json_response( {
+ method => $json->{method},
+ }, $json->{id});
+ }
+ }
+ elsif ($json->{result}) {
+ $requests->{$json->{id}}->done($json->{result});
+ }
+ elsif ($json->{error}) {
+ $requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
+ }
+ }
+}
+
diff --git a/contrib/vertobot/config.yaml b/contrib/vertobot/config.yaml
new file mode 100644
index 00000000..555d9389
--- /dev/null
+++ b/contrib/vertobot/config.yaml
@@ -0,0 +1,32 @@
+# Generic Matrix connection params
+matrix:
+ server: 'matrix.org'
+ SSL: 1
+
+# Bot-user connection details
+matrix-bot:
+ user_id: '@vertobot:matrix.org'
+ password: ''
+ domain: 'matrix.org"
+ as_url: 'http://localhost:8009'
+ as_token: 'vertobot123'
+
+verto-bot:
+ host: webrtc.freeswitch.org
+ service: 8081
+ url: "ws://webrtc.freeswitch.org:8081/"
+
+verto-config:
+ passwd: 1234
+
+verto-dialog-params:
+ useVideo: false
+ useStereo: false
+ tag: "webcam"
+ login: "1008@webrtc.freeswitch.org"
+ destination_number: "9664"
+ caller_id_name: "FreeSWITCH User"
+ caller_id_number: "1008"
+ callID: ""
+ remote_caller_id_name: "Outbound Call"
+ remote_caller_id_number: "9664"
diff --git a/contrib/vertobot/cpanfile b/contrib/vertobot/cpanfile
new file mode 100644
index 00000000..800dc288
--- /dev/null
+++ b/contrib/vertobot/cpanfile
@@ -0,0 +1,14 @@
+requires 'parent', 0;
+requires 'Future', '>= 0.29';
+requires 'Net::Async::Matrix', '>= 0.11_002';
+requires 'Net::Async::Matrix::Utils';
+requires 'Net::Async::WebSocket::Protocol', 0;
+requires 'Data::UUID', 0;
+requires 'IO::Async', '>= 0.63';
+requires 'IO::Async::SSL', 0;
+requires 'IO::Socket::SSL', 0;
+requires 'YAML', 0;
+requires 'JSON', 0;
+requires 'Getopt::Long', 0;
+
+
diff --git a/contrib/vertobot/verto-example.json b/contrib/vertobot/verto-example.json
new file mode 100644
index 00000000..e0230498
--- /dev/null
+++ b/contrib/vertobot/verto-example.json
@@ -0,0 +1,207 @@
+# JSON is shown in *reverse* chronological order.
+# Whether each frame was sent or received is implicit.
+
+{
+ "jsonrpc": "2.0",
+ "id": 7,
+ "result": {
+ "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
+ "message": "CALL ENDED",
+ "causeCode": 16,
+ "cause": "NORMAL_CLEARING",
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ }
+}
+
+{
+ "jsonrpc": "2.0",
+ "method": "verto.bye",
+ "params": {
+ "dialogParams": {
+ "useVideo": false,
+ "useStereo": true,
+ "tag": "webcam",
+ "login": "1008@webrtc.freeswitch.org",
+ "destination_number": "9664",
+ "caller_id_name": "FreeSWITCH User",
+ "caller_id_number": "1008",
+ "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
+ "remote_caller_id_name": "Outbound Call",
+ "remote_caller_id_number": "9664"
+ },
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ },
+ "id": 7
+}
+
+{
+ "jsonrpc": "2.0",
+ "id": 6,
+ "result": {
+ "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
+ "action": "toggleHold",
+ "holdState": "active",
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ }
+}
+
+{
+ "jsonrpc": "2.0",
+ "method": "verto.modify",
+ "params": {
+ "action": "toggleHold",
+ "dialogParams": {
+ "useVideo": false,
+ "useStereo": true,
+ "tag": "webcam",
+ "login": "1008@webrtc.freeswitch.org",
+ "destination_number": "9664",
+ "caller_id_name": "FreeSWITCH User",
+ "caller_id_number": "1008",
+ "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
+ "remote_caller_id_name": "Outbound Call",
+ "remote_caller_id_number": "9664"
+ },
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ },
+ "id": 6
+}
+
+{
+ "jsonrpc": "2.0",
+ "id": 5,
+ "result": {
+ "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
+ "action": "toggleHold",
+ "holdState": "held",
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ }
+}
+
+{
+ "jsonrpc": "2.0",
+ "method": "verto.modify",
+ "params": {
+ "action": "toggleHold",
+ "dialogParams": {
+ "useVideo": false,
+ "useStereo": true,
+ "tag": "webcam",
+ "login": "1008@webrtc.freeswitch.org",
+ "destination_number": "9664",
+ "caller_id_name": "FreeSWITCH User",
+ "caller_id_number": "1008",
+ "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
+ "remote_caller_id_name": "Outbound Call",
+ "remote_caller_id_number": "9664"
+ },
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ },
+ "id": 5
+}
+
+{
+ "jsonrpc": "2.0",
+ "id": 349819,
+ "result": {
+ "method": "verto.answer"
+ }
+}
+
+{
+ "jsonrpc": "2.0",
+ "id": 349819,
+ "method": "verto.answer",
+ "params": {
+ "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
+ "sdp": "v=0\no=FreeSWITCH 1417101432 1417101433 IN IP4 209.105.235.10\ns=FreeSWITCH\nc=IN IP4 209.105.235.10\nt=0 0\na=msid-semantic: WMS jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\nm=audio 30134 RTP/SAVPF 111 126\na=rtpmap:111 opus/48000/2\na=fmtp:111 minptime=10; stereo=1\na=rtpmap:126 telephone-event/8000\na=silenceSupp:off - - - -\na=ptime:20\na=sendrecv\na=fingerprint:sha-256 F8:72:18:E9:72:89:99:22:5B:F8:B6:C6:C6:0D:C5:9B:B2:FB:BC:CA:8D:AB:13:8A:66:E1:37:38:A0:16:AA:41\na=rtcp-mux\na=rtcp:30134 IN IP4 209.105.235.10\na=ssrc:210967934 cname:rOIEajpw4FocakWY\na=ssrc:210967934 msid:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq a0\na=ssrc:210967934 mslabel:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\na=ssrc:210967934 label:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vqa0\na=ice-ufrag:OKwTmGLapwmxn7OF\na=ice-pwd:MmaMwq8rVmtWxfLbQ7U2Ew3T\na=candidate:2372654928 1 udp 659136 209.105.235.10 30134 typ host generation 0\n"
+ }
+}
+
+{
+ "jsonrpc": "2.0",
+ "id": 4,
+ "result": {
+ "message": "CALL CREATED",
+ "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ }
+}
+
+{
+ "jsonrpc": "2.0",
+ "method": "verto.invite",
+ "params": {
+ "sdp": "v=0\r\no=- 1381685806032722557 2 IN IP4 127.0.0.1\r\ns=-\r\nt=0 0\r\na=group:BUNDLE audio\r\na=msid-semantic: WMS 6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\nm=audio 63088 RTP/SAVPF 111 103 104 0 8 106 105 13 126\r\nc=IN IP4 81.138.8.249\r\na=rtcp:63088 IN IP4 81.138.8.249\r\na=candidate:460398169 1 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:460398169 2 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:3460887983 1 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:3460887983 2 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:945327227 1 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:945327227 2 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:1441981097 1 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:1441981097 2 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:2160789855 1 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=candidate:2160789855 2 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=ice-ufrag:cP4qeRhn0LpcpA88\r\na=ice-pwd:fREmgSkXsDLGUUH1bwfrBQhW\r\na=ice-options:google-ice\r\na=fingerprint:sha-256 AF:35:64:1B:62:8A:EF:27:AE:2B:88:2E:FE:78:29:0B:08:DA:64:6C:DE:02:57:E3:EE:B1:D7:86:B8:36:8F:B0\r\na=setup:actpass\r\na=mid:audio\r\na=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\r\na=extmap:3 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\r\na=sendrecv\r\na=rtcp-mux\r\na=rtpmap:111 opus/48000/2\r\na=fmtp:111 minptime=10; stereo=1\r\na=rtpmap:103 ISAC/16000\r\na=rtpmap:104 ISAC/32000\r\na=rtpmap:0 PCMU/8000\r\na=rtpmap:8 PCMA/8000\r\na=rtpmap:106 CN/32000\r\na=rtpmap:105 CN/16000\r\na=rtpmap:13 CN/8000\r\na=rtpmap:126 telephone-event/8000\r\na=maxptime:60\r\na=ssrc:558827154 cname:vdKHBNqa17t2gmE3\r\na=ssrc:558827154 msid:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\na=ssrc:558827154 mslabel:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\na=ssrc:558827154 label:bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\n",
+ "dialogParams": {
+ "useVideo": false,
+ "useStereo": true,
+ "tag": "webcam",
+ "login": "1008@webrtc.freeswitch.org",
+ "destination_number": "9664",
+ "caller_id_name": "FreeSWITCH User",
+ "caller_id_number": "1008",
+ "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
+ "remote_caller_id_name": "Outbound Call",
+ "remote_caller_id_number": "9664"
+ },
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ },
+ "id": 4
+}
+
+{
+ "jsonrpc": "2.0",
+ "id": 3,
+ "result": {
+ "message": "logged in",
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ }
+}
+
+{
+ "jsonrpc": "2.0",
+ "id": 1,
+ "error": {
+ "code": -32000,
+ "message": "Authentication Required"
+ }
+}
+
+{
+ "jsonrpc": "2.0",
+ "method": "login",
+ "params": {
+ "login": "1008@webrtc.freeswitch.org",
+ "passwd": "1234",
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ },
+ "id": 3
+}
+
+{
+ "jsonrpc": "2.0",
+ "id": 2,
+ "error": {
+ "code": -32000,
+ "message": "Authentication Required"
+ }
+}
+
+{
+ "jsonrpc": "2.0",
+ "method": "login",
+ "params": {
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ },
+ "id": 1
+}
+
+{
+ "jsonrpc": "2.0",
+ "method": "login",
+ "params": {
+ "sessid": "03a11060-3e14-23b6-c620-51b892c52983"
+ },
+ "id": 2
+}
diff --git a/demo/README b/demo/README
new file mode 100644
index 00000000..0b584ceb
--- /dev/null
+++ b/demo/README
@@ -0,0 +1,22 @@
+Requires you to have done:
+ python setup.py develop
+
+
+The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
+It will also start a web server on port 8000 pointed at the webclient.
+
+stop.sh will stop the synapse servers and the webclient.
+
+clean.sh will delete the databases and log files.
+
+To start a completely new set of servers, run:
+
+ ./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
+
+
+Logs and SQLite databases will be stored in demo/808{0,1,2}.{log,db}.
+
+
+
+Also note that when joining a public room on a different HS via "#foo:bar.net", you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name.
+
diff --git a/demo/clean.sh b/demo/clean.sh
new file mode 100755
index 00000000..418ca945
--- /dev/null
+++ b/demo/clean.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -e
+
+DIR="$( cd "$( dirname "$0" )" && pwd )"
+
+PID_FILE="$DIR/servers.pid"
+
+if [ -f "$PID_FILE" ]; then
+  echo "servers.pid exists! Stop the demo servers before cleaning."
+  exit 1
+fi
+
+for port in 8080 8081 8082; do
+    rm -rf "$DIR/$port"
+    rm -rf "$DIR/media_store.$port"
+done
+
+rm -rf $DIR/etc
diff --git a/demo/demo.tls.dh b/demo/demo.tls.dh
new file mode 100644
index 00000000..cbc58272
--- /dev/null
+++ b/demo/demo.tls.dh
@@ -0,0 +1,9 @@
+2048-bit DH parameters taken from rfc3526
+-----BEGIN DH PARAMETERS-----
+MIIBCAKCAQEA///////////JD9qiIWjCNMTGYouA3BzRKQJOCIpnzHQCC76mOxOb
+IlFKCHmONATd75UZs806QxswKwpt8l8UN0/hNW1tUcJF5IW1dmJefsb0TELppjft
+awv/XLb0Brft7jhr+1qJn6WunyQRfEsf5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT
+mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVSu57VKQdwlpZtZww1Tkq8mATxdGwIyhgh
+fDKQXkYuNs474553LBgOhgObJ4Oi7Aeij7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq
+5RXSJhiY+gUQFXKOWoqsqmj//////////wIBAg==
+-----END DH PARAMETERS-----
diff --git a/demo/start.sh b/demo/start.sh
new file mode 100755
index 00000000..dcc4d6f4
--- /dev/null
+++ b/demo/start.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "$0" )" && pwd )"
+
+CWD=$(pwd)
+
+cd "$DIR/.."
+
+mkdir -p demo/etc
+
+export PYTHONPATH=$(readlink -f $(pwd))
+
+
+echo $PYTHONPATH
+
+for port in 8080 8081 8082; do
+ echo "Starting server on port $port... "
+
+ https_port=$((port + 400))
+ mkdir -p demo/$port
+ pushd demo/$port
+
+ #rm $DIR/etc/$port.config
+ python -m synapse.app.homeserver \
+ --generate-config \
+ -H "localhost:$https_port" \
+ --config-path "$DIR/etc/$port.config" \
+ --report-stats no
+
+ # Check script parameters
+    if [ $# -eq 1 ]; then
+        if [ "$1" = "--no-rate-limit" ]; then
+            # Set high limits in config file to disable rate limiting
+            perl -p -i -e 's/rc_messages_per_second.*/rc_messages_per_second: 1000/g' "$DIR/etc/$port.config"
+            perl -p -i -e 's/rc_message_burst_count.*/rc_message_burst_count: 1000/g' "$DIR/etc/$port.config"
+        fi
+    fi
+
+    perl -p -i -e 's/^enable_registration:.*/enable_registration: true/g' "$DIR/etc/$port.config"
+
+    if ! grep -F -q "full_twisted_stacktraces" "$DIR/etc/$port.config"; then
+        echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config"
+    fi
+    if ! grep -F -q "report_stats" "$DIR/etc/$port.config"; then
+        echo "report_stats: false" >> "$DIR/etc/$port.config"
+    fi
+
+    python -m synapse.app.homeserver \
+        --config-path "$DIR/etc/$port.config" \
+        -D \
+        -vv
+
+ popd
+done
+
+cd "$CWD"
diff --git a/demo/stop.sh b/demo/stop.sh
new file mode 100755
index 00000000..85a1d2c1
--- /dev/null
+++ b/demo/stop.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "$0" )" && pwd )"
+
+FILES=$(find "$DIR" -name "*.pid" -type f);
+
+for pid_file in $FILES; do
+ pid=$(cat "$pid_file")
+ if [[ $pid ]]; then
+ echo "Killing $pid_file with $pid"
+ kill $pid
+ fi
+done
+
diff --git a/demo/webserver.py b/demo/webserver.py
new file mode 100644
index 00000000..875095c8
--- /dev/null
+++ b/demo/webserver.py
@@ -0,0 +1,62 @@
+import argparse
+import BaseHTTPServer
+import os
+import SimpleHTTPServer
+
+from daemonize import Daemonize
+
+class SimpleHTTPRequestHandlerWithPOST(SimpleHTTPServer.SimpleHTTPRequestHandler):
+    """Serve files, and accept any POST request as a file upload."""
+
+    UPLOAD_PATH = "upload"
+
+    def do_POST(self):
+        path = os.path.join(self.UPLOAD_PATH, os.path.basename(self.path))
+        length = self.headers['content-length']
+        data = self.rfile.read(int(length))
+
+        with open(path, 'wb') as fh:
+            fh.write(data)
+
+        self.send_response(200)
+        self.send_header('Content-Type', 'application/json')
+        self.end_headers()
+
+        # Return the (server-relative) URL of the uploaded file
+        self.wfile.write('{"url":"/%s"}' % path)
+
+
+def setup():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("directory")
+ parser.add_argument("-p", "--port", dest="port", type=int, default=8080)
+ parser.add_argument('-P', "--pid-file", dest="pid", default="web.pid")
+ args = parser.parse_args()
+
+ # Get absolute path to directory to serve, as daemonize changes to '/'
+ os.chdir(args.directory)
+ dr = os.getcwd()
+
+ httpd = BaseHTTPServer.HTTPServer(
+ ('', args.port),
+ SimpleHTTPRequestHandlerWithPOST
+ )
+
+ def run():
+ os.chdir(dr)
+ httpd.serve_forever()
+
+ daemon = Daemonize(
+ app="synapse-webclient",
+ pid=args.pid,
+ action=run,
+ auto_close_fds=False,
+ )
+
+ daemon.start()
+
+if __name__ == '__main__':
+ setup()
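+
+# Usage sketch (path and port are illustrative; the demo README serves the
+# web client directory on port 8000):
+#
+#   python demo/webserver.py webclient -p 8000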
diff --git a/docs/CAPTCHA_SETUP b/docs/CAPTCHA_SETUP
new file mode 100644
index 00000000..75ff8098
--- /dev/null
+++ b/docs/CAPTCHA_SETUP
@@ -0,0 +1,31 @@
+Captcha can be enabled for this home server. This file explains how to do that.
+The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.
+
+Getting keys
+------------
+Requires a public/private key pair from:
+
+https://developers.google.com/recaptcha/
+
+
+Setting ReCaptcha Keys
+----------------------
+The keys are config options in the home server config file. If they are not
+present, you can generate them via --generate-config. Set the following values:
+
+ recaptcha_public_key: YOUR_PUBLIC_KEY
+ recaptcha_private_key: YOUR_PRIVATE_KEY
+
+In addition, you MUST enable captchas via:
+
+ enable_registration_captcha: true
+
+Configuring IP used for auth
+----------------------------
+The ReCaptcha API requires that the IP address of the user who solved the
+captcha is sent. If the client is connecting through a proxy or load balancer,
+it may be required to use the X-Forwarded-For (XFF) header instead of the origin
+IP address. This can be configured as an option on the home server like so:
+
+ captcha_ip_origin_is_x_forwarded: true
+
diff --git a/docs/README.rst b/docs/README.rst
new file mode 100644
index 00000000..3012da8b
--- /dev/null
+++ b/docs/README.rst
@@ -0,0 +1,6 @@
+All matrix-generic documentation now lives in its own project at
+
+github.com/matrix-org/matrix-doc.git
+
+Only Synapse implementation-specific documentation lives here now,
+together with some older material that will shortly be migrated over to matrix-doc.
diff --git a/docs/ancient_architecture_notes.rst b/docs/ancient_architecture_notes.rst
new file mode 100644
index 00000000..2a5a2613
--- /dev/null
+++ b/docs/ancient_architecture_notes.rst
@@ -0,0 +1,59 @@
+.. WARNING::
+ These architecture notes are spectacularly old, and date back to when Synapse
+ was just federation code in isolation. This should be merged into the main
+ spec.
+
+
+= Server to Server =
+
+== Server to Server Stack ==
+
+To use the server to server stack, home servers should only need to interact with the Messaging layer.
+
+The server to server side of things is split into 4 distinct layers:
+
+ 1. Messaging Layer
+ 2. Pdu Layer
+ 3. Transaction Layer
+ 4. Transport Layer
+
+Where the bottom (the transport layer) is what talks to the internet via HTTP, and the top (the messaging layer) talks to the rest of the Home Server with a domain specific API.
+
+1. Messaging Layer
+ This is what the rest of the Home Server hits to send messages, join rooms, etc. It also allows you to register callbacks for when it gets notified by lower levels that e.g. a new message has been received.
+
+ It is responsible for serializing requests to send to the data layer, and to parse requests received from the data layer.
+
+
+2. PDU Layer
+ This layer handles:
+ * duplicate pdu_ids - i.e., it makes sure we ignore them.
+ * responding to requests for a given pdu_id
+ * responding to requests for all metadata for a given context (i.e. room)
+ * handling incoming backfill requests
+
+ So it has to parse incoming messages to discover which are metadata and which aren't, and has to correctly clobber existing metadata where appropriate.
+
+ For each incoming PDU, it has to check the PDUs it references to see if we have missed any. If we have, we go and ask someone (another home server) for them.
+
+
+3. Transaction Layer
+ This layer makes incoming requests idempotent. I.e., it stores which transaction ids we have seen and what our responses were. If we have already seen a message with the given transaction id, we do not notify higher levels but simply respond with the previous response. (A minimal sketch of this appears after the list.)
+
+ The transaction_id comes from "GET /send/<tx_id>/".
+
+ It's also responsible for batching PDUs into a single transaction for sending to remote destinations, so that we only ever have one transaction in flight to a given destination at any one time.
+
+ This is also responsible for answering requests for things after a given set of transactions, i.e., requests for everything after 'ver' X.
+
+
+4. Transport Layer
+ This is responsible for starting a HTTP server and hitting the correct callbacks on the Transaction layer, as well as sending both data and requests for data.
+
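+As a rough illustration (the class and method names here are hypothetical,
+not Synapse's actual code), the idempotency logic of the Transaction layer
+boils down to a map from transaction id to the previously computed
+response::
+
+    class TransactionStore(object):
+        def __init__(self):
+            self._responses = {}  # (origin, tx_id) -> response
+
+        def on_incoming_transaction(self, origin, tx_id, pdus, handle_pdus):
+            key = (origin, tx_id)
+            if key in self._responses:
+                # Seen before: don't notify higher levels, replay the response.
+                return self._responses[key]
+            response = handle_pdus(pdus)
+            self._responses[key] = response
+            return response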
+
+== Persistence ==
+
+We persist things in a single sqlite3 database. All database queries get run on a separate, dedicated thread. This means that we only ever have one query running at a time, making it a lot easier to do things in a safe manner.
+
+The queries are located in the synapse.persistence.transactions module, and the table information in the synapse.persistence.tables module.
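+
+A minimal sketch of the dedicated-thread pattern, where a worker thread
+drains a queue of queries so at most one runs at a time (illustrative only,
+not the actual Synapse code)::
+
+    import Queue      # Python 2 standard library names
+    import sqlite3
+    import threading
+
+    class SingleThreadedDB(object):
+        def __init__(self, path):
+            self._queue = Queue.Queue()
+            worker = threading.Thread(target=self._run, args=(path,))
+            worker.daemon = True
+            worker.start()
+
+        def execute(self, sql, args=()):
+            result = Queue.Queue()
+            self._queue.put((sql, args, result))
+            return result.get()  # blocks until the worker has run the query
+
+        def _run(self, path):
+            conn = sqlite3.connect(path)  # opened on the worker thread itself
+            while True:
+                sql, args, result = self._queue.get()
+                result.put(conn.execute(sql, args).fetchall())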
+
diff --git a/docs/application_services.rst b/docs/application_services.rst
new file mode 100644
index 00000000..7e87ac9a
--- /dev/null
+++ b/docs/application_services.rst
@@ -0,0 +1,36 @@
+Registering an Application Service
+==================================
+
+The registration of new application services depends on the homeserver used.
+In synapse, you need to create a new configuration file for your AS and add it
+to the list specified under the ``app_service_config_files`` config
+option in your synapse config.
+
+For example:
+
+.. code-block:: yaml
+
+ app_service_config_files:
+ - /home/matrix/.synapse/<your-AS>.yaml
+
+
+The format of the AS configuration file is as follows:
+
+.. code-block:: yaml
+
+ url: <base url of AS>
+ as_token: <token AS will add to requests to HS>
+ hs_token: <token HS will add to requests to AS>
+ sender_localpart: <localpart of AS user>
+ namespaces:
+ users: # List of users we're interested in
+ - exclusive: <bool>
+ regex: <regex>
+ - ...
+ aliases: [] # List of aliases we're interested in
+ rooms: [] # List of room ids we're interested in
+
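+For example, a registration file for a hypothetical bridge AS (all values
+below are illustrative) might look like:
+
+.. code-block:: yaml
+
+  url: http://localhost:9000
+  as_token: 0123456789abcdef
+  hs_token: fedcba9876543210
+  sender_localpart: _bridge
+  namespaces:
+    users:
+      - exclusive: true
+        regex: "@_bridge_.*"
+    aliases: []
+    rooms: []
+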
+See the spec_ for further details on how application services work.
+
+.. _spec: https://github.com/matrix-org/matrix-doc/blob/master/specification/25_application_service_api.rst#application-service-api
+
diff --git a/docs/architecture.rst b/docs/architecture.rst
new file mode 100644
index 00000000..98050428
--- /dev/null
+++ b/docs/architecture.rst
@@ -0,0 +1,68 @@
+Synapse Architecture
+====================
+
+As of the end of Oct 2014, Synapse's overall architecture looks like::
+
+ synapse
+ .-----------------------------------------------------.
+ | Notifier |
+ | ^ | |
+ | | | |
+ | .------------|------. |
+ | | handlers/ | | |
+ | | v | |
+ | | Event*Handler <--------> rest/* <=> Client
+ | | Rooms*Handler | |
+ HSes <=> federation/* <==> FederationHandler | |
+ | | | PresenceHandler | |
+ | | | TypingHandler | |
+ | | '-------------------' |
+ | | | | |
+ | | state/* | |
+ | | | | |
+ | | v v |
+ | `--------------> storage/* |
+ | | |
+ '--------------------------|--------------------------'
+ v
+ .----.
+ | DB |
+ '----'
+
+* Handlers: the business logic of synapse itself. Each follows a set contract of BaseHandler:
+
+  - BaseHandler gives us onNewRoomEvent, which does (TODO: flesh this out and make it less cryptic):
+
+ + handle_state(event)
+ + auth(event)
+ + persist_event(event)
+ + notify notifier or federation(event)
+
+ - PresenceHandler: use distributor to get EDUs out of Federation. Very
+ lightweight logic built on the distributor
+ - TypingHandler: use distributor to get EDUs out of Federation. Very
+ lightweight logic built on the distributor
+ - EventsHandler: handles the events stream...
+  - FederationHandler: gets PDUs from the Federation layer, turns them into
+    events, and follows the BaseHandler functionality.
+ - RoomsHandler: does all the room logic, including members - lots of classes in
+ RoomsHandler.
+ - ProfileHandler: talks to the storage to store/retrieve profile info.
+
+* EventFactory: generates events of particular event types.
+* Notifier: Backs the events handler.
+* REST: Interfaces handlers and events to the outside world via HTTP/JSON.
+ Converts events back and forth from JSON.
+* Federation: holds the HTTP client & server to talk to other servers. Does
+ replication to make sure there's nothing missing in the graph. Handles
+ reliability. Handles txns.
+* Distributor: generic event bus. Used only for presence & typing currently.
+  Notifier could be implemented using Distributor - so far we are only using it
+  for things which actually /require/ dynamic pluggability, however, as it can
+  obfuscate the actual flow of control. (See the sketch after this list.)
+* Auth: helper singleton to say whether a given event is allowed to do a given
+ thing (TODO: put this on the diagram)
+* State: helper singleton: does state conflict resolution. You give it an event
+ and it tells you if it actually updates the state or not, and annotates the
+ event up properly and handles merge conflict resolution.
+* Storage: abstracts the storage engine.
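+
+To make the Distributor's role concrete, here is a minimal sketch of such an
+event bus (illustrative only, not Synapse's actual implementation)::
+
+    class Distributor(object):
+        def __init__(self):
+            self._observers = {}  # signal name -> list of callbacks
+
+        def observe(self, signal, observer):
+            self._observers.setdefault(signal, []).append(observer)
+
+        def fire(self, signal, *args, **kwargs):
+            for observer in self._observers.get(signal, []):
+                observer(*args, **kwargs)
+
+A handler registers with e.g. ``distributor.observe("received_edu", on_edu)``,
+and federation code calls ``distributor.fire("received_edu", edu)`` without
+knowing who is listening.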
diff --git a/docs/code_style.rst b/docs/code_style.rst
new file mode 100644
index 00000000..dc40a7ab
--- /dev/null
+++ b/docs/code_style.rst
@@ -0,0 +1,49 @@
+Basically, PEP8
+
+- NEVER tabs. 4 spaces to indent.
+- Max line width: 79 chars (with flexibility to overflow by a "few chars" if
+ the overflowing content is not semantically significant and avoids an
+ explosion of vertical whitespace).
+- Use camel case for class and type names
+- Use underscores for functions and variables.
+- Use double quotes.
+- Use parentheses instead of '\\' for line continuation wherever possible
+  (which is pretty much everywhere).
+- There should be at most a single new line between:
+ - statements
+ - functions in a class
+- There should be two new lines between:
+ - definitions in a module (e.g., between different classes)
+- There should be spaces where spaces should be, and not where they shouldn't be
+  (see the example after this list):
+ - a single space after a comma
+ - a single space before and after for '=' when used as assignment
+ - no spaces before and after for '=' for default values and keyword arguments.
+- Indenting must follow PEP8; either hanging indent or multiline-visual indent
+ depending on the size and shape of the arguments and what makes more sense to
+ the author. In other words, both this::
+
+ print("I am a fish %s" % "moo")
+
+ and this::
+
+ print("I am a fish %s" %
+ "moo")
+
+ and this::
+
+ print(
+ "I am a fish %s" %
+ "moo"
+ )
+
+ ...are valid, although given each one takes up 2x more vertical space than
+ the previous, it's up to the author's discretion as to which layout makes most
+ sense for their function invocation. (e.g. if they want to add comments
+ per-argument, or put expressions in the arguments, or group related arguments
+ together, or want to deliberately extend or preserve vertical/horizontal
+ space)
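+
+For example, applying the spacing rules above (``fetch_events`` is an
+arbitrary function)::
+
+    limit = 100                         # single space around '=' in assignment
+    events = fetch_events(limit=limit)  # no spaces around '=' for keyword args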
+
+Comments should follow the Google code style. This is so that we can generate
+documentation with sphinx (http://sphinxcontrib-napoleon.readthedocs.org/en/latest/).
+
+Code should pass ``pep8 --max-line-length=100`` without any warnings.
diff --git a/docs/media_repository.rst b/docs/media_repository.rst
new file mode 100644
index 00000000..1037b5be
--- /dev/null
+++ b/docs/media_repository.rst
@@ -0,0 +1,27 @@
+Media Repository
+================
+
+*Synapse implementation-specific details for the media repository*
+
+The media repository is where attachments and avatar photos are stored.
+It stores attachment content and thumbnails for media uploaded by local users.
+It caches attachment content and thumbnails for media uploaded by remote users.
+
+Storage
+-------
+
+Each item of media is assigned a ``media_id`` when it is uploaded.
+The ``media_id`` is a randomly chosen, URL safe 24 character string.
+Metadata such as the MIME type, upload time and length are stored in the
+sqlite3 database indexed by ``media_id``.
+Content is stored on the filesystem under a ``"local_content"`` directory.
+Thumbnails are stored under a ``"local_thumbnails"`` directory.
+The item with ``media_id`` ``"aabbccccccccdddddddddddd"`` is stored under
+``"local_content/aa/bb/ccccccccdddddddddddd"``. Its thumbnail with width
+``128``, height ``96`` and type ``"image/jpeg"`` is stored under
+``"local_thumbnails/aa/bb/ccccccccdddddddddddd/128-96-image-jpeg"``.
+Remote content is cached under a ``"remote_content"`` directory. Each item of
+remote content is assigned a local ``filesystem_id``, to ensure that the
+directory structure ``"remote_content/server_name/aa/bb/ccccccccdddddddddddd"``
+is appropriate. Thumbnails for remote content are stored under
+``"remote_thumbnails/server_name/..."``.
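+
+A sketch of the path derivation described above (helper names are
+illustrative, not Synapse's actual functions)::
+
+    import os
+
+    def local_media_path(base, media_id):
+        # "aabbccccccccdddddddddddd" -> "local_content/aa/bb/cccccccc..."
+        return os.path.join(base, "local_content",
+                            media_id[0:2], media_id[2:4], media_id[4:])
+
+    def local_thumbnail_path(base, media_id, width, height, content_type):
+        # e.g. 128, 96, "image/jpeg" -> ".../128-96-image-jpeg"
+        top_level_type, sub_type = content_type.split("/")
+        file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type)
+        return os.path.join(base, "local_thumbnails",
+                            media_id[0:2], media_id[2:4], media_id[4:],
+                            file_name)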
diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst
new file mode 100644
index 00000000..c1f5ae21
--- /dev/null
+++ b/docs/metrics-howto.rst
@@ -0,0 +1,50 @@
+How to monitor Synapse metrics using Prometheus
+===============================================
+
+1: Install prometheus:
+ Follow instructions at http://prometheus.io/docs/introduction/install/
+
+2: Enable synapse metrics:
+  Simply setting a (local) port number will enable it. Pick a port.
+  prometheus itself defaults to 9090, so starting just above that for
+  locally monitored services seems reasonable. E.g. 9092.
+
+  Add to homeserver.yaml::
+
+      metrics_port: 9092
+
+  Restart synapse.
+
+3: Check out synapse-prometheus-config
+ https://github.com/matrix-org/synapse-prometheus-config
+
+4: Add ``synapse.html`` and ``synapse.rules``
+  The ``.html`` file needs to appear in prometheus's ``consoles`` directory,
+  and the ``.rules`` file needs to be invoked somewhere in the main config
+  file. Symlinking each file from the git checkout into the prometheus
+  directory is the easiest way to ensure ``git pull`` keeps them updated.
+
+5: Add a prometheus target for synapse
+ This is easiest if prometheus runs on the same machine as synapse, as it can
+ then just use localhost::
+
+ global: {
+ rule_file: "synapse.rules"
+ }
+
+ job: {
+ name: "synapse"
+
+ target_group: {
+ target: "http://localhost:9092/"
+ }
+ }
+
+6: Start prometheus::
+
+ ./prometheus -config.file=prometheus.conf
+
+7: Wait a few seconds for it to start and perform the first scrape,
+ then visit the console:
+
+ http://server-where-prometheus-runs:9090/consoles/synapse.html
diff --git a/docs/postgres.rst b/docs/postgres.rst
new file mode 100644
index 00000000..b5027fef
--- /dev/null
+++ b/docs/postgres.rst
@@ -0,0 +1,107 @@
+Using Postgres
+--------------
+
+Set up database
+===============
+
+The PostgreSQL database used *must* have the correct encoding set, otherwise it
+will not be able to store UTF8 strings. To create a database with the correct
+encoding use, e.g.::
+
+ CREATE DATABASE synapse
+ ENCODING 'UTF8'
+ LC_COLLATE='C'
+ LC_CTYPE='C'
+ template=template0
+ OWNER synapse_user;
+
+This would create an appropriate database named ``synapse`` owned by the
+``synapse_user`` user (which must already exist).
+
+Set up client
+=============
+
+Postgres support depends on the postgres python connector ``psycopg2``. In the
+virtual env::
+
+ sudo apt-get install libpq-dev
+ pip install psycopg2
+
+
+Synapse config
+==============
+
+When you are ready to start using PostgreSQL, add the following lines to your
+config file::
+
+ database:
+ name: psycopg2
+ args:
+ user: <user>
+ password: <pass>
+ database: <db>
+ host: <host>
+ cp_min: 5
+ cp_max: 10
+
+All keys and values in ``args`` are passed to the ``psycopg2.connect(..)``
+function, except keys beginning with ``cp_``, which are consumed by the twisted
+adbapi connection pool.
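+
+Roughly, the split works like this (illustrative sketch, not the actual
+Synapse code)::
+
+    import psycopg2
+
+    def connect_from_config(args):
+        # Keys beginning with "cp_" belong to the connection pool,
+        # not to psycopg2 itself.
+        db_args = dict(
+            (k, v) for k, v in args.items() if not k.startswith("cp_")
+        )
+        return psycopg2.connect(**db_args)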
+
+
+Porting from SQLite
+===================
+
+Overview
+~~~~~~~~
+
+The script ``synapse_port_db`` allows porting an existing synapse server
+backed by SQLite to using PostgreSQL. This is done as a two-phase process:
+
+1. Copy the existing SQLite database to a separate location (while the server
+   is down) and run the port script against that offline database.
+2. Shut down the server. Rerun the port script to port any data that has come
+   in since taking the first snapshot. Restart the server against the
+   PostgreSQL database.
+
+The port script is designed to be run repeatedly against newer snapshots of the
+SQLite database file. This makes it safe to repeat step 1 if there was a delay
+between taking the previous snapshot and being ready to do step 2.
+
+It is safe to kill the port script at any time and restart it.
+
+Using the port script
+~~~~~~~~~~~~~~~~~~~~~
+
+Firstly, shut down the currently running synapse server and copy its database
+file (typically ``homeserver.db``) to another location. Once the copy is
+complete, restart synapse. For instance::
+
+ ./synctl stop
+ cp homeserver.db homeserver.db.snapshot
+ ./synctl start
+
+Assuming your new config file (as described in the section *Synapse config*)
+is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
+``homeserver.db.snapshot`` then simply run::
+
+ synapse_port_db --sqlite-database homeserver.db.snapshot \
+ --postgres-config homeserver-postgres.yaml
+
+The flag ``--curses`` displays a coloured curses progress UI.
+
+If the script took a long time to complete, or time has otherwise passed since
+the original snapshot was taken, repeat the previous steps with a newer
+snapshot.
+
+To complete the conversion shut down the synapse server and run the port
+script one last time, e.g. if the SQLite database is at ``homeserver.db``
+run::
+
+ synapse_port_db --sqlite-database homeserver.db \
+ --postgres-config database_config.yaml
+
+Once that has completed, change the synapse config to point at the PostgreSQL
+database configuration file using the ``database_config`` parameter (see
+`Synapse Config`_) and restart synapse. Synapse should now be running against
+PostgreSQL.
diff --git a/docs/sphinx/README.rst b/docs/sphinx/README.rst
new file mode 100644
index 00000000..a7ab7c55
--- /dev/null
+++ b/docs/sphinx/README.rst
@@ -0,0 +1 @@
+TODO: how (if at all) is this actually maintained?
diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py
new file mode 100644
index 00000000..15c19834
--- /dev/null
+++ b/docs/sphinx/conf.py
@@ -0,0 +1,271 @@
+# -*- coding: utf-8 -*-
+#
+# Synapse documentation build configuration file, created by
+# sphinx-quickstart on Tue Jun 10 17:31:02 2014.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('..'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.ifconfig',
+ 'sphinxcontrib.napoleon',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Synapse'
+copyright = u'2014, TNG'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '1.0'
+# The full version, including alpha/beta/rc tags.
+release = '1.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Synapsedoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ ('index', 'Synapse.tex', u'Synapse Documentation',
+ u'TNG', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'synapse', u'Synapse Documentation',
+ [u'TNG'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'Synapse', u'Synapse Documentation',
+ u'TNG', 'Synapse', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
+
+napoleon_include_special_with_doc = True
+napoleon_use_ivar = True
diff --git a/docs/sphinx/index.rst b/docs/sphinx/index.rst
new file mode 100644
index 00000000..76a4c0c7
--- /dev/null
+++ b/docs/sphinx/index.rst
@@ -0,0 +1,20 @@
+.. Synapse documentation master file, created by
+ sphinx-quickstart on Tue Jun 10 17:31:02 2014.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to Synapse's documentation!
+===================================
+
+Contents:
+
+.. toctree::
+ synapse
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/docs/sphinx/modules.rst b/docs/sphinx/modules.rst
new file mode 100644
index 00000000..1c7f70bd
--- /dev/null
+++ b/docs/sphinx/modules.rst
@@ -0,0 +1,7 @@
+synapse
+=======
+
+.. toctree::
+ :maxdepth: 4
+
+ synapse
diff --git a/docs/sphinx/synapse.api.auth.rst b/docs/sphinx/synapse.api.auth.rst
new file mode 100644
index 00000000..931eb598
--- /dev/null
+++ b/docs/sphinx/synapse.api.auth.rst
@@ -0,0 +1,7 @@
+synapse.api.auth module
+=======================
+
+.. automodule:: synapse.api.auth
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.constants.rst b/docs/sphinx/synapse.api.constants.rst
new file mode 100644
index 00000000..a1e3c47f
--- /dev/null
+++ b/docs/sphinx/synapse.api.constants.rst
@@ -0,0 +1,7 @@
+synapse.api.constants module
+============================
+
+.. automodule:: synapse.api.constants
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.dbobjects.rst b/docs/sphinx/synapse.api.dbobjects.rst
new file mode 100644
index 00000000..e9d31167
--- /dev/null
+++ b/docs/sphinx/synapse.api.dbobjects.rst
@@ -0,0 +1,7 @@
+synapse.api.dbobjects module
+============================
+
+.. automodule:: synapse.api.dbobjects
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.errors.rst b/docs/sphinx/synapse.api.errors.rst
new file mode 100644
index 00000000..f1c68814
--- /dev/null
+++ b/docs/sphinx/synapse.api.errors.rst
@@ -0,0 +1,7 @@
+synapse.api.errors module
+=========================
+
+.. automodule:: synapse.api.errors
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.event_stream.rst b/docs/sphinx/synapse.api.event_stream.rst
new file mode 100644
index 00000000..9291cb2d
--- /dev/null
+++ b/docs/sphinx/synapse.api.event_stream.rst
@@ -0,0 +1,7 @@
+synapse.api.event_stream module
+===============================
+
+.. automodule:: synapse.api.event_stream
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.events.factory.rst b/docs/sphinx/synapse.api.events.factory.rst
new file mode 100644
index 00000000..2e71ff60
--- /dev/null
+++ b/docs/sphinx/synapse.api.events.factory.rst
@@ -0,0 +1,7 @@
+synapse.api.events.factory module
+=================================
+
+.. automodule:: synapse.api.events.factory
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.events.room.rst b/docs/sphinx/synapse.api.events.room.rst
new file mode 100644
index 00000000..6cd59985
--- /dev/null
+++ b/docs/sphinx/synapse.api.events.room.rst
@@ -0,0 +1,7 @@
+synapse.api.events.room module
+==============================
+
+.. automodule:: synapse.api.events.room
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.events.rst b/docs/sphinx/synapse.api.events.rst
new file mode 100644
index 00000000..b762da55
--- /dev/null
+++ b/docs/sphinx/synapse.api.events.rst
@@ -0,0 +1,18 @@
+synapse.api.events package
+==========================
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.api.events.factory
+ synapse.api.events.room
+
+Module contents
+---------------
+
+.. automodule:: synapse.api.events
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.handlers.events.rst b/docs/sphinx/synapse.api.handlers.events.rst
new file mode 100644
index 00000000..d2e1b54a
--- /dev/null
+++ b/docs/sphinx/synapse.api.handlers.events.rst
@@ -0,0 +1,7 @@
+synapse.api.handlers.events module
+==================================
+
+.. automodule:: synapse.api.handlers.events
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.handlers.factory.rst b/docs/sphinx/synapse.api.handlers.factory.rst
new file mode 100644
index 00000000..b04a93f7
--- /dev/null
+++ b/docs/sphinx/synapse.api.handlers.factory.rst
@@ -0,0 +1,7 @@
+synapse.api.handlers.factory module
+===================================
+
+.. automodule:: synapse.api.handlers.factory
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.handlers.federation.rst b/docs/sphinx/synapse.api.handlers.federation.rst
new file mode 100644
index 00000000..61a65422
--- /dev/null
+++ b/docs/sphinx/synapse.api.handlers.federation.rst
@@ -0,0 +1,7 @@
+synapse.api.handlers.federation module
+======================================
+
+.. automodule:: synapse.api.handlers.federation
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.handlers.register.rst b/docs/sphinx/synapse.api.handlers.register.rst
new file mode 100644
index 00000000..388f144e
--- /dev/null
+++ b/docs/sphinx/synapse.api.handlers.register.rst
@@ -0,0 +1,7 @@
+synapse.api.handlers.register module
+====================================
+
+.. automodule:: synapse.api.handlers.register
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.handlers.room.rst b/docs/sphinx/synapse.api.handlers.room.rst
new file mode 100644
index 00000000..8ca156c7
--- /dev/null
+++ b/docs/sphinx/synapse.api.handlers.room.rst
@@ -0,0 +1,7 @@
+synapse.api.handlers.room module
+================================
+
+.. automodule:: synapse.api.handlers.room
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.handlers.rst b/docs/sphinx/synapse.api.handlers.rst
new file mode 100644
index 00000000..e84f563f
--- /dev/null
+++ b/docs/sphinx/synapse.api.handlers.rst
@@ -0,0 +1,21 @@
+synapse.api.handlers package
+============================
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.api.handlers.events
+ synapse.api.handlers.factory
+ synapse.api.handlers.federation
+ synapse.api.handlers.register
+ synapse.api.handlers.room
+
+Module contents
+---------------
+
+.. automodule:: synapse.api.handlers
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.notifier.rst b/docs/sphinx/synapse.api.notifier.rst
new file mode 100644
index 00000000..631b42a4
--- /dev/null
+++ b/docs/sphinx/synapse.api.notifier.rst
@@ -0,0 +1,7 @@
+synapse.api.notifier module
+===========================
+
+.. automodule:: synapse.api.notifier
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.register_events.rst b/docs/sphinx/synapse.api.register_events.rst
new file mode 100644
index 00000000..79ad4ce2
--- /dev/null
+++ b/docs/sphinx/synapse.api.register_events.rst
@@ -0,0 +1,7 @@
+synapse.api.register_events module
+==================================
+
+.. automodule:: synapse.api.register_events
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.room_events.rst b/docs/sphinx/synapse.api.room_events.rst
new file mode 100644
index 00000000..bead1711
--- /dev/null
+++ b/docs/sphinx/synapse.api.room_events.rst
@@ -0,0 +1,7 @@
+synapse.api.room_events module
+==============================
+
+.. automodule:: synapse.api.room_events
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.rst b/docs/sphinx/synapse.api.rst
new file mode 100644
index 00000000..f4d39ff3
--- /dev/null
+++ b/docs/sphinx/synapse.api.rst
@@ -0,0 +1,30 @@
+synapse.api package
+===================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ synapse.api.events
+ synapse.api.handlers
+ synapse.api.streams
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.api.auth
+ synapse.api.constants
+ synapse.api.errors
+ synapse.api.notifier
+ synapse.api.storage
+
+Module contents
+---------------
+
+.. automodule:: synapse.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.server.rst b/docs/sphinx/synapse.api.server.rst
new file mode 100644
index 00000000..b0160023
--- /dev/null
+++ b/docs/sphinx/synapse.api.server.rst
@@ -0,0 +1,7 @@
+synapse.api.server module
+=========================
+
+.. automodule:: synapse.api.server
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.storage.rst b/docs/sphinx/synapse.api.storage.rst
new file mode 100644
index 00000000..afa40685
--- /dev/null
+++ b/docs/sphinx/synapse.api.storage.rst
@@ -0,0 +1,7 @@
+synapse.api.storage module
+==========================
+
+.. automodule:: synapse.api.storage
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.stream.rst b/docs/sphinx/synapse.api.stream.rst
new file mode 100644
index 00000000..0d5e3f01
--- /dev/null
+++ b/docs/sphinx/synapse.api.stream.rst
@@ -0,0 +1,7 @@
+synapse.api.stream module
+=========================
+
+.. automodule:: synapse.api.stream
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.streams.event.rst b/docs/sphinx/synapse.api.streams.event.rst
new file mode 100644
index 00000000..2ac45a35
--- /dev/null
+++ b/docs/sphinx/synapse.api.streams.event.rst
@@ -0,0 +1,7 @@
+synapse.api.streams.event module
+================================
+
+.. automodule:: synapse.api.streams.event
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.api.streams.rst b/docs/sphinx/synapse.api.streams.rst
new file mode 100644
index 00000000..72eb205c
--- /dev/null
+++ b/docs/sphinx/synapse.api.streams.rst
@@ -0,0 +1,17 @@
+synapse.api.streams package
+===========================
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.api.streams.event
+
+Module contents
+---------------
+
+.. automodule:: synapse.api.streams
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.app.homeserver.rst b/docs/sphinx/synapse.app.homeserver.rst
new file mode 100644
index 00000000..54b93da8
--- /dev/null
+++ b/docs/sphinx/synapse.app.homeserver.rst
@@ -0,0 +1,7 @@
+synapse.app.homeserver module
+=============================
+
+.. automodule:: synapse.app.homeserver
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.app.rst b/docs/sphinx/synapse.app.rst
new file mode 100644
index 00000000..4535b798
--- /dev/null
+++ b/docs/sphinx/synapse.app.rst
@@ -0,0 +1,17 @@
+synapse.app package
+===================
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.app.homeserver
+
+Module contents
+---------------
+
+.. automodule:: synapse.app
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.db.rst b/docs/sphinx/synapse.db.rst
new file mode 100644
index 00000000..83df6c03
--- /dev/null
+++ b/docs/sphinx/synapse.db.rst
@@ -0,0 +1,10 @@
+synapse.db package
+==================
+
+Module contents
+---------------
+
+.. automodule:: synapse.db
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.federation.handler.rst b/docs/sphinx/synapse.federation.handler.rst
new file mode 100644
index 00000000..5597f5c4
--- /dev/null
+++ b/docs/sphinx/synapse.federation.handler.rst
@@ -0,0 +1,7 @@
+synapse.federation.handler module
+=================================
+
+.. automodule:: synapse.federation.handler
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.federation.messaging.rst b/docs/sphinx/synapse.federation.messaging.rst
new file mode 100644
index 00000000..4bbaabf3
--- /dev/null
+++ b/docs/sphinx/synapse.federation.messaging.rst
@@ -0,0 +1,7 @@
+synapse.federation.messaging module
+===================================
+
+.. automodule:: synapse.federation.messaging
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.federation.pdu_codec.rst b/docs/sphinx/synapse.federation.pdu_codec.rst
new file mode 100644
index 00000000..8f0b15a6
--- /dev/null
+++ b/docs/sphinx/synapse.federation.pdu_codec.rst
@@ -0,0 +1,7 @@
+synapse.federation.pdu_codec module
+===================================
+
+.. automodule:: synapse.federation.pdu_codec
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.federation.persistence.rst b/docs/sphinx/synapse.federation.persistence.rst
new file mode 100644
index 00000000..db7ab8ad
--- /dev/null
+++ b/docs/sphinx/synapse.federation.persistence.rst
@@ -0,0 +1,7 @@
+synapse.federation.persistence module
+=====================================
+
+.. automodule:: synapse.federation.persistence
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.federation.replication.rst b/docs/sphinx/synapse.federation.replication.rst
new file mode 100644
index 00000000..49e26e09
--- /dev/null
+++ b/docs/sphinx/synapse.federation.replication.rst
@@ -0,0 +1,7 @@
+synapse.federation.replication module
+=====================================
+
+.. automodule:: synapse.federation.replication
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.federation.rst b/docs/sphinx/synapse.federation.rst
new file mode 100644
index 00000000..7240c790
--- /dev/null
+++ b/docs/sphinx/synapse.federation.rst
@@ -0,0 +1,22 @@
+synapse.federation package
+==========================
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.federation.handler
+ synapse.federation.pdu_codec
+ synapse.federation.persistence
+ synapse.federation.replication
+ synapse.federation.transport
+ synapse.federation.units
+
+Module contents
+---------------
+
+.. automodule:: synapse.federation
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.federation.transport.rst b/docs/sphinx/synapse.federation.transport.rst
new file mode 100644
index 00000000..877956b3
--- /dev/null
+++ b/docs/sphinx/synapse.federation.transport.rst
@@ -0,0 +1,7 @@
+synapse.federation.transport module
+===================================
+
+.. automodule:: synapse.federation.transport
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.federation.units.rst b/docs/sphinx/synapse.federation.units.rst
new file mode 100644
index 00000000..8f9212b0
--- /dev/null
+++ b/docs/sphinx/synapse.federation.units.rst
@@ -0,0 +1,7 @@
+synapse.federation.units module
+===============================
+
+.. automodule:: synapse.federation.units
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.persistence.rst b/docs/sphinx/synapse.persistence.rst
new file mode 100644
index 00000000..37c0c237
--- /dev/null
+++ b/docs/sphinx/synapse.persistence.rst
@@ -0,0 +1,19 @@
+synapse.persistence package
+===========================
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.persistence.service
+ synapse.persistence.tables
+ synapse.persistence.transactions
+
+Module contents
+---------------
+
+.. automodule:: synapse.persistence
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.persistence.service.rst b/docs/sphinx/synapse.persistence.service.rst
new file mode 100644
index 00000000..3514d3c7
--- /dev/null
+++ b/docs/sphinx/synapse.persistence.service.rst
@@ -0,0 +1,7 @@
+synapse.persistence.service module
+==================================
+
+.. automodule:: synapse.persistence.service
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.persistence.tables.rst b/docs/sphinx/synapse.persistence.tables.rst
new file mode 100644
index 00000000..907b0276
--- /dev/null
+++ b/docs/sphinx/synapse.persistence.tables.rst
@@ -0,0 +1,7 @@
+synapse.persistence.tables module
+=================================
+
+.. automodule:: synapse.persistence.tables
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.persistence.transactions.rst b/docs/sphinx/synapse.persistence.transactions.rst
new file mode 100644
index 00000000..475c02a8
--- /dev/null
+++ b/docs/sphinx/synapse.persistence.transactions.rst
@@ -0,0 +1,7 @@
+synapse.persistence.transactions module
+=======================================
+
+.. automodule:: synapse.persistence.transactions
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.rest.base.rst b/docs/sphinx/synapse.rest.base.rst
new file mode 100644
index 00000000..84d2d9b3
--- /dev/null
+++ b/docs/sphinx/synapse.rest.base.rst
@@ -0,0 +1,7 @@
+synapse.rest.base module
+========================
+
+.. automodule:: synapse.rest.base
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.rest.events.rst b/docs/sphinx/synapse.rest.events.rst
new file mode 100644
index 00000000..ebbe26c7
--- /dev/null
+++ b/docs/sphinx/synapse.rest.events.rst
@@ -0,0 +1,7 @@
+synapse.rest.events module
+==========================
+
+.. automodule:: synapse.rest.events
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.rest.register.rst b/docs/sphinx/synapse.rest.register.rst
new file mode 100644
index 00000000..a4a48a8a
--- /dev/null
+++ b/docs/sphinx/synapse.rest.register.rst
@@ -0,0 +1,7 @@
+synapse.rest.register module
+============================
+
+.. automodule:: synapse.rest.register
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.rest.room.rst b/docs/sphinx/synapse.rest.room.rst
new file mode 100644
index 00000000..63fc5c28
--- /dev/null
+++ b/docs/sphinx/synapse.rest.room.rst
@@ -0,0 +1,7 @@
+synapse.rest.room module
+========================
+
+.. automodule:: synapse.rest.room
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.rest.rst b/docs/sphinx/synapse.rest.rst
new file mode 100644
index 00000000..016af926
--- /dev/null
+++ b/docs/sphinx/synapse.rest.rst
@@ -0,0 +1,20 @@
+synapse.rest package
+====================
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.rest.base
+ synapse.rest.events
+ synapse.rest.register
+ synapse.rest.room
+
+Module contents
+---------------
+
+.. automodule:: synapse.rest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.rst b/docs/sphinx/synapse.rst
new file mode 100644
index 00000000..e7869e0e
--- /dev/null
+++ b/docs/sphinx/synapse.rst
@@ -0,0 +1,30 @@
+synapse package
+===============
+
+Subpackages
+-----------
+
+.. toctree::
+
+ synapse.api
+ synapse.app
+ synapse.federation
+ synapse.persistence
+ synapse.rest
+ synapse.util
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.server
+ synapse.state
+
+Module contents
+---------------
+
+.. automodule:: synapse
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.server.rst b/docs/sphinx/synapse.server.rst
new file mode 100644
index 00000000..7f33f084
--- /dev/null
+++ b/docs/sphinx/synapse.server.rst
@@ -0,0 +1,7 @@
+synapse.server module
+=====================
+
+.. automodule:: synapse.server
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.state.rst b/docs/sphinx/synapse.state.rst
new file mode 100644
index 00000000..744be2a8
--- /dev/null
+++ b/docs/sphinx/synapse.state.rst
@@ -0,0 +1,7 @@
+synapse.state module
+====================
+
+.. automodule:: synapse.state
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.util.async.rst b/docs/sphinx/synapse.util.async.rst
new file mode 100644
index 00000000..542bb544
--- /dev/null
+++ b/docs/sphinx/synapse.util.async.rst
@@ -0,0 +1,7 @@
+synapse.util.async module
+=========================
+
+.. automodule:: synapse.util.async
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.util.dbutils.rst b/docs/sphinx/synapse.util.dbutils.rst
new file mode 100644
index 00000000..afaa9eb7
--- /dev/null
+++ b/docs/sphinx/synapse.util.dbutils.rst
@@ -0,0 +1,7 @@
+synapse.util.dbutils module
+===========================
+
+.. automodule:: synapse.util.dbutils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.util.http.rst b/docs/sphinx/synapse.util.http.rst
new file mode 100644
index 00000000..344af5a4
--- /dev/null
+++ b/docs/sphinx/synapse.util.http.rst
@@ -0,0 +1,7 @@
+synapse.util.http module
+========================
+
+.. automodule:: synapse.util.http
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.util.lockutils.rst b/docs/sphinx/synapse.util.lockutils.rst
new file mode 100644
index 00000000..16ee26ca
--- /dev/null
+++ b/docs/sphinx/synapse.util.lockutils.rst
@@ -0,0 +1,7 @@
+synapse.util.lockutils module
+=============================
+
+.. automodule:: synapse.util.lockutils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.util.logutils.rst b/docs/sphinx/synapse.util.logutils.rst
new file mode 100644
index 00000000..2b79fa7a
--- /dev/null
+++ b/docs/sphinx/synapse.util.logutils.rst
@@ -0,0 +1,7 @@
+synapse.util.logutils module
+============================
+
+.. automodule:: synapse.util.logutils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.util.rst b/docs/sphinx/synapse.util.rst
new file mode 100644
index 00000000..01a0c3a5
--- /dev/null
+++ b/docs/sphinx/synapse.util.rst
@@ -0,0 +1,21 @@
+synapse.util package
+====================
+
+Submodules
+----------
+
+.. toctree::
+
+ synapse.util.async
+ synapse.util.http
+ synapse.util.lockutils
+ synapse.util.logutils
+ synapse.util.stringutils
+
+Module contents
+---------------
+
+.. automodule:: synapse.util
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/sphinx/synapse.util.stringutils.rst b/docs/sphinx/synapse.util.stringutils.rst
new file mode 100644
index 00000000..ec626eee
--- /dev/null
+++ b/docs/sphinx/synapse.util.stringutils.rst
@@ -0,0 +1,7 @@
+synapse.util.stringutils module
+===============================
+
+.. automodule:: synapse.util.stringutils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst
new file mode 100644
index 00000000..e2c73458
--- /dev/null
+++ b/docs/turn-howto.rst
@@ -0,0 +1,93 @@
+How to enable VoIP relaying on your Home Server with TURN
+=========================================================
+
+Overview
+--------
+The synapse Matrix Home Server supports integration with a TURN server via the
+TURN server REST API
+(http://tools.ietf.org/html/draft-uberti-behave-turn-rest-00). This allows
+the Home Server to generate credentials that are valid for use on the TURN
+server through the use of a secret shared between the Home Server and the
+TURN server.
+
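+As a rough sketch of what the TURN REST API specifies (the secret and user
+below are placeholders), the Home Server derives a time-limited username and
+password from the shared secret along these lines::
+
+    import base64, hashlib, hmac, time
+
+    shared_secret = "n0tre4llyasecret"   # must match the TURN server's secret
+    lifetime_ms = 86400000               # turn_user_lifetime
+
+    expiry = int(time.time()) + lifetime_ms / 1000
+    username = "%d:someuser" % expiry
+    password = base64.b64encode(
+        hmac.new(shared_secret, username, hashlib.sha1).digest()
+    )
+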
+This document describes how to install coturn
+(https://code.google.com/p/coturn/) which also supports the TURN REST API,
+and integrate it with synapse.
+
+coturn Setup
+============
+
+ 1. Check out coturn::
+
+      svn checkout http://coturn.googlecode.com/svn/trunk/ coturn
+      cd coturn
+
+ 2. Configure it::
+
+      ./configure
+
+ You may need to install libevent2: if so, you should do so
+ in the way recommended by your operating system.
+ You can ignore warnings about lack of database support: a
+ database is unnecessary for this purpose.
+
+ 3. Build and install it::
+
+      make
+      make install
+
+ 4. Make a config file in /etc/turnserver.conf. You can customise
+ a config file from turnserver.conf.default. The relevant
+ lines, with example values, are::
+
+ lt-cred-mech
+ use-auth-secret
+ static-auth-secret=[your secret key here]
+ realm=turn.myserver.org
+
+ See turnserver.conf.default for explanations of the options.
+ One way to generate the static-auth-secret is with pwgen::
+
+ pwgen -s 64 1
+
+ 5. Ensure your firewall allows traffic into the TURN server on
+ the ports you've configured it to listen on (remember to allow
+ both TCP and UDP if you've enabled both).
+
+ 6. If you've configured coturn to support TLS/DTLS, generate or
+ import your private key and certificate.
+
+ 7. Start the turn server::
+
+      bin/turnserver -o
+
+
+synapse Setup
+=============
+
+Your home server configuration file needs the following extra keys:
+
+ 1. "turn_uris": This needs to be a yaml list
+ of public-facing URIs for your TURN server to be given out
+ to your clients. Add separate entries for each transport your
+ TURN server supports.
+
+ 2. "turn_shared_secret": This is the secret shared between your Home
+ server and your TURN server, so you should set it to the same
+ string you used in turnserver.conf.
+
+ 3. "turn_user_lifetime": This is the amount of time credentials
+ generated by your Home Server are valid for (in milliseconds).
+ Shorter times offer less potential for abuse at the expense
+ of increased traffic between web clients and your home server
+ to refresh credentials. The TURN REST API specification recommends
+ one day (86400000).
+
+As an example, here is the relevant section of the config file for
+matrix.org::
+
+ turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
+ turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
+ turn_user_lifetime: 86400000
+
+Now, restart synapse::
+
+ cd /where/you/run/synapse
+ ./synctl restart
+
+...and your Home Server now supports VoIP relaying!
diff --git a/jenkins.sh b/jenkins.sh
new file mode 100755
index 00000000..4804022e
--- /dev/null
+++ b/jenkins.sh
@@ -0,0 +1,39 @@
+#!/bin/bash -eu
+
+export PYTHONDONTWRITEBYTECODE=yep
+
+# Output test results as junit xml
+export TRIAL_FLAGS="--reporter=subunit"
+export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
+
+# Output coverage to coverage.xml
+export DUMP_COVERAGE_COMMAND="coverage xml -o coverage.xml"
+
+# Output flake8 violations to violations.flake8.log
+# Don't exit with a non-0 status code on Jenkins,
+# so that the build steps continue and a later step can decide whether to
+# mark this build UNSTABLE or FAILURE.
+export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
+
+tox
+
+: ${GIT_BRANCH:="$(git rev-parse --abbrev-ref HEAD)"}
+
+set +u
+. .tox/py27/bin/activate
+set -u
+
+rm -rf sytest
+git clone https://github.com/matrix-org/sytest.git sytest
+cd sytest
+
+git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
+
+: ${PERL5LIB:=$WORKSPACE/perl5/lib/perl5}
+: ${PERL_MB_OPT:=--install_base=$WORKSPACE/perl5}
+: ${PERL_MM_OPT:=INSTALL_BASE=$WORKSPACE/perl5}
+export PERL5LIB PERL_MB_OPT PERL_MM_OPT
+
+./install-deps.pl
+
+./run-tests.pl -O tap --synapse-directory .. --all > results.tap
diff --git a/pylint.cfg b/pylint.cfg
new file mode 100644
index 00000000..23689971
--- /dev/null
+++ b/pylint.cfg
@@ -0,0 +1,280 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+disable=missing-docstring
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors, warning, refactor and
+# convention, which contain the number of messages in each category, and to
+# statement, the total number of statements analyzed. This is used by the
+# global evaluation report
+# (RP0004).
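+# For example (hypothetical counts): 2 errors and 10 warnings over 200
+# statements give 10.0 - ((5*2 + 10) / 200.0) * 10 = 9.0.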
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the beginning of the name of dummy variables
+# (i.e. not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,apply,input
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
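+## A usage sketch (hypothetical room ID):
+##
+##     ./nuke-room-from-db.sh '!someroom:example.com'
+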
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct attribute names in class
+# bodies
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=80
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of all (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used, for
+# instance, to not check methods defined in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/scripts-dev/check_auth.py b/scripts-dev/check_auth.py
new file mode 100644
index 00000000..4fa8792a
--- /dev/null
+++ b/scripts-dev/check_auth.py
@@ -0,0 +1,64 @@
+from synapse.events import FrozenEvent
+from synapse.api.auth import Auth
+
+from mock import Mock
+
+import argparse
+import itertools
+import json
+import sys
+
+
+def check_auth(auth, auth_chain, events):
+ auth_chain.sort(key=lambda e: e.depth)
+
+ auth_map = {
+ e.event_id: e
+ for e in auth_chain
+ }
+
+ create_events = {}
+ for e in auth_chain:
+ if e.type == "m.room.create":
+ create_events[e.room_id] = e
+
+ for e in itertools.chain(auth_chain, events):
+ auth_events_list = [auth_map[i] for i, _ in e.auth_events]
+
+ auth_events = {
+ (e.type, e.state_key): e
+ for e in auth_events_list
+ }
+
+ auth_events[("m.room.create", "")] = create_events[e.room_id]
+
+        try:
+            auth.check(e, auth_events=auth_events)
+        except Exception as ex:
+            print "Failed:", e.event_id, e.type, e.state_key
+            print "Auth_events:", auth_events
+            print ex
+            print json.dumps(e.get_dict(), sort_keys=True, indent=4)
+            # raise
+        else:
+            print "Success:", e.event_id, e.type, e.state_key
+
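+# A usage sketch (hypothetical file name): feed it a JSON dump containing an
+# "auth_chain" list and, optionally, a "pdus" list of event dicts:
+#
+#     python check_auth.py dump.json
+#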
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ 'json',
+ nargs='?',
+ type=argparse.FileType('r'),
+ default=sys.stdin,
+ )
+
+ args = parser.parse_args()
+
+ js = json.load(args.json)
+
+ auth = Auth(Mock())
+ check_auth(
+ auth,
+ [FrozenEvent(d) for d in js["auth_chain"]],
+ [FrozenEvent(d) for d in js.get("pdus", [])],
+ )
diff --git a/scripts-dev/check_event_hash.py b/scripts-dev/check_event_hash.py
new file mode 100644
index 00000000..7ccae34d
--- /dev/null
+++ b/scripts-dev/check_event_hash.py
@@ -0,0 +1,50 @@
+from synapse.crypto.event_signing import *
+from unpaddedbase64 import encode_base64
+
+import argparse
+import hashlib
+import json
+import logging
+import sys
+
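+# A usage sketch (hypothetical file name): check the content hashes and print
+# the reference hash of a single event dumped as JSON:
+#
+#     python check_event_hash.py event.json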
+
+class dictobj(dict):
+ def __init__(self, *args, **kargs):
+ dict.__init__(self, *args, **kargs)
+ self.__dict__ = self
+
+ def get_dict(self):
+ return dict(self)
+
+ def get_full_dict(self):
+ return dict(self)
+
+ def get_pdu_json(self):
+ return dict(self)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'),
+ default=sys.stdin)
+ args = parser.parse_args()
+ logging.basicConfig()
+
+ event_json = dictobj(json.load(args.input_json))
+
+ algorithms = {
+ "sha256": hashlib.sha256,
+ }
+
+ for alg_name in event_json.hashes:
+ if check_event_content_hash(event_json, algorithms[alg_name]):
+ print "PASS content hash %s" % (alg_name,)
+ else:
+ print "FAIL content hash %s" % (alg_name,)
+
+ for algorithm in algorithms.values():
+ name, h_bytes = compute_event_reference_hash(event_json, algorithm)
+ print "Reference hash %s: %s" % (name, encode_base64(h_bytes))
+
+if __name__ == "__main__":
+ main()
+
diff --git a/scripts-dev/check_signature.py b/scripts-dev/check_signature.py
new file mode 100644
index 00000000..07957790
--- /dev/null
+++ b/scripts-dev/check_signature.py
@@ -0,0 +1,71 @@
+
+from signedjson.sign import verify_signed_json
+from signedjson.key import decode_verify_key_bytes, write_signing_keys
+from unpaddedbase64 import decode_base64
+
+import urllib2
+import json
+import sys
+import dns.resolver
+import pprint
+import argparse
+import logging
+
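+# A usage sketch: fetch a server's federation keys and verify the signatures
+# on a JSON object read from a file or stdin, e.g.
+#
+#     python check_signature.py matrix.org < signed.json
+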
+def get_targets(server_name):
+ if ":" in server_name:
+ target, port = server_name.split(":")
+ yield (target, int(port))
+ return
+ try:
+ answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
+ for srv in answers:
+ yield (srv.target, srv.port)
+ except dns.resolver.NXDOMAIN:
+ yield (server_name, 8448)
+
+def get_server_keys(server_name, target, port):
+ url = "https://%s:%i/_matrix/key/v1" % (target, port)
+ keys = json.load(urllib2.urlopen(url))
+ verify_keys = {}
+ for key_id, key_base64 in keys["verify_keys"].items():
+ verify_key = decode_verify_key_bytes(key_id, decode_base64(key_base64))
+ verify_signed_json(keys, server_name, verify_key)
+ verify_keys[key_id] = verify_key
+ return verify_keys
+
+def main():
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("signature_name")
+ parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'),
+ default=sys.stdin)
+
+ args = parser.parse_args()
+ logging.basicConfig()
+
+ server_name = args.signature_name
+ keys = {}
+ for target, port in get_targets(server_name):
+ try:
+ keys = get_server_keys(server_name, target, port)
+ print "Using keys from https://%s:%s/_matrix/key/v1" % (target, port)
+ write_signing_keys(sys.stdout, keys.values())
+ break
+        except Exception:
+            logging.exception("Error talking to %s:%s", target, port)
+
+ json_to_check = json.load(args.input_json)
+ print "Checking JSON:"
+ for key_id in json_to_check["signatures"][args.signature_name]:
+ try:
+ key = keys[key_id]
+ verify_signed_json(json_to_check, args.signature_name, key)
+ print "PASS %s" % (key_id,)
+        except Exception:
+            logging.exception("Check for key %s failed", key_id)
+            print "FAIL %s" % (key_id,)
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py
new file mode 100644
index 00000000..151551f2
--- /dev/null
+++ b/scripts-dev/convert_server_keys.py
@@ -0,0 +1,116 @@
+import psycopg2
+import yaml
+import sys
+import json
+import time
+import hashlib
+from unpaddedbase64 import encode_base64
+from signedjson.key import read_signing_keys
+from signedjson.sign import sign_json
+from canonicaljson import encode_canonical_json
+
+
+def select_v1_keys(connection):
+ cursor = connection.cursor()
+ cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
+ rows = cursor.fetchall()
+ cursor.close()
+ results = {}
+ for server_name, key_id, verify_key in rows:
+ results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
+ return results
+
+
+def select_v1_certs(connection):
+ cursor = connection.cursor()
+ cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
+ rows = cursor.fetchall()
+ cursor.close()
+ results = {}
+ for server_name, tls_certificate in rows:
+ results[server_name] = tls_certificate
+ return results
+
+
+def select_v2_json(connection):
+ cursor = connection.cursor()
+ cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
+ rows = cursor.fetchall()
+ cursor.close()
+ results = {}
+ for server_name, key_id, key_json in rows:
+ results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
+ return results
+
+
+def convert_v1_to_v2(server_name, valid_until, keys, certificate):
+ return {
+ "old_verify_keys": {},
+ "server_name": server_name,
+ "verify_keys": {
+ key_id: {"key": key}
+ for key_id, key in keys.items()
+ },
+ "valid_until_ts": valid_until,
+ "tls_fingerprints": [fingerprint(certificate)],
+ }
+
+
+def fingerprint(certificate):
+ finger = hashlib.sha256(certificate)
+ return {"sha256": encode_base64(finger.digest())}
+
+
+def rows_v2(server, json):
+ valid_until = json["valid_until_ts"]
+ key_json = encode_canonical_json(json)
+ for key_id in json["verify_keys"]:
+ yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))
+
+
+def main():
+    config = yaml.safe_load(open(sys.argv[1]))
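+    # Round down to the start of the current UTC day, in milliseconds, so
+    # that key validity lines up on day boundaries.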
+ valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
+
+ server_name = config["server_name"]
+ signing_key = read_signing_keys(open(config["signing_key_path"]))[0]
+
+ database = config["database"]
+ assert database["name"] == "psycopg2", "Can only convert for postgresql"
+ args = database["args"]
+ args.pop("cp_max")
+ args.pop("cp_min")
+ connection = psycopg2.connect(**args)
+ keys = select_v1_keys(connection)
+ certificates = select_v1_certs(connection)
+ json = select_v2_json(connection)
+
+ result = {}
+ for server in keys:
+        if server not in json:
+ v2_json = convert_v1_to_v2(
+ server, valid_until, keys[server], certificates[server]
+ )
+ v2_json = sign_json(v2_json, server_name, signing_key)
+ result[server] = v2_json
+
+ yaml.safe_dump(result, sys.stdout, default_flow_style=False)
+
+ rows = list(
+ row for server, json in result.items()
+ for row in rows_v2(server, json)
+ )
+
+ cursor = connection.cursor()
+ cursor.executemany(
+ "INSERT INTO server_keys_json ("
+ " server_name, key_id, from_server,"
+ " ts_added_ms, ts_valid_until_ms, key_json"
+ ") VALUES (%s, %s, %s, %s, %s, %s)",
+ rows
+ )
+ connection.commit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts-dev/copyrighter-sql.pl b/scripts-dev/copyrighter-sql.pl
new file mode 100755
index 00000000..890e51e5
--- /dev/null
+++ b/scripts-dev/copyrighter-sql.pl
@@ -0,0 +1,33 @@
+#!/usr/bin/perl -pi
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+$copyright = <<EOT;
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+EOT
+
+s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
diff --git a/scripts-dev/copyrighter.pl b/scripts-dev/copyrighter.pl
new file mode 100755
index 00000000..a913d74c
--- /dev/null
+++ b/scripts-dev/copyrighter.pl
@@ -0,0 +1,33 @@
+#!/usr/bin/perl -pi
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+$copyright = <<EOT;
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+EOT
+
+s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
diff --git a/scripts-dev/database-save.sh b/scripts-dev/database-save.sh
new file mode 100755
index 00000000..040c8a49
--- /dev/null
+++ b/scripts-dev/database-save.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+# This script will write a dump file of local user state if you want to splat
+# your entire server database and start again but preserve the identity of
+# local users and their access tokens.
+#
+# To restore it, use
+#
+# $ sqlite3 homeserver.db < table-save.sql
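+#
+# To create the dump in the first place (hypothetical database path):
+#
+# $ ./database-save.sh homeserver.db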
+
+sqlite3 "$1" <<'EOF' >table-save.sql
+.dump users
+.dump access_tokens
+.dump presence
+.dump profiles
+EOF
diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py
new file mode 100755
index 00000000..f0d0cd8a
--- /dev/null
+++ b/scripts-dev/definitions.py
@@ -0,0 +1,142 @@
+#! /usr/bin/python
+
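+# Walk Python source trees, record where names are defined and where they are
+# used, and dump the result as YAML. A usage sketch (hypothetical arguments):
+#
+#     python definitions.py --unused --ignore "visit_.*" synapse/
+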
+import ast
+import yaml
+
+class DefinitionVisitor(ast.NodeVisitor):
+ def __init__(self):
+ super(DefinitionVisitor, self).__init__()
+ self.functions = {}
+ self.classes = {}
+ self.names = {}
+ self.attrs = set()
+ self.definitions = {
+ 'def': self.functions,
+ 'class': self.classes,
+ 'names': self.names,
+ 'attrs': self.attrs,
+ }
+
+ def visit_Name(self, node):
+ self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)
+
+ def visit_Attribute(self, node):
+ self.attrs.add(node.attr)
+ for child in ast.iter_child_nodes(node):
+ self.visit(child)
+
+ def visit_ClassDef(self, node):
+ visitor = DefinitionVisitor()
+ self.classes[node.name] = visitor.definitions
+ for child in ast.iter_child_nodes(node):
+ visitor.visit(child)
+
+ def visit_FunctionDef(self, node):
+ visitor = DefinitionVisitor()
+ self.functions[node.name] = visitor.definitions
+ for child in ast.iter_child_nodes(node):
+ visitor.visit(child)
+
+
+def non_empty(defs):
+ functions = {name: non_empty(f) for name, f in defs['def'].items()}
+ classes = {name: non_empty(f) for name, f in defs['class'].items()}
+ result = {}
+ if functions: result['def'] = functions
+ if classes: result['class'] = classes
+ names = defs['names']
+ uses = []
+ for name in names.get('Load', ()):
+ if name not in names.get('Param', ()) and name not in names.get('Store', ()):
+ uses.append(name)
+ uses.extend(defs['attrs'])
+ if uses: result['uses'] = uses
+ result['names'] = names
+ result['attrs'] = defs['attrs']
+ return result
+
+
+def definitions_in_code(input_code):
+ input_ast = ast.parse(input_code)
+ visitor = DefinitionVisitor()
+ visitor.visit(input_ast)
+ definitions = non_empty(visitor.definitions)
+ return definitions
+
+
+def definitions_in_file(filepath):
+ with open(filepath) as f:
+ return definitions_in_code(f.read())
+
+
+def defined_names(prefix, defs, names):
+ for name, funcs in defs.get('def', {}).items():
+ names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
+ defined_names(prefix + name + ".", funcs, names)
+
+ for name, funcs in defs.get('class', {}).items():
+ names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
+ defined_names(prefix + name + ".", funcs, names)
+
+
+def used_names(prefix, defs, names):
+ for name, funcs in defs.get('def', {}).items():
+ used_names(prefix + name + ".", funcs, names)
+
+ for name, funcs in defs.get('class', {}).items():
+ used_names(prefix + name + ".", funcs, names)
+
+ for used in defs.get('uses', ()):
+ if used in names:
+ names[used].setdefault('used', []).append(prefix.rstrip('.'))
+
+
+if __name__ == '__main__':
+ import sys, os, argparse, re
+
+ parser = argparse.ArgumentParser(description='Find definitions.')
+ parser.add_argument(
+ "--unused", action="store_true", help="Only list unused definitions"
+ )
+ parser.add_argument(
+ "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
+ )
+ parser.add_argument(
+ "--pattern", action="append", metavar="REGEXP",
+ help="Search for a pattern"
+ )
+ parser.add_argument(
+ "directories", nargs='+', metavar="DIR",
+ help="Directories to search for definitions"
+ )
+ args = parser.parse_args()
+
+ definitions = {}
+ for directory in args.directories:
+ for root, dirs, files in os.walk(directory):
+ for filename in files:
+ if filename.endswith(".py"):
+ filepath = os.path.join(root, filename)
+ definitions[filepath] = definitions_in_file(filepath)
+
+ names = {}
+ for filepath, defs in definitions.items():
+ defined_names(filepath + ":", defs, names)
+
+ for filepath, defs in definitions.items():
+ used_names(filepath + ":", defs, names)
+
+ patterns = [re.compile(pattern) for pattern in args.pattern or ()]
+ ignore = [re.compile(pattern) for pattern in args.ignore or ()]
+
+ result = {}
+ for name, definition in names.items():
+ if patterns and not any(pattern.match(name) for pattern in patterns):
+ continue
+ if ignore and any(pattern.match(name) for pattern in ignore):
+ continue
+ if args.unused and definition.get('used'):
+ continue
+ result[name] = definition
+
+ yaml.dump(result, sys.stdout, default_flow_style=False)
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
new file mode 100644
index 00000000..ea62dceb
--- /dev/null
+++ b/scripts-dev/federation_client.py
@@ -0,0 +1,146 @@
+import nacl.signing
+import json
+import base64
+import requests
+import sys
+import srvlookup
+
+
+def encode_base64(input_bytes):
+ """Encode bytes as a base64 string without any padding."""
+
+ input_len = len(input_bytes)
+ output_len = 4 * ((input_len + 2) // 3) + (input_len + 2) % 3 - 2
+ output_bytes = base64.b64encode(input_bytes)
+ output_string = output_bytes[:output_len].decode("ascii")
+ return output_string
+
+
+def decode_base64(input_string):
+ """Decode a base64 string to bytes inferring padding from the length of the
+ string."""
+
+ input_bytes = input_string.encode("ascii")
+ input_len = len(input_bytes)
+ padding = b"=" * (3 - ((input_len + 3) % 4))
+ output_len = 3 * ((input_len + 2) // 4) + (input_len + 2) % 4 - 2
+ output_bytes = base64.b64decode(input_bytes + padding)
+ return output_bytes[:output_len]
+
+
+def encode_canonical_json(value):
+ return json.dumps(
+ value,
+ # Encode code-points outside of ASCII as UTF-8 rather than \u escapes
+ ensure_ascii=False,
+        # Remove unnecessary white space.
+ separators=(',',':'),
+ # Sort the keys of dictionaries.
+ sort_keys=True,
+ # Encode the resulting unicode as UTF-8 bytes.
+ ).encode("UTF-8")
+
+
+def sign_json(json_object, signing_key, signing_name):
+ signatures = json_object.pop("signatures", {})
+ unsigned = json_object.pop("unsigned", None)
+
+ signed = signing_key.sign(encode_canonical_json(json_object))
+ signature_base64 = encode_base64(signed.signature)
+
+ key_id = "%s:%s" % (signing_key.alg, signing_key.version)
+ signatures.setdefault(signing_name, {})[key_id] = signature_base64
+
+ json_object["signatures"] = signatures
+ if unsigned is not None:
+ json_object["unsigned"] = unsigned
+
+ return json_object
+
+
+NACL_ED25519 = "ed25519"
+
+def decode_signing_key_base64(algorithm, version, key_base64):
+ """Decode a base64 encoded signing key
+ Args:
+ algorithm (str): The algorithm the key is for (currently "ed25519").
+ version (str): Identifies this key out of the keys for this entity.
+ key_base64 (str): Base64 encoded bytes of the key.
+ Returns:
+ A SigningKey object.
+ """
+ if algorithm == NACL_ED25519:
+ key_bytes = decode_base64(key_base64)
+ key = nacl.signing.SigningKey(key_bytes)
+ key.version = version
+ key.alg = NACL_ED25519
+ return key
+ else:
+ raise ValueError("Unsupported algorithm %s" % (algorithm,))
+
+
+def read_signing_keys(stream):
+ """Reads a list of keys from a stream
+ Args:
+ stream : A stream to iterate for keys.
+ Returns:
+ list of SigningKey objects.
+ """
+ keys = []
+ for line in stream:
+ algorithm, version, key_base64 = line.split()
+ keys.append(decode_signing_key_base64(algorithm, version, key_base64))
+ return keys
+
+
+def lookup(destination, path):
+ if ":" in destination:
+ return "https://%s%s" % (destination, path)
+ else:
+ try:
+ srv = srvlookup.lookup("matrix", "tcp", destination)[0]
+ return "https://%s:%d%s" % (srv.host, srv.port, path)
+        except Exception:
+            return "https://%s:%d%s" % (destination, 8448, path)
+
+def get_json(origin_name, origin_key, destination, path):
+ request_json = {
+ "method": "GET",
+ "uri": path,
+ "origin": origin_name,
+ "destination": destination,
+ }
+
+ signed_json = sign_json(request_json, origin_key, origin_name)
+
+ authorization_headers = []
+
+ for key, sig in signed_json["signatures"][origin_name].items():
+ authorization_headers.append(bytes(
+ "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
+ origin_name, key, sig,
+ )
+ ))
+
+ result = requests.get(
+ lookup(destination, path),
+ headers={"Authorization": authorization_headers[0]},
+ verify=False,
+ )
+ return result.json()
+
+
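+# A usage sketch (hypothetical server names and key file; the path argument is
+# appended to /_matrix/federation/v1/):
+#
+#     python federation_client.py myserver.example signing.key \
+#         otherserver.example "query/profile?user_id=%40bob%3Aotherserver.example"
+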
+def main():
+ origin_name, keyfile, destination, path = sys.argv[1:]
+
+ with open(keyfile) as f:
+ key = read_signing_keys(f)[0]
+
+ result = get_json(
+ origin_name, key, destination, "/_matrix/federation/v1/" + path
+ )
+
+ json.dump(result, sys.stdout)
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts-dev/hash_history.py b/scripts-dev/hash_history.py
new file mode 100644
index 00000000..616d6a10
--- /dev/null
+++ b/scripts-dev/hash_history.py
@@ -0,0 +1,69 @@
+from synapse.storage.pdu import PduStore
+from synapse.storage.signatures import SignatureStore
+from synapse.storage._base import SQLBaseStore
+from synapse.federation.units import Pdu
+from synapse.crypto.event_signing import (
+ add_event_pdu_content_hash, compute_pdu_event_reference_hash
+)
+from synapse.api.events.utils import prune_pdu
+from unpaddedbase64 import encode_base64, decode_base64
+from canonicaljson import encode_canonical_json
+import sqlite3
+import sys
+
+class Store(object):
+ _get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"]
+ _get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"]
+ _get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"]
+ _get_pdu_origin_signatures_txn = SignatureStore.__dict__["_get_pdu_origin_signatures_txn"]
+ _store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"]
+ _store_pdu_reference_hash_txn = SignatureStore.__dict__["_store_pdu_reference_hash_txn"]
+ _store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
+ _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
+
+
+store = Store()
+
+
+def select_pdus(cursor):
+ cursor.execute(
+ "SELECT pdu_id, origin FROM pdus ORDER BY depth ASC"
+ )
+
+ ids = cursor.fetchall()
+
+ pdu_tuples = store._get_pdu_tuples(cursor, ids)
+
+ pdus = [Pdu.from_pdu_tuple(p) for p in pdu_tuples]
+
+ reference_hashes = {}
+
+ for pdu in pdus:
+ try:
+ if pdu.prev_pdus:
+ print "PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
+ for pdu_id, origin, hashes in pdu.prev_pdus:
+ ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)]
+ hashes[ref_alg] = encode_base64(ref_hsh)
+ store._store_prev_pdu_hash_txn(cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh)
+ print "SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
+ pdu = add_event_pdu_content_hash(pdu)
+ ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu)
+ reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh)
+ store._store_pdu_reference_hash_txn(cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh)
+
+ for alg, hsh_base64 in pdu.hashes.items():
+ print alg, hsh_base64
+ store._store_pdu_content_hash_txn(cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64))
+
+        except Exception:
+            print "FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus
+
+def main():
+ conn = sqlite3.connect(sys.argv[1])
+ cursor = conn.cursor()
+ select_pdus(cursor)
+ conn.commit()
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts-dev/make_identicons.pl b/scripts-dev/make_identicons.pl
new file mode 100755
index 00000000..cbff63e2
--- /dev/null
+++ b/scripts-dev/make_identicons.pl
@@ -0,0 +1,39 @@
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+
+use DBI;
+use DBD::SQLite;
+use JSON;
+use Getopt::Long;
+
+my $db; # = "homeserver.db";
+my $server = "http://localhost:8008";
+my $size = 320;
+
+GetOptions("db|d=s", \$db,
+ "server|s=s", \$server,
+ "width|w=i", \$size) or usage();
+
+usage() unless $db;
+
+my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr;
+
+my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr;
+
+foreach (@$res) {
+ my ($token, $mxid) = ($_->[0], $_->[1]);
+ my ($user_id) = ($mxid =~ m/@(.*):/);
+ my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id);
+ if (!$url || $url =~ /#auto$/) {
+ `curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`;
+ my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`;
+ my $content_uri = from_json($json)->{content_uri};
+ `curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`;
+ }
+}
+
+sub usage {
+ die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)";
+} \ No newline at end of file
diff --git a/scripts-dev/nuke-room-from-db.sh b/scripts-dev/nuke-room-from-db.sh
new file mode 100755
index 00000000..58c036c8
--- /dev/null
+++ b/scripts-dev/nuke-room-from-db.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+## CAUTION:
+## This script will remove (hopefully) all trace of the given room ID from
+## your homeserver.db
+
+## Do not run it lightly.
+
+ROOMID="$1"
+
+sqlite3 homeserver.db <<EOF
+DELETE FROM context_depth WHERE context = '$ROOMID';
+DELETE FROM current_state WHERE context = '$ROOMID';
+DELETE FROM feedback WHERE room_id = '$ROOMID';
+DELETE FROM messages WHERE room_id = '$ROOMID';
+DELETE FROM pdu_backward_extremities WHERE context = '$ROOMID';
+DELETE FROM pdu_edges WHERE context = '$ROOMID';
+DELETE FROM pdu_forward_extremities WHERE context = '$ROOMID';
+DELETE FROM pdus WHERE context = '$ROOMID';
+DELETE FROM room_data WHERE room_id = '$ROOMID';
+DELETE FROM room_memberships WHERE room_id = '$ROOMID';
+DELETE FROM rooms WHERE room_id = '$ROOMID';
+DELETE FROM state_pdus WHERE context = '$ROOMID';
+EOF
diff --git a/scripts-dev/sphinx_api_docs.sh b/scripts-dev/sphinx_api_docs.sh
new file mode 100644
index 00000000..ee72b296
--- /dev/null
+++ b/scripts-dev/sphinx_api_docs.sh
@@ -0,0 +1 @@
+sphinx-apidoc -o docs/sphinx/ synapse/ -ef
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
new file mode 100755
index 00000000..4a520bdb
--- /dev/null
+++ b/scripts/register_new_matrix_user
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import getpass
+import hashlib
+import hmac
+import json
+import sys
+import urllib2
+import yaml
+
+
+def request_registration(user, password, server_location, shared_secret):
+ mac = hmac.new(
+ key=shared_secret,
+ msg=user,
+ digestmod=hashlib.sha1,
+ ).hexdigest()
+
+ data = {
+ "user": user,
+ "password": password,
+ "mac": mac,
+ "type": "org.matrix.login.shared_secret",
+ }
+
+ server_location = server_location.rstrip("/")
+
+ print "Sending registration request..."
+
+ req = urllib2.Request(
+ "%s/_matrix/client/api/v1/register" % (server_location,),
+ data=json.dumps(data),
+ headers={'Content-Type': 'application/json'}
+ )
+ try:
+ if sys.version_info[:3] >= (2, 7, 9):
+ # As of version 2.7.9, urllib2 now checks SSL certs
+ import ssl
+ f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
+ else:
+ f = urllib2.urlopen(req)
+ f.read()
+ f.close()
+ print "Success."
+ except urllib2.HTTPError as e:
+ print "ERROR! Received %d %s" % (e.code, e.reason,)
+ if 400 <= e.code < 500:
+ if e.info().type == "application/json":
+ resp = json.load(e)
+ if "error" in resp:
+ print resp["error"]
+ sys.exit(1)
+
+
+def register_new_user(user, password, server_location, shared_secret):
+ if not user:
+ try:
+ default_user = getpass.getuser()
+        except Exception:
+ default_user = None
+
+ if default_user:
+ user = raw_input("New user localpart [%s]: " % (default_user,))
+ if not user:
+ user = default_user
+ else:
+ user = raw_input("New user localpart: ")
+
+ if not user:
+ print "Invalid user name"
+ sys.exit(1)
+
+ if not password:
+ password = getpass.getpass("Password: ")
+
+ if not password:
+ print "Password cannot be blank."
+ sys.exit(1)
+
+ confirm_password = getpass.getpass("Confirm password: ")
+
+ if password != confirm_password:
+ print "Passwords do not match"
+ sys.exit(1)
+
+ request_registration(user, password, server_location, shared_secret)
+
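+# A usage sketch (hypothetical config path): read the shared secret from the
+# server's config file and register against a local listener:
+#
+#     register_new_matrix_user -c homeserver.yaml https://localhost:8448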
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Used to register new users with a given home server when"
+ " registration has been disabled. The home server must be"
+ " configured with the 'registration_shared_secret' option"
+ " set.",
+ )
+ parser.add_argument(
+ "-u", "--user",
+ default=None,
+ help="Local part of the new user. Will prompt if omitted.",
+ )
+ parser.add_argument(
+ "-p", "--password",
+ default=None,
+ help="New password for user. Will prompt if omitted.",
+ )
+
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument(
+ "-c", "--config",
+ type=argparse.FileType('r'),
+ help="Path to server config file. Used to read in shared secret.",
+ )
+
+ group.add_argument(
+ "-k", "--shared-secret",
+ help="Shared secret as defined in server config file.",
+ )
+
+ parser.add_argument(
+ "server_url",
+ default="https://localhost:8448",
+ nargs='?',
+ help="URL to use to talk to the home server. Defaults to "
+ " 'https://localhost:8448'.",
+ )
+
+ args = parser.parse_args()
+
+ if "config" in args and args.config:
+ config = yaml.safe_load(args.config)
+ secret = config.get("registration_shared_secret", None)
+ if not secret:
+ print "No 'registration_shared_secret' defined in config."
+ sys.exit(1)
+ else:
+ secret = args.shared_secret
+
+ register_new_user(args.user, args.password, args.server_url, secret)
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
new file mode 100755
index 00000000..62515997
--- /dev/null
+++ b/scripts/synapse_port_db
@@ -0,0 +1,761 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer, reactor
+from twisted.enterprise import adbapi
+
+from synapse.storage._base import LoggingTransaction, SQLBaseStore
+from synapse.storage.engines import create_engine
+
+import argparse
+import curses
+import logging
+import sys
+import time
+import traceback
+import yaml
+
+
+logger = logging.getLogger("synapse_port_db")
+
+
+BOOLEAN_COLUMNS = {
+ "events": ["processed", "outlier"],
+ "rooms": ["is_public"],
+ "event_edges": ["is_state"],
+ "presence_list": ["accepted"],
+}
+
+
+APPEND_ONLY_TABLES = [
+ "event_content_hashes",
+ "event_reference_hashes",
+ "event_signatures",
+ "event_edge_hashes",
+ "events",
+ "event_json",
+ "state_events",
+ "room_memberships",
+ "feedback",
+ "topics",
+ "room_names",
+ "rooms",
+ "local_media_repository",
+ "local_media_repository_thumbnails",
+ "remote_media_cache",
+ "remote_media_cache_thumbnails",
+ "redactions",
+ "event_edges",
+ "event_auth",
+ "received_transactions",
+ "sent_transactions",
+ "transaction_id_to_pdu",
+ "users",
+ "state_groups",
+ "state_groups_state",
+ "event_to_state_groups",
+ "rejections",
+]
+
+
+end_error_exec_info = None
+
+
+class Store(object):
+ """This object is used to pull out some of the convenience API from the
+ Storage layer.
+
+ *All* database interactions should go through this object.
+ """
+ def __init__(self, db_pool, engine):
+ self.db_pool = db_pool
+ self.database_engine = engine
+
+ _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
+ _simple_insert = SQLBaseStore.__dict__["_simple_insert"]
+
+ _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
+ _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
+ _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
+ _simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]
+
+ _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
+ _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
+
+ def runInteraction(self, desc, func, *args, **kwargs):
+ def r(conn):
+ try:
+ i = 0
+ N = 5
+ while True:
+ try:
+ txn = conn.cursor()
+ return func(
+ LoggingTransaction(txn, desc, self.database_engine, []),
+ *args, **kwargs
+ )
+ except self.database_engine.module.DatabaseError as e:
+ if self.database_engine.is_deadlock(e):
+ logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
+ if i < N:
+ i += 1
+ conn.rollback()
+ continue
+ raise
+ except Exception as e:
+ logger.debug("[TXN FAIL] {%s} %s", desc, e)
+ raise
+
+ return self.db_pool.runWithConnection(r)
+
+ def execute(self, f, *args, **kwargs):
+ return self.runInteraction(f.__name__, f, *args, **kwargs)
+
+ def execute_sql(self, sql, *args):
+ def r(txn):
+ txn.execute(sql, args)
+ return txn.fetchall()
+ return self.runInteraction("execute_sql", r)
+
+ def insert_many_txn(self, txn, table, headers, rows):
+ sql = "INSERT INTO %s (%s) VALUES (%s)" % (
+ table,
+ ", ".join(k for k in headers),
+ ", ".join("%s" for _ in headers)
+ )
+
+ try:
+ txn.executemany(sql, rows)
+        except Exception:
+ logger.exception(
+ "Failed to insert: %s",
+ table,
+ )
+ raise
+
+
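+# The Porter copies each table over in chunks, recording the last rowid copied
+# in a port_from_sqlite3 bookkeeping table so that an interrupted port can
+# resume where it left off (for append-only tables) or start afresh (for
+# everything else).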
+class Porter(object):
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+ @defer.inlineCallbacks
+ def setup_table(self, table):
+ if table in APPEND_ONLY_TABLES:
+ # It's safe to just carry on inserting.
+ next_chunk = yield self.postgres_store._simple_select_one_onecol(
+ table="port_from_sqlite3",
+ keyvalues={"table_name": table},
+ retcol="rowid",
+ allow_none=True,
+ )
+
+ total_to_port = None
+ if next_chunk is None:
+ if table == "sent_transactions":
+ next_chunk, already_ported, total_to_port = (
+ yield self._setup_sent_transactions()
+ )
+ else:
+ yield self.postgres_store._simple_insert(
+ table="port_from_sqlite3",
+ values={"table_name": table, "rowid": 1}
+ )
+
+ next_chunk = 1
+ already_ported = 0
+
+ if total_to_port is None:
+ already_ported, total_to_port = yield self._get_total_count_to_port(
+ table, next_chunk
+ )
+ else:
+ def delete_all(txn):
+ txn.execute(
+ "DELETE FROM port_from_sqlite3 WHERE table_name = %s",
+ (table,)
+ )
+ txn.execute("TRUNCATE %s CASCADE" % (table,))
+
+ yield self.postgres_store.execute(delete_all)
+
+ yield self.postgres_store._simple_insert(
+ table="port_from_sqlite3",
+ values={"table_name": table, "rowid": 0}
+ )
+
+ next_chunk = 1
+
+ already_ported, total_to_port = yield self._get_total_count_to_port(
+ table, next_chunk
+ )
+
+ defer.returnValue((table, already_ported, total_to_port, next_chunk))
+
+ @defer.inlineCallbacks
+ def handle_table(self, table, postgres_size, table_size, next_chunk):
+ if not table_size:
+ return
+
+ self.progress.add_table(table, postgres_size, table_size)
+
+ select = (
+ "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
+ % (table,)
+ )
+
+ while True:
+ def r(txn):
+ txn.execute(select, (next_chunk, self.batch_size,))
+ rows = txn.fetchall()
+ headers = [column[0] for column in txn.description]
+
+ return headers, rows
+
+ headers, rows = yield self.sqlite_store.runInteraction("select", r)
+
+ if rows:
+ next_chunk = rows[-1][0] + 1
+
+ self._convert_rows(table, headers, rows)
+
+ def insert(txn):
+ self.postgres_store.insert_many_txn(
+ txn, table, headers[1:], rows
+ )
+
+ self.postgres_store._simple_update_one_txn(
+ txn,
+ table="port_from_sqlite3",
+ keyvalues={"table_name": table},
+ updatevalues={"rowid": next_chunk},
+ )
+
+ yield self.postgres_store.execute(insert)
+
+ postgres_size += len(rows)
+
+ self.progress.update(table, postgres_size)
+ else:
+ return
+
+ def setup_db(self, db_config, database_engine):
+ db_conn = database_engine.module.connect(
+ **{
+ k: v for k, v in db_config.get("args", {}).items()
+ if not k.startswith("cp_")
+ }
+ )
+
+ database_engine.prepare_database(db_conn)
+
+ db_conn.commit()
+
+ @defer.inlineCallbacks
+ def run(self):
+ try:
+ sqlite_db_pool = adbapi.ConnectionPool(
+ self.sqlite_config["name"],
+ **self.sqlite_config["args"]
+ )
+
+ postgres_db_pool = adbapi.ConnectionPool(
+ self.postgres_config["name"],
+ **self.postgres_config["args"]
+ )
+
+ sqlite_engine = create_engine("sqlite3")
+ postgres_engine = create_engine("psycopg2")
+
+ self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
+ self.postgres_store = Store(postgres_db_pool, postgres_engine)
+
+ yield self.postgres_store.execute(
+ postgres_engine.check_database
+ )
+
+ # Step 1. Set up databases.
+ self.progress.set_state("Preparing SQLite3")
+            self.setup_db(self.sqlite_config, sqlite_engine)
+
+ self.progress.set_state("Preparing PostgreSQL")
+            self.setup_db(self.postgres_config, postgres_engine)
+
+ # Step 2. Get tables.
+ self.progress.set_state("Fetching tables")
+ sqlite_tables = yield self.sqlite_store._simple_select_onecol(
+ table="sqlite_master",
+ keyvalues={
+ "type": "table",
+ },
+ retcol="name",
+ )
+
+ postgres_tables = yield self.postgres_store._simple_select_onecol(
+ table="information_schema.tables",
+ keyvalues={
+ "table_schema": "public",
+ },
+ retcol="distinct table_name",
+ )
+
+ tables = set(sqlite_tables) & set(postgres_tables)
+
+ self.progress.set_state("Creating tables")
+
+ logger.info("Found %d tables", len(tables))
+
+ def create_port_table(txn):
+ txn.execute(
+ "CREATE TABLE port_from_sqlite3 ("
+ " table_name varchar(100) NOT NULL UNIQUE,"
+ " rowid bigint NOT NULL"
+ ")"
+ )
+
+ try:
+ yield self.postgres_store.runInteraction(
+ "create_port_table", create_port_table
+ )
+ except Exception as e:
+ logger.info("Failed to create port table: %s", e)
+
+ self.progress.set_state("Setting up")
+
+ # Set up tables.
+ setup_res = yield defer.gatherResults(
+ [
+ self.setup_table(table)
+ for table in tables
+ if table not in ["schema_version", "applied_schema_deltas"]
+ and not table.startswith("sqlite_")
+ ],
+ consumeErrors=True,
+ )
+
+ # Process tables.
+ yield defer.gatherResults(
+ [
+ self.handle_table(*res)
+ for res in setup_res
+ ],
+ consumeErrors=True,
+ )
+
+ self.progress.done()
+ except:
+ global end_error_exec_info
+ end_error_exec_info = sys.exc_info()
+ logger.exception("")
+ finally:
+ reactor.stop()
+
+ def _convert_rows(self, table, headers, rows):
+ bool_col_names = BOOLEAN_COLUMNS.get(table, [])
+
+ bool_cols = [
+ i for i, h in enumerate(headers) if h in bool_col_names
+ ]
+
+ def conv(j, col):
+ if j in bool_cols:
+ return bool(col)
+ return col
+
+ for i, row in enumerate(rows):
+ rows[i] = tuple(
+ conv(j, col)
+ for j, col in enumerate(row)
+ if j > 0
+ )
+
+ @defer.inlineCallbacks
+ def _setup_sent_transactions(self):
+ # Only save things from the last day
+ yesterday = int(time.time()*1000) - 86400000
+
+ # And save the max transaction id from each destination
+ select = (
+ "SELECT rowid, * FROM sent_transactions WHERE rowid IN ("
+ "SELECT max(rowid) FROM sent_transactions"
+ " GROUP BY destination"
+ ")"
+ )
+
+ def r(txn):
+ txn.execute(select)
+ rows = txn.fetchall()
+ headers = [column[0] for column in txn.description]
+
+ ts_ind = headers.index('ts')
+
+ return headers, [r for r in rows if r[ts_ind] < yesterday]
+
+ headers, rows = yield self.sqlite_store.runInteraction(
+ "select", r,
+ )
+
+ self._convert_rows("sent_transactions", headers, rows)
+
+ inserted_rows = len(rows)
+ if inserted_rows:
+ max_inserted_rowid = max(r[0] for r in rows)
+
+ def insert(txn):
+ self.postgres_store.insert_many_txn(
+ txn, "sent_transactions", headers[1:], rows
+ )
+
+ yield self.postgres_store.execute(insert)
+ else:
+ max_inserted_rowid = 0
+
+ def get_start_id(txn):
+ txn.execute(
+ "SELECT rowid FROM sent_transactions WHERE ts >= ?"
+ " ORDER BY rowid ASC LIMIT 1",
+ (yesterday,)
+ )
+
+ rows = txn.fetchall()
+ if rows:
+ return rows[0][0]
+ else:
+ return 1
+
+ next_chunk = yield self.sqlite_store.execute(get_start_id)
+ next_chunk = max(max_inserted_rowid + 1, next_chunk)
+
+ yield self.postgres_store._simple_insert(
+ table="port_from_sqlite3",
+ values={"table_name": "sent_transactions", "rowid": next_chunk}
+ )
+
+ def get_sent_table_size(txn):
+ txn.execute(
+ "SELECT count(*) FROM sent_transactions"
+ " WHERE ts >= ?",
+ (yesterday,)
+ )
+ size, = txn.fetchone()
+ return int(size)
+
+ remaining_count = yield self.sqlite_store.execute(
+ get_sent_table_size
+ )
+
+ total_count = remaining_count + inserted_rows
+
+ defer.returnValue((next_chunk, inserted_rows, total_count))
+
+ @defer.inlineCallbacks
+ def _get_remaining_count_to_port(self, table, next_chunk):
+ rows = yield self.sqlite_store.execute_sql(
+ "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
+ next_chunk,
+ )
+
+ defer.returnValue(rows[0][0])
+
+ @defer.inlineCallbacks
+ def _get_already_ported_count(self, table):
+ rows = yield self.postgres_store.execute_sql(
+ "SELECT count(*) FROM %s" % (table,),
+ )
+
+ defer.returnValue(rows[0][0])
+
+ @defer.inlineCallbacks
+ def _get_total_count_to_port(self, table, next_chunk):
+ remaining, done = yield defer.gatherResults(
+ [
+ self._get_remaining_count_to_port(table, next_chunk),
+ self._get_already_ported_count(table),
+ ],
+ consumeErrors=True,
+ )
+
+ remaining = int(remaining) if remaining else 0
+ done = int(done) if done else 0
+
+ defer.returnValue((done, remaining + done))
+
+
+##############################################
+###### The following is simply UI stuff ######
+##############################################
+
+
+class Progress(object):
+ """Used to report progress of the port
+ """
+ def __init__(self):
+ self.tables = {}
+
+ self.start_time = int(time.time())
+
+ def add_table(self, table, cur, size):
+ self.tables[table] = {
+ "start": cur,
+ "num_done": cur,
+ "total": size,
+ "perc": int(cur * 100 / size),
+ }
+
+ def update(self, table, num_done):
+ data = self.tables[table]
+ data["num_done"] = num_done
+ data["perc"] = int(num_done * 100 / data["total"])
+
+ def done(self):
+ pass
+
+
+class CursesProgress(Progress):
+ """Reports progress to a curses window
+ """
+ def __init__(self, stdscr):
+ self.stdscr = stdscr
+
+ curses.use_default_colors()
+ curses.curs_set(0)
+
+ curses.init_pair(1, curses.COLOR_RED, -1)
+ curses.init_pair(2, curses.COLOR_GREEN, -1)
+
+ self.last_update = 0
+
+ self.finished = False
+
+ self.total_processed = 0
+ self.total_remaining = 0
+
+ super(CursesProgress, self).__init__()
+
+ def update(self, table, num_done):
+ super(CursesProgress, self).update(table, num_done)
+
+ self.total_processed = 0
+ self.total_remaining = 0
+ for table, data in self.tables.items():
+ self.total_processed += data["num_done"] - data["start"]
+ self.total_remaining += data["total"] - data["num_done"]
+
+ self.render()
+
+ def render(self, force=False):
+ now = time.time()
+
+ if not force and now - self.last_update < 0.2:
+ # reactor.callLater(1, self.render)
+ return
+
+ self.stdscr.clear()
+
+ rows, cols = self.stdscr.getmaxyx()
+
+ duration = int(now) - int(self.start_time)
+
+ minutes, seconds = divmod(duration, 60)
+ duration_str = '%02dm %02ds' % (minutes, seconds,)
+
+ if self.finished:
+ status = "Time spent: %s (Done!)" % (duration_str,)
+ else:
+ if self.total_processed > 0:
+ left = float(self.total_remaining) / self.total_processed
+
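+                # Proportional ETA: if the rows processed so far took the
+                # elapsed time, the remaining rows should take roughly
+                # elapsed * (remaining / processed).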
+ est_remaining = (int(now) - self.start_time) * left
+ est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
+ else:
+ est_remaining_str = "Unknown"
+ status = (
+ "Time spent: %s (est. remaining: %s)"
+ % (duration_str, est_remaining_str,)
+ )
+
+ self.stdscr.addstr(
+ 0, 0,
+ status,
+ curses.A_BOLD,
+ )
+
+ max_len = max([len(t) for t in self.tables.keys()])
+
+ left_margin = 5
+ middle_space = 1
+
+ items = self.tables.items()
+ items.sort(
+ key=lambda i: (i[1]["perc"], i[0]),
+ )
+
+ for i, (table, data) in enumerate(items):
+ if i + 2 >= rows:
+ break
+
+ perc = data["perc"]
+
+ color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
+
+ self.stdscr.addstr(
+ i+2, left_margin + max_len - len(table),
+ table,
+ curses.A_BOLD | color,
+ )
+
+ size = 20
+
+ progress = "[%s%s]" % (
+ "#" * int(perc*size/100),
+ " " * (size - int(perc*size/100)),
+ )
+
+ self.stdscr.addstr(
+ i+2, left_margin + max_len + middle_space,
+ "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
+ )
+
+ if self.finished:
+ self.stdscr.addstr(
+ rows-1, 0,
+ "Press any key to exit...",
+ )
+
+ self.stdscr.refresh()
+ self.last_update = time.time()
+
+ def done(self):
+ self.finished = True
+ self.render(True)
+ self.stdscr.getch()
+
+ def set_state(self, state):
+ self.stdscr.clear()
+ self.stdscr.addstr(
+ 0, 0,
+ state + "...",
+ curses.A_BOLD,
+ )
+ self.stdscr.refresh()
+
+
+class TerminalProgress(Progress):
+ """Just prints progress to the terminal
+ """
+ def update(self, table, num_done):
+ super(TerminalProgress, self).update(table, num_done)
+
+ data = self.tables[table]
+
+ print "%s: %d%% (%d/%d)" % (
+ table, data["perc"],
+ data["num_done"], data["total"],
+ )
+
+ def set_state(self, state):
+ print state + "..."
+
+
+##############################################
+##############################################
+
+
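+# Example invocation (paths are illustrative; run against a snapshot copy
+# of the SQLite database, never the live file):
+#
+#   python scripts/port_from_sqlite_to_postgres.py \
+#       --sqlite-database homeserver.db.snapshot \
+#       --postgres-config database_config.yaml \
+#       --curses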
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="A script to port an existing synapse SQLite database to"
+ " a new PostgreSQL database."
+ )
+ parser.add_argument("-v", action='store_true')
+ parser.add_argument(
+ "--sqlite-database", required=True,
+ help="The snapshot of the SQLite database file. This must not be"
+ " currently used by a running synapse server"
+ )
+ parser.add_argument(
+ "--postgres-config", type=argparse.FileType('r'), required=True,
+ help="The database config file for the PostgreSQL database"
+ )
+ parser.add_argument(
+ "--curses", action='store_true',
+ help="display a curses based progress UI"
+ )
+
+ parser.add_argument(
+ "--batch-size", type=int, default=1000,
+ help="The number of rows to select from the SQLite table each"
+ " iteration [default=1000]",
+ )
+
+ args = parser.parse_args()
+
+ logging_config = {
+ "level": logging.DEBUG if args.v else logging.INFO,
+ "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
+ }
+
+ if args.curses:
+ logging_config["filename"] = "port-synapse.log"
+
+ logging.basicConfig(**logging_config)
+
+ sqlite_config = {
+ "name": "sqlite3",
+ "args": {
+ "database": args.sqlite_database,
+ "cp_min": 1,
+ "cp_max": 1,
+ "check_same_thread": False,
+ },
+ }
+
+ postgres_config = yaml.safe_load(args.postgres_config)
+
+ if "database" in postgres_config:
+ postgres_config = postgres_config["database"]
+
+ if "name" not in postgres_config:
+ sys.stderr.write("Malformed database config: no 'name'")
+ sys.exit(2)
+ if postgres_config["name"] != "psycopg2":
+ sys.stderr.write("Database must use 'psycopg2' connector.")
+ sys.exit(3)
+
+ def start(stdscr=None):
+ if stdscr:
+ progress = CursesProgress(stdscr)
+ else:
+ progress = TerminalProgress()
+
+ porter = Porter(
+ sqlite_config=sqlite_config,
+ postgres_config=postgres_config,
+ progress=progress,
+ batch_size=args.batch_size,
+ )
+
+ reactor.callWhenRunning(porter.run)
+
+ reactor.run()
+
+ if args.curses:
+ curses.wrapper(start)
+ else:
+ start()
+
+ if end_error_exec_info:
+ exc_type, exc_value, exc_traceback = end_error_exec_info
+ traceback.print_exception(exc_type, exc_value, exc_traceback)
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..ba027c7d
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,18 @@
+[build_sphinx]
+source-dir = docs/sphinx
+build-dir = docs/build
+all_files = 1
+
+[trial]
+test_suite = tests
+
+[check-manifest]
+ignore =
+ contrib
+ contrib/*
+ docs/*
+ pylint.cfg
+ tox.ini
+
+[flake8]
+max-line-length = 90
diff --git a/setup.py b/setup.py
new file mode 100755
index 00000000..9d24761d
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import os
+from setuptools import setup, find_packages, Command
+import sys
+
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
+def read_file(path_segments):
+ """Read a file from the package. Takes a list of strings to join to
+ make the path"""
+ file_path = os.path.join(here, *path_segments)
+ with open(file_path) as f:
+ return f.read()
+
+
+def exec_file(path_segments):
+ """Execute a single python file to get the variables defined in it"""
+ result = {}
+ code = read_file(path_segments)
+ exec(code, result)
+ return result
+
+
+class Tox(Command):
+ user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
+
+ def initialize_options(self):
+ self.tox_args = None
+
+ def finalize_options(self):
+ self.test_args = []
+ self.test_suite = True
+
+ def run(self):
+        # Import here, because outside of run() the build eggs aren't loaded.
+ try:
+ import tox
+ except ImportError:
+ try:
+ self.distribution.fetch_build_eggs("tox")
+ import tox
+ except:
+ raise RuntimeError(
+ "The tests need 'tox' to run. Please install 'tox'."
+ )
+ import shlex
+ args = self.tox_args
+ if args:
+ args = shlex.split(self.tox_args)
+ else:
+ args = []
+ errno = tox.cmdline(args=args)
+ sys.exit(errno)
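+
+# Usage sketch (illustrative): run the test suite through setup.py, e.g.
+#
+#   python setup.py test
+#   python setup.py test --tox-args="-e py27"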
+
+
+version = exec_file(("synapse", "__init__.py"))["__version__"]
+dependencies = exec_file(("synapse", "python_dependencies.py"))
+long_description = read_file(("README.rst",))
+
+setup(
+ name="matrix-synapse",
+ version=version,
+ packages=find_packages(exclude=["tests", "tests.*"]),
+ description="Reference Synapse Home Server",
+ install_requires=dependencies['requirements'](include_conditional=True).keys(),
+ dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
+ include_package_data=True,
+ zip_safe=False,
+ long_description=long_description,
+ scripts=["synctl"] + glob.glob("scripts/*"),
+ cmdclass={'test': Tox},
+)
diff --git a/synapse/__init__.py b/synapse/__init__.py
new file mode 100644
index 00000000..f0eac97b
--- /dev/null
+++ b/synapse/__init__.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" This is a reference implementation of a Matrix home server.
+"""
+
+__version__ = "0.11.0"
diff --git a/synapse/api/__init__.py b/synapse/api/__init__.py
new file mode 100644
index 00000000..c488b10d
--- /dev/null
+++ b/synapse/api/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
new file mode 100644
index 00000000..8111b344
--- /dev/null
+++ b/synapse/api/auth.py
@@ -0,0 +1,943 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains classes for authenticating the user."""
+from canonicaljson import encode_canonical_json
+from signedjson.key import decode_verify_key_bytes
+from signedjson.sign import verify_signed_json, SignatureVerifyException
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership, JoinRules
+from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
+from synapse.types import RoomID, UserID, EventID
+from synapse.util.logutils import log_function
+from unpaddedbase64 import decode_base64
+
+import logging
+import pymacaroons
+
+logger = logging.getLogger(__name__)
+
+
+AuthEventTypes = (
+ EventTypes.Create, EventTypes.Member, EventTypes.PowerLevels,
+ EventTypes.JoinRules, EventTypes.RoomHistoryVisibility,
+ EventTypes.ThirdPartyInvite,
+)
+
+
+class Auth(object):
+
+ def __init__(self, hs):
+ self.hs = hs
+ self.store = hs.get_datastore()
+ self.state = hs.get_state_handler()
+ self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
+ self._KNOWN_CAVEAT_PREFIXES = set([
+ "gen = ",
+ "guest = ",
+ "type = ",
+ "time < ",
+ "user_id = ",
+ ])
+
+ def check(self, event, auth_events):
+ """ Checks if this event is correctly authed.
+
+ Args:
+ event: the event being checked.
+            auth_events (dict): map from (event type, state key) pairs to
+                events; the existing room state used for the auth checks.
+
+        Returns:
+            True if the auth checks pass.
+
+        Raises:
+            AuthError: if the event is not correctly authorised.
+ """
+ self.check_size_limits(event)
+
+ try:
+ if not hasattr(event, "room_id"):
+ raise AuthError(500, "Event has no room_id: %s" % event)
+ if auth_events is None:
+ # Oh, we don't know what the state of the room was, so we
+ # are trusting that this is allowed (at least for now)
+ logger.warn("Trusting event: %s", event.event_id)
+ return True
+
+ if event.type == EventTypes.Create:
+ # FIXME
+ return True
+
+ creation_event = auth_events.get((EventTypes.Create, ""), None)
+
+ if not creation_event:
+ raise SynapseError(
+ 403,
+ "Room %r does not exist" % (event.room_id,)
+ )
+
+ creating_domain = RoomID.from_string(event.room_id).domain
+ originating_domain = UserID.from_string(event.sender).domain
+ if creating_domain != originating_domain:
+ if not self.can_federate(event, auth_events):
+ raise AuthError(
+ 403,
+ "This room has been marked as unfederatable."
+ )
+
+ # FIXME: Temp hack
+ if event.type == EventTypes.Aliases:
+ return True
+
+ logger.debug(
+ "Auth events: %s",
+ [a.event_id for a in auth_events.values()]
+ )
+
+ if event.type == EventTypes.Member:
+ allowed = self.is_membership_change_allowed(
+ event, auth_events
+ )
+ if allowed:
+ logger.debug("Allowing! %s", event)
+ else:
+ logger.debug("Denying! %s", event)
+ return allowed
+
+ self.check_event_sender_in_room(event, auth_events)
+ self._can_send_event(event, auth_events)
+
+ if event.type == EventTypes.PowerLevels:
+ self._check_power_levels(event, auth_events)
+
+ if event.type == EventTypes.Redaction:
+ self.check_redaction(event, auth_events)
+
+ logger.debug("Allowing! %s", event)
+ except AuthError as e:
+ logger.info(
+ "Event auth check failed on event %s with msg: %s",
+ event, e.msg
+ )
+ logger.info("Denying! %s", event)
+ raise
+
+ def check_size_limits(self, event):
+ def too_big(field):
+ raise EventSizeError("%s too large" % (field,))
+
+ if len(event.user_id) > 255:
+ too_big("user_id")
+ if len(event.room_id) > 255:
+ too_big("room_id")
+ if event.is_state() and len(event.state_key) > 255:
+ too_big("state_key")
+ if len(event.type) > 255:
+ too_big("type")
+ if len(event.event_id) > 255:
+ too_big("event_id")
+ if len(encode_canonical_json(event.get_pdu_json())) > 65536:
+ too_big("event")
+
+ @defer.inlineCallbacks
+ def check_joined_room(self, room_id, user_id, current_state=None):
+ """Check if the user is currently joined in the room
+ Args:
+ room_id(str): The room to check.
+ user_id(str): The user to check.
+ current_state(dict): Optional map of the current state of the room.
+ If provided then that map is used to check whether they are a
+ member of the room. Otherwise the current membership is
+ loaded from the database.
+ Raises:
+ AuthError if the user is not in the room.
+ Returns:
+ A deferred membership event for the user if the user is in
+ the room.
+ """
+ if current_state:
+ member = current_state.get(
+ (EventTypes.Member, user_id),
+ None
+ )
+ else:
+ member = yield self.state.get_current_state(
+ room_id=room_id,
+ event_type=EventTypes.Member,
+ state_key=user_id
+ )
+
+ self._check_joined_room(member, user_id, room_id)
+ defer.returnValue(member)
+
+ @defer.inlineCallbacks
+ def check_user_was_in_room(self, room_id, user_id):
+ """Check if the user was in the room at some point.
+ Args:
+ room_id(str): The room to check.
+ user_id(str): The user to check.
+ Raises:
+ AuthError if the user was never in the room.
+ Returns:
+ A deferred membership event for the user if the user was in the
+ room. This will be the join event if they are currently joined to
+ the room. This will be the leave event if they have left the room.
+ """
+ member = yield self.state.get_current_state(
+ room_id=room_id,
+ event_type=EventTypes.Member,
+ state_key=user_id
+ )
+ membership = member.membership if member else None
+
+ if membership not in (Membership.JOIN, Membership.LEAVE):
+ raise AuthError(403, "User %s not in room %s" % (
+ user_id, room_id
+ ))
+
+ defer.returnValue(member)
+
+ @defer.inlineCallbacks
+ def check_host_in_room(self, room_id, host):
+ curr_state = yield self.state.get_current_state(room_id)
+
+ for event in curr_state.values():
+ if event.type == EventTypes.Member:
+ try:
+ if UserID.from_string(event.state_key).domain != host:
+ continue
+ except:
+ logger.warn("state_key not user_id: %s", event.state_key)
+ continue
+
+ if event.content["membership"] == Membership.JOIN:
+ defer.returnValue(True)
+
+ defer.returnValue(False)
+
+ def check_event_sender_in_room(self, event, auth_events):
+ key = (EventTypes.Member, event.user_id, )
+ member_event = auth_events.get(key)
+
+ return self._check_joined_room(
+ member_event,
+ event.user_id,
+ event.room_id
+ )
+
+ def _check_joined_room(self, member, user_id, room_id):
+ if not member or member.membership != Membership.JOIN:
+ raise AuthError(403, "User %s not in room %s (%s)" % (
+ user_id, room_id, repr(member)
+ ))
+
+ def can_federate(self, event, auth_events):
+ creation_event = auth_events.get((EventTypes.Create, ""))
+
+ return creation_event.content.get("m.federate", True) is True
+
+ @log_function
+ def is_membership_change_allowed(self, event, auth_events):
+ membership = event.content["membership"]
+
+ # Check if this is the room creator joining:
+ if len(event.prev_events) == 1 and Membership.JOIN == membership:
+ # Get room creation event:
+ key = (EventTypes.Create, "", )
+ create = auth_events.get(key)
+ if create and event.prev_events[0][0] == create.event_id:
+ if create.content["creator"] == event.state_key:
+ return True
+
+ target_user_id = event.state_key
+
+ creating_domain = RoomID.from_string(event.room_id).domain
+ target_domain = UserID.from_string(target_user_id).domain
+ if creating_domain != target_domain:
+ if not self.can_federate(event, auth_events):
+ raise AuthError(
+ 403,
+ "This room has been marked as unfederatable."
+ )
+
+ # get info about the caller
+ key = (EventTypes.Member, event.user_id, )
+ caller = auth_events.get(key)
+
+ caller_in_room = caller and caller.membership == Membership.JOIN
+ caller_invited = caller and caller.membership == Membership.INVITE
+
+ # get info about the target
+ key = (EventTypes.Member, target_user_id, )
+ target = auth_events.get(key)
+
+ target_in_room = target and target.membership == Membership.JOIN
+ target_banned = target and target.membership == Membership.BAN
+
+ key = (EventTypes.JoinRules, "", )
+ join_rule_event = auth_events.get(key)
+ if join_rule_event:
+ join_rule = join_rule_event.content.get(
+ "join_rule", JoinRules.INVITE
+ )
+ else:
+ join_rule = JoinRules.INVITE
+
+ user_level = self._get_user_power_level(event.user_id, auth_events)
+ target_level = self._get_user_power_level(
+ target_user_id, auth_events
+ )
+
+ # FIXME (erikj): What should we do here as the default?
+ ban_level = self._get_named_level(auth_events, "ban", 50)
+
+ logger.debug(
+ "is_membership_change_allowed: %s",
+ {
+ "caller_in_room": caller_in_room,
+ "caller_invited": caller_invited,
+ "target_banned": target_banned,
+ "target_in_room": target_in_room,
+ "membership": membership,
+ "join_rule": join_rule,
+ "target_user_id": target_user_id,
+ "event.user_id": event.user_id,
+ }
+ )
+
+ if Membership.INVITE == membership and "third_party_invite" in event.content:
+ if not self._verify_third_party_invite(event, auth_events):
+ raise AuthError(403, "You are not invited to this room.")
+ return True
+
+ if Membership.JOIN != membership:
+ if (caller_invited
+ and Membership.LEAVE == membership
+ and target_user_id == event.user_id):
+ return True
+
+ if not caller_in_room: # caller isn't joined
+ raise AuthError(
+ 403,
+ "%s not in room %s." % (event.user_id, event.room_id,)
+ )
+
+ if Membership.INVITE == membership:
+ # TODO (erikj): We should probably handle this more intelligently
+ # PRIVATE join rules.
+
+ # Invites are valid iff caller is in the room and target isn't.
+ if target_banned:
+ raise AuthError(
+ 403, "%s is banned from the room" % (target_user_id,)
+ )
+ elif target_in_room: # the target is already in the room.
+ raise AuthError(403, "%s is already in the room." %
+ target_user_id)
+ else:
+ invite_level = self._get_named_level(auth_events, "invite", 0)
+
+ if user_level < invite_level:
+ raise AuthError(
+ 403, "You cannot invite user %s." % target_user_id
+ )
+ elif Membership.JOIN == membership:
+ # Joins are valid iff caller == target and they were:
+ # invited: They are accepting the invitation
+ # joined: It's a NOOP
+ if event.user_id != target_user_id:
+ raise AuthError(403, "Cannot force another user to join.")
+ elif target_banned:
+ raise AuthError(403, "You are banned from this room")
+ elif join_rule == JoinRules.PUBLIC:
+ pass
+ elif join_rule == JoinRules.INVITE:
+ if not caller_in_room and not caller_invited:
+ raise AuthError(403, "You are not invited to this room.")
+ else:
+ # TODO (erikj): may_join list
+ # TODO (erikj): private rooms
+ raise AuthError(403, "You are not allowed to join this room")
+ elif Membership.LEAVE == membership:
+ # TODO (erikj): Implement kicks.
+ if target_banned and user_level < ban_level:
+ raise AuthError(
+ 403, "You cannot unban user &s." % (target_user_id,)
+ )
+ elif target_user_id != event.user_id:
+ kick_level = self._get_named_level(auth_events, "kick", 50)
+
+ if user_level < kick_level or user_level <= target_level:
+ raise AuthError(
+ 403, "You cannot kick user %s." % target_user_id
+ )
+ elif Membership.BAN == membership:
+ if user_level < ban_level or user_level <= target_level:
+ raise AuthError(403, "You don't have permission to ban")
+ else:
+ raise AuthError(500, "Unknown membership %s" % membership)
+
+ return True
+
+ def _verify_third_party_invite(self, event, auth_events):
+ """
+ Validates that the invite event is authorized by a previous third-party invite.
+
+ Checks that the public key, and keyserver, match those in the third party invite,
+ and that the invite event has a signature issued using that public key.
+
+ Args:
+ event: The m.room.member join event being validated.
+ auth_events: All relevant previous context events which may be used
+ for authorization decisions.
+
+        Returns:
+ True if the event fulfills the expectations of a previous third party
+ invite event.
+ """
+ if "third_party_invite" not in event.content:
+ return False
+ if "signed" not in event.content["third_party_invite"]:
+ return False
+ signed = event.content["third_party_invite"]["signed"]
+ for key in {"mxid", "token"}:
+ if key not in signed:
+ return False
+
+ token = signed["token"]
+
+ invite_event = auth_events.get(
+ (EventTypes.ThirdPartyInvite, token,)
+ )
+ if not invite_event:
+ return False
+
+ if event.user_id != invite_event.user_id:
+ return False
+ try:
+ public_key = invite_event.content["public_key"]
+ if signed["mxid"] != event.state_key:
+ return False
+ if signed["token"] != token:
+ return False
+ for server, signature_block in signed["signatures"].items():
+ for key_name, encoded_signature in signature_block.items():
+ if not key_name.startswith("ed25519:"):
+ return False
+ verify_key = decode_verify_key_bytes(
+ key_name,
+ decode_base64(public_key)
+ )
+ verify_signed_json(signed, server, verify_key)
+
+ # We got the public key from the invite, so we know that the
+ # correct server signed the signed bundle.
+ # The caller is responsible for checking that the signing
+ # server has not revoked that public key.
+ return True
+ return False
+ except (KeyError, SignatureVerifyException,):
+ return False
+
+ def _get_power_level_event(self, auth_events):
+ key = (EventTypes.PowerLevels, "", )
+ return auth_events.get(key)
+
+ def _get_user_power_level(self, user_id, auth_events):
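+        """Resolve the user's power level from the room's power_levels event,
+        falling back to users_default. If the room has no power_levels event,
+        the room creator gets 100 and everyone else 0.
+        """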
+ power_level_event = self._get_power_level_event(auth_events)
+
+ if power_level_event:
+ level = power_level_event.content.get("users", {}).get(user_id)
+ if not level:
+ level = power_level_event.content.get("users_default", 0)
+
+ if level is None:
+ return 0
+ else:
+ return int(level)
+ else:
+ key = (EventTypes.Create, "", )
+ create_event = auth_events.get(key)
+ if (create_event is not None and
+ create_event.content["creator"] == user_id):
+ return 100
+ else:
+ return 0
+
+ def _get_named_level(self, auth_events, name, default):
+ power_level_event = self._get_power_level_event(auth_events)
+
+ if not power_level_event:
+ return default
+
+ level = power_level_event.content.get(name, None)
+ if level is not None:
+ return int(level)
+ else:
+ return default
+
+ @defer.inlineCallbacks
+ def get_user_by_req(self, request, allow_guest=False):
+ """ Get a registered user's ID.
+
+ Args:
+ request - An HTTP request with an access_token query parameter.
+ Returns:
+ tuple of:
+ UserID (str)
+ Access token ID (str)
+ Raises:
+ AuthError if no user by that token exists or the token is invalid.
+ """
+ # Can optionally look elsewhere in the request (e.g. headers)
+ try:
+ access_token = request.args["access_token"][0]
+
+ # Check for application service tokens with a user_id override
+ try:
+ app_service = yield self.store.get_app_service_by_token(
+ access_token
+ )
+ if not app_service:
+ raise KeyError
+
+ user_id = app_service.sender
+ if "user_id" in request.args:
+ user_id = request.args["user_id"][0]
+ if not app_service.is_interested_in_user(user_id):
+ raise AuthError(
+ 403,
+ "Application service cannot masquerade as this user."
+ )
+
+ if not user_id:
+ raise KeyError
+
+ request.authenticated_entity = user_id
+
+ defer.returnValue((UserID.from_string(user_id), "", False))
+ except KeyError:
+ pass # normal users won't have the user_id query parameter set.
+
+ user_info = yield self._get_user_by_access_token(access_token)
+ user = user_info["user"]
+ token_id = user_info["token_id"]
+ is_guest = user_info["is_guest"]
+
+ ip_addr = self.hs.get_ip_from_request(request)
+ user_agent = request.requestHeaders.getRawHeaders(
+ "User-Agent",
+ default=[""]
+ )[0]
+ if user and access_token and ip_addr:
+ self.store.insert_client_ip(
+ user=user,
+ access_token=access_token,
+ ip=ip_addr,
+ user_agent=user_agent
+ )
+
+ if is_guest and not allow_guest:
+ raise AuthError(
+ 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
+ )
+
+ request.authenticated_entity = user.to_string()
+
+ defer.returnValue((user, token_id, is_guest,))
+ except KeyError:
+ raise AuthError(
+ self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
+ errcode=Codes.MISSING_TOKEN
+ )
+
+ @defer.inlineCallbacks
+ def _get_user_by_access_token(self, token):
+ """ Get a registered user's ID.
+
+ Args:
+ token (str): The access token to get the user by.
+ Returns:
+            dict: includes the user, the ID of their access token, and
+                whether they are a guest.
+ Raises:
+ AuthError if no user by that token exists or the token is invalid.
+ """
+ try:
+ ret = yield self._get_user_from_macaroon(token)
+ except AuthError:
+ # TODO(daniel): Remove this fallback when all existing access tokens
+ # have been re-issued as macaroons.
+ ret = yield self._look_up_user_by_access_token(token)
+ defer.returnValue(ret)
+
+ @defer.inlineCallbacks
+ def _get_user_from_macaroon(self, macaroon_str):
+ try:
+ macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
+ self.validate_macaroon(
+ macaroon, "access",
+ [lambda c: c.startswith("time < ")]
+ )
+
+ user_prefix = "user_id = "
+ user = None
+ guest = False
+ for caveat in macaroon.caveats:
+ if caveat.caveat_id.startswith(user_prefix):
+ user = UserID.from_string(caveat.caveat_id[len(user_prefix):])
+ elif caveat.caveat_id == "guest = true":
+ guest = True
+
+ if user is None:
+ raise AuthError(
+ self.TOKEN_NOT_FOUND_HTTP_STATUS, "No user caveat in macaroon",
+ errcode=Codes.UNKNOWN_TOKEN
+ )
+
+ if guest:
+ ret = {
+ "user": user,
+ "is_guest": True,
+ "token_id": None,
+ }
+ else:
+ # This codepath exists so that we can actually return a
+ # token ID, because we use token IDs in place of device
+ # identifiers throughout the codebase.
+ # TODO(daniel): Remove this fallback when device IDs are
+ # properly implemented.
+ ret = yield self._look_up_user_by_access_token(macaroon_str)
+ if ret["user"] != user:
+ logger.error(
+ "Macaroon user (%s) != DB user (%s)",
+ user,
+ ret["user"]
+ )
+ raise AuthError(
+ self.TOKEN_NOT_FOUND_HTTP_STATUS,
+ "User mismatch in macaroon",
+ errcode=Codes.UNKNOWN_TOKEN
+ )
+ defer.returnValue(ret)
+ except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
+ raise AuthError(
+ self.TOKEN_NOT_FOUND_HTTP_STATUS, "Invalid macaroon passed.",
+ errcode=Codes.UNKNOWN_TOKEN
+ )
+
+ def validate_macaroon(self, macaroon, type_string, additional_validation_functions):
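+        """Verify that the macaroon was signed with this server's secret key
+        and is of the expected type. For an access macaroon the caveats look
+        roughly like this (illustrative values):
+
+            gen = 1
+            type = access
+            user_id = @alice:example.com
+            time < 1448000000000
+        """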
+ v = pymacaroons.Verifier()
+ v.satisfy_exact("gen = 1")
+ v.satisfy_exact("type = " + type_string)
+ v.satisfy_general(lambda c: c.startswith("user_id = "))
+ v.satisfy_exact("guest = true")
+
+ for validation_function in additional_validation_functions:
+ v.satisfy_general(validation_function)
+ v.verify(macaroon, self.hs.config.macaroon_secret_key)
+
+ v = pymacaroons.Verifier()
+ v.satisfy_general(self._verify_recognizes_caveats)
+ v.verify(macaroon, self.hs.config.macaroon_secret_key)
+
+ def verify_expiry(self, caveat):
+ prefix = "time < "
+ if not caveat.startswith(prefix):
+ return False
+ expiry = int(caveat[len(prefix):])
+ now = self.hs.get_clock().time_msec()
+ return now < expiry
+
+ def _verify_recognizes_caveats(self, caveat):
+ first_space = caveat.find(" ")
+ if first_space < 0:
+ return False
+ second_space = caveat.find(" ", first_space + 1)
+ if second_space < 0:
+ return False
+ return caveat[:second_space + 1] in self._KNOWN_CAVEAT_PREFIXES
+
+ @defer.inlineCallbacks
+ def _look_up_user_by_access_token(self, token):
+ ret = yield self.store.get_user_by_access_token(token)
+ if not ret:
+ raise AuthError(
+ self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
+ errcode=Codes.UNKNOWN_TOKEN
+ )
+ user_info = {
+ "user": UserID.from_string(ret.get("name")),
+ "token_id": ret.get("token_id", None),
+ "is_guest": False,
+ }
+ defer.returnValue(user_info)
+
+ @defer.inlineCallbacks
+ def get_appservice_by_req(self, request):
+ try:
+ token = request.args["access_token"][0]
+ service = yield self.store.get_app_service_by_token(token)
+ if not service:
+ raise AuthError(
+ self.TOKEN_NOT_FOUND_HTTP_STATUS,
+ "Unrecognised access token.",
+ errcode=Codes.UNKNOWN_TOKEN
+ )
+ request.authenticated_entity = service.sender
+ defer.returnValue(service)
+ except KeyError:
+ raise AuthError(
+ self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token."
+ )
+
+ def is_server_admin(self, user):
+ return self.store.is_server_admin(user)
+
+ @defer.inlineCallbacks
+ def add_auth_events(self, builder, context):
+ auth_ids = self.compute_auth_events(builder, context.current_state)
+
+ auth_events_entries = yield self.store.add_event_hashes(
+ auth_ids
+ )
+
+ builder.auth_events = auth_events_entries
+
+ def compute_auth_events(self, event, current_state):
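+        """Work out which event IDs a new event should cite as its auth
+        events: the room's create and power_levels events, plus, for
+        membership changes, the join_rules event and the relevant
+        membership (or third-party invite) events.
+        """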
+ if event.type == EventTypes.Create:
+ return []
+
+ auth_ids = []
+
+ key = (EventTypes.PowerLevels, "", )
+ power_level_event = current_state.get(key)
+
+ if power_level_event:
+ auth_ids.append(power_level_event.event_id)
+
+ key = (EventTypes.JoinRules, "", )
+ join_rule_event = current_state.get(key)
+
+ key = (EventTypes.Member, event.user_id, )
+ member_event = current_state.get(key)
+
+ key = (EventTypes.Create, "", )
+ create_event = current_state.get(key)
+ if create_event:
+ auth_ids.append(create_event.event_id)
+
+ if join_rule_event:
+ join_rule = join_rule_event.content.get("join_rule")
+ is_public = join_rule == JoinRules.PUBLIC if join_rule else False
+ else:
+ is_public = False
+
+ if event.type == EventTypes.Member:
+ e_type = event.content["membership"]
+ if e_type in [Membership.JOIN, Membership.INVITE]:
+ if join_rule_event:
+ auth_ids.append(join_rule_event.event_id)
+
+ if e_type == Membership.JOIN:
+ if member_event and not is_public:
+ auth_ids.append(member_event.event_id)
+ else:
+ if member_event:
+ auth_ids.append(member_event.event_id)
+
+ if e_type == Membership.INVITE:
+ if "third_party_invite" in event.content:
+ key = (
+ EventTypes.ThirdPartyInvite,
+ event.content["third_party_invite"]["token"]
+ )
+ third_party_invite = current_state.get(key)
+ if third_party_invite:
+ auth_ids.append(third_party_invite.event_id)
+ elif member_event:
+ if member_event.content["membership"] == Membership.JOIN:
+ auth_ids.append(member_event.event_id)
+
+ return auth_ids
+
+ @log_function
+ def _can_send_event(self, event, auth_events):
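+        """Check that the sender's power level meets the required send level
+        (the per-type "events" entry, else state_default or events_default),
+        and that state events with an "@"-prefixed state_key are only set by
+        that user, while other state_keys may only be set by the matching
+        server.
+        """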
+ key = (EventTypes.PowerLevels, "", )
+ send_level_event = auth_events.get(key)
+ send_level = None
+ if send_level_event:
+ send_level = send_level_event.content.get("events", {}).get(
+ event.type
+ )
+ if send_level is None:
+ if hasattr(event, "state_key"):
+ send_level = send_level_event.content.get(
+ "state_default", 50
+ )
+ else:
+ send_level = send_level_event.content.get(
+ "events_default", 0
+ )
+
+ if send_level:
+ send_level = int(send_level)
+ else:
+ send_level = 0
+
+ user_level = self._get_user_power_level(event.user_id, auth_events)
+
+ if user_level < send_level:
+ raise AuthError(
+ 403,
+ "You don't have permission to post that to the room. " +
+ "user_level (%d) < send_level (%d)" % (user_level, send_level)
+ )
+
+ # Check state_key
+ if hasattr(event, "state_key"):
+ if event.state_key.startswith("@"):
+ if event.state_key != event.user_id:
+ raise AuthError(
+ 403,
+ "You are not allowed to set others state"
+ )
+ else:
+ sender_domain = UserID.from_string(
+ event.user_id
+ ).domain
+
+ if sender_domain != event.state_key:
+ raise AuthError(
+ 403,
+ "You are not allowed to set others state"
+ )
+
+ return True
+
+ def check_redaction(self, event, auth_events):
+ """Check whether the event sender is allowed to redact the target event.
+
+ Returns:
+            True if the sender is allowed to redact the target event if the
+ target event was created by them.
+ False if the sender is allowed to redact the target event with no
+ further checks.
+
+ Raises:
+ AuthError if the event sender is definitely not allowed to redact
+ the target event.
+ """
+ user_level = self._get_user_power_level(event.user_id, auth_events)
+
+ redact_level = self._get_named_level(auth_events, "redact", 50)
+
+ if user_level > redact_level:
+ return False
+
+ redacter_domain = EventID.from_string(event.event_id).domain
+ redactee_domain = EventID.from_string(event.redacts).domain
+ if redacter_domain == redactee_domain:
+ return True
+
+ raise AuthError(
+ 403,
+ "You don't have permission to redact events"
+ )
+
+ def _check_power_levels(self, event, auth_events):
+ user_list = event.content.get("users", {})
+ # Validate users
+ for k, v in user_list.items():
+ try:
+ UserID.from_string(k)
+ except:
+ raise SynapseError(400, "Not a valid user_id: %s" % (k,))
+
+ try:
+ int(v)
+ except:
+ raise SynapseError(400, "Not a valid power level: %s" % (v,))
+
+ key = (event.type, event.state_key, )
+ current_state = auth_events.get(key)
+
+ if not current_state:
+ return
+
+ user_level = self._get_user_power_level(event.user_id, auth_events)
+
+ # Check other levels:
+ levels_to_check = [
+ ("users_default", None),
+ ("events_default", None),
+ ("state_default", None),
+ ("ban", None),
+ ("redact", None),
+ ("kick", None),
+ ("invite", None),
+ ]
+
+ old_list = current_state.content.get("users")
+ for user in set(old_list.keys() + user_list.keys()):
+ levels_to_check.append(
+ (user, "users")
+ )
+
+ old_list = current_state.content.get("events")
+ new_list = event.content.get("events")
+ for ev_id in set(old_list.keys() + new_list.keys()):
+ levels_to_check.append(
+ (ev_id, "events")
+ )
+
+ old_state = current_state.content
+ new_state = event.content
+
+ for level_to_check, dir in levels_to_check:
+ old_loc = old_state
+ new_loc = new_state
+ if dir:
+ old_loc = old_loc.get(dir, {})
+ new_loc = new_loc.get(dir, {})
+
+ if level_to_check in old_loc:
+ old_level = int(old_loc[level_to_check])
+ else:
+ old_level = None
+
+ if level_to_check in new_loc:
+ new_level = int(new_loc[level_to_check])
+ else:
+ new_level = None
+
+ if new_level is not None and old_level is not None:
+ if new_level == old_level:
+ continue
+
+ if dir == "users" and level_to_check != event.user_id:
+ if old_level == user_level:
+ raise AuthError(
+ 403,
+ "You don't have permission to remove ops level equal "
+ "to your own"
+ )
+
+ if old_level > user_level or new_level > user_level:
+ raise AuthError(
+ 403,
+ "You don't have permission to add ops level greater "
+ "than your own"
+ )
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
new file mode 100644
index 00000000..c2450b77
--- /dev/null
+++ b/synapse/api/constants.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains constants from the specification."""
+
+
+class Membership(object):
+ """Represents the membership states of a user in a room."""
+ INVITE = u"invite"
+ JOIN = u"join"
+ KNOCK = u"knock"
+ LEAVE = u"leave"
+ BAN = u"ban"
+ LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)
+
+
+class PresenceState(object):
+ """Represents the presence state of a user."""
+ OFFLINE = u"offline"
+ UNAVAILABLE = u"unavailable"
+ ONLINE = u"online"
+ FREE_FOR_CHAT = u"free_for_chat"
+
+
+class JoinRules(object):
+ PUBLIC = u"public"
+ KNOCK = u"knock"
+ INVITE = u"invite"
+ PRIVATE = u"private"
+
+
+class LoginType(object):
+ PASSWORD = u"m.login.password"
+ OAUTH = u"m.login.oauth2"
+ EMAIL_CODE = u"m.login.email.code"
+ EMAIL_URL = u"m.login.email.url"
+ EMAIL_IDENTITY = u"m.login.email.identity"
+ RECAPTCHA = u"m.login.recaptcha"
+ DUMMY = u"m.login.dummy"
+
+ # Only for C/S API v1
+ APPLICATION_SERVICE = u"m.login.application_service"
+ SHARED_SECRET = u"org.matrix.login.shared_secret"
+
+
+class EventTypes(object):
+ Member = "m.room.member"
+ Create = "m.room.create"
+ JoinRules = "m.room.join_rules"
+ PowerLevels = "m.room.power_levels"
+ Aliases = "m.room.aliases"
+ Redaction = "m.room.redaction"
+ ThirdPartyInvite = "m.room.third_party_invite"
+
+ RoomHistoryVisibility = "m.room.history_visibility"
+ CanonicalAlias = "m.room.canonical_alias"
+ RoomAvatar = "m.room.avatar"
+ GuestAccess = "m.room.guest_access"
+
+ # These are used for validation
+ Message = "m.room.message"
+ Topic = "m.room.topic"
+ Name = "m.room.name"
+
+
+class RejectedReason(object):
+ AUTH_ERROR = "auth_error"
+ REPLACED = "replaced"
+ NOT_ANCESTOR = "not_ancestor"
+
+
+class RoomCreationPreset(object):
+ PRIVATE_CHAT = "private_chat"
+ PUBLIC_CHAT = "public_chat"
+ TRUSTED_PRIVATE_CHAT = "trusted_private_chat"
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
new file mode 100644
index 00000000..d4037b3d
--- /dev/null
+++ b/synapse/api/errors.py
@@ -0,0 +1,243 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains exceptions and error codes."""
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class Codes(object):
+ UNRECOGNIZED = "M_UNRECOGNIZED"
+ UNAUTHORIZED = "M_UNAUTHORIZED"
+ FORBIDDEN = "M_FORBIDDEN"
+ BAD_JSON = "M_BAD_JSON"
+ NOT_JSON = "M_NOT_JSON"
+ USER_IN_USE = "M_USER_IN_USE"
+ ROOM_IN_USE = "M_ROOM_IN_USE"
+ BAD_PAGINATION = "M_BAD_PAGINATION"
+ UNKNOWN = "M_UNKNOWN"
+ NOT_FOUND = "M_NOT_FOUND"
+ MISSING_TOKEN = "M_MISSING_TOKEN"
+ UNKNOWN_TOKEN = "M_UNKNOWN_TOKEN"
+ GUEST_ACCESS_FORBIDDEN = "M_GUEST_ACCESS_FORBIDDEN"
+ LIMIT_EXCEEDED = "M_LIMIT_EXCEEDED"
+ CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
+ CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
+ MISSING_PARAM = "M_MISSING_PARAM"
+ TOO_LARGE = "M_TOO_LARGE"
+ EXCLUSIVE = "M_EXCLUSIVE"
+ THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
+    THREEPID_IN_USE = "M_THREEPID_IN_USE"
+
+
+class CodeMessageException(RuntimeError):
+ """An exception with integer code and message string attributes."""
+
+ def __init__(self, code, msg):
+ super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
+ self.code = code
+ self.msg = msg
+ self.response_code_message = None
+
+ def error_dict(self):
+ return cs_error(self.msg)
+
+
+class SynapseError(CodeMessageException):
+ """A base error which can be caught for all synapse events."""
+ def __init__(self, code, msg, errcode=Codes.UNKNOWN):
+ """Constructs a synapse error.
+
+ Args:
+ code (int): The integer error code (an HTTP response code)
+ msg (str): The human-readable error message.
+            errcode (str): The Matrix error code, e.g. 'M_FORBIDDEN'.
+ """
+ super(SynapseError, self).__init__(code, msg)
+ self.errcode = errcode
+
+ def error_dict(self):
+ return cs_error(
+ self.msg,
+ self.errcode,
+ )
+
+
+class RegistrationError(SynapseError):
+ """An error raised when a registration event fails."""
+ pass
+
+
+class UnrecognizedRequestError(SynapseError):
+ """An error indicating we don't understand the request you're trying to make"""
+ def __init__(self, *args, **kwargs):
+ if "errcode" not in kwargs:
+ kwargs["errcode"] = Codes.UNRECOGNIZED
+ message = None
+ if len(args) == 0:
+ message = "Unrecognized request"
+ else:
+ message = args[0]
+ super(UnrecognizedRequestError, self).__init__(
+ 400,
+ message,
+ **kwargs
+ )
+
+
+class NotFoundError(SynapseError):
+ """An error indicating we can't find the thing you asked for"""
+ def __init__(self, *args, **kwargs):
+ if "errcode" not in kwargs:
+ kwargs["errcode"] = Codes.NOT_FOUND
+ super(NotFoundError, self).__init__(
+ 404,
+ "Not found",
+ **kwargs
+ )
+
+
+class AuthError(SynapseError):
+ """An error raised when there was a problem authorising an event."""
+
+ def __init__(self, *args, **kwargs):
+ if "errcode" not in kwargs:
+ kwargs["errcode"] = Codes.FORBIDDEN
+ super(AuthError, self).__init__(*args, **kwargs)
+
+
+class EventSizeError(SynapseError):
+ """An error raised when an event is too big."""
+
+ def __init__(self, *args, **kwargs):
+ if "errcode" not in kwargs:
+ kwargs["errcode"] = Codes.TOO_LARGE
+ super(EventSizeError, self).__init__(413, *args, **kwargs)
+
+
+class EventStreamError(SynapseError):
+ """An error raised when there a problem with the event stream."""
+ def __init__(self, *args, **kwargs):
+ if "errcode" not in kwargs:
+ kwargs["errcode"] = Codes.BAD_PAGINATION
+ super(EventStreamError, self).__init__(*args, **kwargs)
+
+
+class LoginError(SynapseError):
+ """An error raised when there was a problem logging in."""
+ pass
+
+
+class StoreError(SynapseError):
+ """An error raised when there was a problem storing some data."""
+ pass
+
+
+class InvalidCaptchaError(SynapseError):
+ def __init__(self, code=400, msg="Invalid captcha.", error_url=None,
+ errcode=Codes.CAPTCHA_INVALID):
+ super(InvalidCaptchaError, self).__init__(code, msg, errcode)
+ self.error_url = error_url
+
+ def error_dict(self):
+ return cs_error(
+ self.msg,
+ self.errcode,
+ error_url=self.error_url,
+ )
+
+
+class LimitExceededError(SynapseError):
+ """A client has sent too many requests and is being throttled.
+ """
+ def __init__(self, code=429, msg="Too Many Requests", retry_after_ms=None,
+ errcode=Codes.LIMIT_EXCEEDED):
+ super(LimitExceededError, self).__init__(code, msg, errcode)
+ self.retry_after_ms = retry_after_ms
+ self.response_code_message = "Too Many Requests"
+
+ def error_dict(self):
+ return cs_error(
+ self.msg,
+ self.errcode,
+ retry_after_ms=self.retry_after_ms,
+ )
+
+
+def cs_exception(exception):
+ if isinstance(exception, CodeMessageException):
+ return exception.error_dict()
+ else:
+ logger.error("Unknown exception type: %s", type(exception))
+ return {}
+
+
+def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
+ """ Utility method for constructing an error response for client-server
+ interactions.
+
+ Args:
+ msg (str): The error message.
+ code (int): The error code.
+ kwargs : Additional keys to add to the response.
+ Returns:
+ A dict representing the error response JSON.
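+
+    For example (illustrative):
+
+        cs_error("Room not found", Codes.NOT_FOUND)
+        # => {"error": "Room not found", "errcode": "M_NOT_FOUND"}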
+ """
+ err = {"error": msg, "errcode": code}
+ for key, value in kwargs.iteritems():
+ err[key] = value
+ return err
+
+
+class FederationError(RuntimeError):
+ """ This class is used to inform remote home servers about erroneous
+ PDUs they sent us.
+
+ FATAL: The remote server could not interpret the source event.
+ (e.g., it was missing a required field)
+ ERROR: The remote server interpreted the event, but it failed some other
+ check (e.g. auth)
+ WARN: The remote server accepted the event, but believes some part of it
+ is wrong (e.g., it referred to an invalid event)
+ """
+
+ def __init__(self, level, code, reason, affected, source=None):
+ if level not in ["FATAL", "ERROR", "WARN"]:
+ raise ValueError("Level is not valid: %s" % (level,))
+ self.level = level
+ self.code = code
+ self.reason = reason
+ self.affected = affected
+ self.source = source
+
+ msg = "%s %s: %s" % (level, code, reason,)
+ super(FederationError, self).__init__(msg)
+
+ def get_dict(self):
+ return {
+ "level": self.level,
+ "code": self.code,
+ "reason": self.reason,
+ "affected": self.affected,
+ "source": self.source if self.source else self.affected,
+ }
+
+
+class HttpResponseException(CodeMessageException):
+ def __init__(self, code, msg, response):
+ self.response = response
+ super(HttpResponseException, self).__init__(code, msg)
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
new file mode 100644
index 00000000..aaa2433c
--- /dev/null
+++ b/synapse/api/filtering.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.api.errors import SynapseError
+from synapse.types import UserID, RoomID
+
+
+class Filtering(object):
+
+ def __init__(self, hs):
+ super(Filtering, self).__init__()
+ self.store = hs.get_datastore()
+
+ def get_user_filter(self, user_localpart, filter_id):
+ result = self.store.get_user_filter(user_localpart, filter_id)
+ result.addCallback(FilterCollection)
+ return result
+
+ def add_user_filter(self, user_localpart, user_filter):
+ self._check_valid_filter(user_filter)
+ return self.store.add_user_filter(user_localpart, user_filter)
+
+ # TODO(paul): surely we should probably add a delete_user_filter or
+ # replace_user_filter at some point? There's no REST API specified for
+ # them however
+
+ def _check_valid_filter(self, user_filter_json):
+ """Check if the provided filter is valid.
+
+ This inspects all definitions contained within the filter.
+
+ Args:
+ user_filter_json(dict): The filter
+ Raises:
+ SynapseError: If the filter is not valid.
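+
+        An illustrative (not exhaustive) filter might look like:
+
+            {
+              "presence": {"senders": ["@alice:example.com"]},
+              "room": {
+                "timeline": {"limit": 20, "types": ["m.room.message"]},
+                "state": {"not_types": ["m.room.member"]}
+              }
+            }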
+ """
+ # NB: Filters are the complete json blobs. "Definitions" are an
+ # individual top-level key e.g. public_user_data. Filters are made of
+ # many definitions.
+
+ top_level_definitions = [
+ "presence"
+ ]
+
+ room_level_definitions = [
+ "state", "timeline", "ephemeral", "private_user_data"
+ ]
+
+ for key in top_level_definitions:
+ if key in user_filter_json:
+ self._check_definition(user_filter_json[key])
+
+ if "room" in user_filter_json:
+ for key in room_level_definitions:
+ if key in user_filter_json["room"]:
+ self._check_definition(user_filter_json["room"][key])
+
+ def _check_definition(self, definition):
+ """Check if the provided definition is valid.
+
+ This inspects not only the types but also the values to make sure they
+ make sense.
+
+ Args:
+ definition(dict): The filter definition
+ Raises:
+ SynapseError: If there was a problem with this definition.
+ """
+ # NB: Filters are the complete json blobs. "Definitions" are an
+ # individual top-level key e.g. public_user_data. Filters are made of
+ # many definitions.
+ if type(definition) != dict:
+ raise SynapseError(
+ 400, "Expected JSON object, not %s" % (definition,)
+ )
+
+ # check rooms are valid room IDs
+ room_id_keys = ["rooms", "not_rooms"]
+ for key in room_id_keys:
+ if key in definition:
+ if type(definition[key]) != list:
+ raise SynapseError(400, "Expected %s to be a list." % key)
+ for room_id in definition[key]:
+ RoomID.from_string(room_id)
+
+ # check senders are valid user IDs
+ user_id_keys = ["senders", "not_senders"]
+ for key in user_id_keys:
+ if key in definition:
+ if type(definition[key]) != list:
+ raise SynapseError(400, "Expected %s to be a list." % key)
+ for user_id in definition[key]:
+ UserID.from_string(user_id)
+
+ # TODO: We don't limit event type values but we probably should...
+ # check types are valid event types
+ event_keys = ["types", "not_types"]
+ for key in event_keys:
+ if key in definition:
+ if type(definition[key]) != list:
+ raise SynapseError(400, "Expected %s to be a list." % key)
+ for event_type in definition[key]:
+ if not isinstance(event_type, basestring):
+ raise SynapseError(400, "Event type should be a string")
+
+
+class FilterCollection(object):
+ def __init__(self, filter_json):
+ self.filter_json = filter_json
+
+ self.room_timeline_filter = Filter(
+ self.filter_json.get("room", {}).get("timeline", {})
+ )
+
+ self.room_state_filter = Filter(
+ self.filter_json.get("room", {}).get("state", {})
+ )
+
+ self.room_ephemeral_filter = Filter(
+ self.filter_json.get("room", {}).get("ephemeral", {})
+ )
+
+ self.room_private_user_data = Filter(
+ self.filter_json.get("room", {}).get("private_user_data", {})
+ )
+
+ self.presence_filter = Filter(
+ self.filter_json.get("presence", {})
+ )
+
+ def timeline_limit(self):
+ return self.room_timeline_filter.limit()
+
+ def presence_limit(self):
+ return self.presence_filter.limit()
+
+ def ephemeral_limit(self):
+ return self.room_ephemeral_filter.limit()
+
+ def filter_presence(self, events):
+ return self.presence_filter.filter(events)
+
+ def filter_room_state(self, events):
+ return self.room_state_filter.filter(events)
+
+ def filter_room_timeline(self, events):
+ return self.room_timeline_filter.filter(events)
+
+ def filter_room_ephemeral(self, events):
+ return self.room_ephemeral_filter.filter(events)
+
+ def filter_room_private_user_data(self, events):
+ return self.room_private_user_data.filter(events)
+
+
+class Filter(object):
+ def __init__(self, filter_json):
+ self.filter_json = filter_json
+
+ def check(self, event):
+ """Checks whether the filter matches the given event.
+
+ Returns:
+ bool: True if the event matches
+ """
+ if isinstance(event, dict):
+ return self.check_fields(
+ event.get("room_id", None),
+ event.get("sender", None),
+ event.get("type", None),
+ )
+ else:
+ return self.check_fields(
+ getattr(event, "room_id", None),
+ getattr(event, "sender", None),
+ event.type,
+ )
+
+ def check_fields(self, room_id, sender, event_type):
+ """Checks whether the filter matches the given event fields.
+
+ Returns:
+ bool: True if the event fields match
+ """
+ literal_keys = {
+ "rooms": lambda v: room_id == v,
+ "senders": lambda v: sender == v,
+ "types": lambda v: _matches_wildcard(event_type, v)
+ }
+
+ for name, match_func in literal_keys.items():
+ not_name = "not_%s" % (name,)
+ disallowed_values = self.filter_json.get(not_name, [])
+ if any(map(match_func, disallowed_values)):
+ return False
+
+ allowed_values = self.filter_json.get(name, None)
+ if allowed_values is not None:
+ if not any(map(match_func, allowed_values)):
+ return False
+
+ return True
+
+ def filter_rooms(self, room_ids):
+ """Apply the 'rooms' filter to a given list of rooms.
+
+ Args:
+ room_ids (list): A list of room_ids.
+
+ Returns:
+ list: A list of room_ids that match the filter
+ """
+ room_ids = set(room_ids)
+
+ disallowed_rooms = set(self.filter_json.get("not_rooms", []))
+ room_ids -= disallowed_rooms
+
+ allowed_rooms = self.filter_json.get("rooms", None)
+ if allowed_rooms is not None:
+ room_ids &= set(allowed_rooms)
+
+ return room_ids
+
+ def filter(self, events):
+ return filter(self.check, events)
+
+ def limit(self):
+ return self.filter_json.get("limit", 10)
+
+
+def _matches_wildcard(actual_value, filter_value):
+ if filter_value.endswith("*"):
+ type_prefix = filter_value[:-1]
+ return actual_value.startswith(type_prefix)
+ else:
+ return actual_value == filter_value
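+
+
+if __name__ == "__main__":
+    # Illustrative only (not part of upstream synapse): exercises the
+    # wildcard and not_senders behaviour implemented above.
+    f = Filter({"types": ["m.room.*"], "not_senders": ["@spam:example.com"]})
+    print f.check_fields("!r:example.com", "@alice:example.com", "m.room.message")
+    # -> True: "m.room.message" matches the "m.room.*" wildcard
+    print f.check_fields("!r:example.com", "@spam:example.com", "m.room.message")
+    # -> False: the sender is explicitly disallowed
+    print f.check_fields("!r:example.com", "@alice:example.com", "m.presence")
+    # -> False: the type matches no allowed pattern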
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
new file mode 100644
index 00000000..3f9ad4ce
--- /dev/null
+++ b/synapse/api/ratelimiting.py
@@ -0,0 +1,79 @@
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+
+
+class Ratelimiter(object):
+ """
+ Ratelimit message sending by user.
+ """
+
+ def __init__(self):
+ self.message_counts = collections.OrderedDict()
+
+ def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count):
+ """Can the user send a message?
+ Args:
+ user_id: The user sending a message.
+ time_now_s: The time now.
+            msg_rate_hz: The long-term rate, in messages per second, at
+                which the user may send.
+ burst_count: How many messages the user can send before being
+ limited.
+ Returns:
+ A pair of a bool indicating if they can send a message now and a
+ time in seconds of when they can next send a message.
+ """
+ self.prune_message_counts(time_now_s)
+ message_count, time_start, _ignored = self.message_counts.pop(
+ user_id, (0., time_now_s, None),
+ )
+ time_delta = time_now_s - time_start
+ sent_count = message_count - time_delta * msg_rate_hz
+ if sent_count < 0:
+ allowed = True
+ time_start = time_now_s
+ message_count = 1.
+ elif sent_count > burst_count - 1.:
+ allowed = False
+ else:
+ allowed = True
+ message_count += 1
+
+ self.message_counts[user_id] = (
+ message_count, time_start, msg_rate_hz
+ )
+
+ if msg_rate_hz > 0:
+ time_allowed = (
+ time_start + (message_count - burst_count + 1) / msg_rate_hz
+ )
+ if time_allowed < time_now_s:
+ time_allowed = time_now_s
+ else:
+ time_allowed = -1
+
+ return allowed, time_allowed
+
+ def prune_message_counts(self, time_now_s):
+ for user_id in self.message_counts.keys():
+ message_count, time_start, msg_rate_hz = (
+ self.message_counts[user_id]
+ )
+ time_delta = time_now_s - time_start
+ if message_count - time_delta * msg_rate_hz > 0:
+ break
+ else:
+ del self.message_counts[user_id]
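+
+
+if __name__ == "__main__":
+    # Illustrative only (not part of upstream synapse): with a long-term
+    # rate of 0.5 msg/s and a burst of 3, three sends at t=0 are allowed
+    # and the fourth is rejected until the bucket drains.
+    limiter = Ratelimiter()
+    for i in range(4):
+        allowed, retry_at = limiter.send_message(
+            "@user:example.com", time_now_s=0., msg_rate_hz=0.5, burst_count=3.
+        )
+        print "message %d: allowed=%s, next at t=%.1f" % (i + 1, allowed, retry_at)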
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
new file mode 100644
index 00000000..15c8558e
--- /dev/null
+++ b/synapse/api/urls.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains the URL paths to prefix various aspects of the server with. """
+
+CLIENT_PREFIX = "/_matrix/client/api/v1"
+CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
+FEDERATION_PREFIX = "/_matrix/federation/v1"
+STATIC_PREFIX = "/_matrix/static"
+WEB_CLIENT_PREFIX = "/_matrix/client"
+CONTENT_REPO_PREFIX = "/_matrix/content"
+SERVER_KEY_PREFIX = "/_matrix/key/v1"
+SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
+MEDIA_PREFIX = "/_matrix/media/v1"
+APP_SERVICE_PREFIX = "/_matrix/appservice/v1"
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
new file mode 100644
index 00000000..c488b10d
--- /dev/null
+++ b/synapse/app/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
new file mode 100755
index 00000000..cd7a52ec
--- /dev/null
+++ b/synapse/app/homeserver.py
@@ -0,0 +1,745 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+sys.dont_write_bytecode = True
+from synapse.python_dependencies import (
+ check_requirements, DEPENDENCY_LINKS, MissingRequirementError
+)
+
+if __name__ == '__main__':
+ try:
+ check_requirements()
+ except MissingRequirementError as e:
+ message = "\n".join([
+ "Missing Requirement: %s" % (e.message,),
+ "To install run:",
+ " pip install --upgrade --force \"%s\"" % (e.dependency,),
+ "",
+ ])
+        sys.stderr.write(message)
+ sys.exit(1)
+
+from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
+from synapse.storage import are_all_users_on_domain
+from synapse.storage.prepare_database import UpgradeDatabaseException
+
+from synapse.server import HomeServer
+
+
+from twisted.internet import reactor, task, defer
+from twisted.application import service
+from twisted.enterprise import adbapi
+from twisted.web.resource import Resource, EncodingResourceWrapper
+from twisted.web.static import File
+from twisted.web.server import Site, GzipEncoderFactory, Request
+from synapse.http.server import JsonResource, RootRedirect
+from synapse.rest.media.v0.content_repository import ContentRepoResource
+from synapse.rest.media.v1.media_repository import MediaRepositoryResource
+from synapse.rest.key.v1.server_key_resource import LocalKey
+from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.http.matrixfederationclient import MatrixFederationHttpClient
+from synapse.api.urls import (
+ CLIENT_PREFIX, FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
+ SERVER_KEY_PREFIX, MEDIA_PREFIX, CLIENT_V2_ALPHA_PREFIX, STATIC_PREFIX,
+ SERVER_KEY_V2_PREFIX,
+)
+from synapse.config.homeserver import HomeServerConfig
+from synapse.crypto import context_factory
+from synapse.util.logcontext import LoggingContext
+from synapse.rest.client.v1 import ClientV1RestResource
+from synapse.rest.client.v2_alpha import ClientV2AlphaRestResource
+from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+
+from synapse import events
+
+from daemonize import Daemonize
+import twisted.manhole.telnet
+
+import synapse
+
+import contextlib
+import logging
+import os
+import re
+import resource
+import subprocess
+import time
+
+
+logger = logging.getLogger("synapse.app.homeserver")
+
+
+def gz_wrap(r):
+ return EncodingResourceWrapper(r, [GzipEncoderFactory()])
+
+
+class SynapseHomeServer(HomeServer):
+
+ def build_http_client(self):
+ return MatrixFederationHttpClient(self)
+
+ def build_resource_for_client(self):
+ return ClientV1RestResource(self)
+
+ def build_resource_for_client_v2_alpha(self):
+ return ClientV2AlphaRestResource(self)
+
+ def build_resource_for_federation(self):
+ return JsonResource(self)
+
+ def build_resource_for_web_client(self):
+ webclient_path = self.get_config().web_client_location
+ if not webclient_path:
+ try:
+ import syweb
+ except ImportError:
+ quit_with_error(
+ "Could not find a webclient.\n\n"
+ "Please either install the matrix-angular-sdk or configure\n"
+ "the location of the source to serve via the configuration\n"
+ "option `web_client_location`\n\n"
+ "To install the `matrix-angular-sdk` via pip, run:\n\n"
+ " pip install '%(dep)s'\n"
+ "\n"
+ "You can also disable hosting of the webclient via the\n"
+ "configuration option `web_client`\n"
+ % {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
+ )
+ syweb_path = os.path.dirname(syweb.__file__)
+ webclient_path = os.path.join(syweb_path, "webclient")
+        # GZip is disabled here due to
+        # https://twistedmatrix.com/trac/ticket/7678
+        # (It can stay enabled for the API resources: they call
+        # write() with the whole body and then finish() straight
+        # after, and so do not trigger the bug.)
+ # GzipFile was removed in commit 184ba09
+ # return GzipFile(webclient_path) # TODO configurable?
+ return File(webclient_path) # TODO configurable?
+
+ def build_resource_for_static_content(self):
+ # This is old and should go away: not going to bother adding gzip
+ return File(
+ os.path.join(os.path.dirname(synapse.__file__), "static")
+ )
+
+ def build_resource_for_content_repo(self):
+ return ContentRepoResource(
+ self, self.config.uploads_path, self.auth, self.content_addr
+ )
+
+ def build_resource_for_media_repository(self):
+ return MediaRepositoryResource(self)
+
+ def build_resource_for_server_key(self):
+ return LocalKey(self)
+
+ def build_resource_for_server_key_v2(self):
+ return KeyApiV2Resource(self)
+
+ def build_resource_for_metrics(self):
+ if self.get_config().enable_metrics:
+ return MetricsResource(self)
+ else:
+ return None
+
+ def build_db_pool(self):
+ name = self.db_config["name"]
+
+ return adbapi.ConnectionPool(
+ name,
+ **self.db_config.get("args", {})
+ )
+
+ def _listener_http(self, config, listener_config):
+ port = listener_config["port"]
+ bind_address = listener_config.get("bind_address", "")
+ tls = listener_config.get("tls", False)
+ site_tag = listener_config.get("tag", port)
+
+ if tls and config.no_tls:
+ return
+
+ metrics_resource = self.get_resource_for_metrics()
+
+ resources = {}
+ for res in listener_config["resources"]:
+ for name in res["names"]:
+ if name == "client":
+ if res["compress"]:
+ client_v1 = gz_wrap(self.get_resource_for_client())
+ client_v2 = gz_wrap(self.get_resource_for_client_v2_alpha())
+ else:
+ client_v1 = self.get_resource_for_client()
+ client_v2 = self.get_resource_for_client_v2_alpha()
+
+ resources.update({
+ CLIENT_PREFIX: client_v1,
+ CLIENT_V2_ALPHA_PREFIX: client_v2,
+ })
+
+ if name == "federation":
+ resources.update({
+ FEDERATION_PREFIX: self.get_resource_for_federation(),
+ })
+
+ if name in ["static", "client"]:
+ resources.update({
+ STATIC_PREFIX: self.get_resource_for_static_content(),
+ })
+
+ if name in ["media", "federation", "client"]:
+ resources.update({
+ MEDIA_PREFIX: self.get_resource_for_media_repository(),
+ CONTENT_REPO_PREFIX: self.get_resource_for_content_repo(),
+ })
+
+ if name in ["keys", "federation"]:
+ resources.update({
+ SERVER_KEY_PREFIX: self.get_resource_for_server_key(),
+ SERVER_KEY_V2_PREFIX: self.get_resource_for_server_key_v2(),
+ })
+
+ if name == "webclient":
+ resources[WEB_CLIENT_PREFIX] = self.get_resource_for_web_client()
+
+ if name == "metrics" and metrics_resource:
+ resources[METRICS_PREFIX] = metrics_resource
+
+ root_resource = create_resource_tree(resources)
+ if tls:
+ reactor.listenSSL(
+ port,
+ SynapseSite(
+ "synapse.access.https.%s" % (site_tag,),
+ site_tag,
+ listener_config,
+ root_resource,
+ ),
+ self.tls_server_context_factory,
+ interface=bind_address
+ )
+ else:
+ reactor.listenTCP(
+ port,
+ SynapseSite(
+ "synapse.access.http.%s" % (site_tag,),
+ site_tag,
+ listener_config,
+ root_resource,
+ ),
+ interface=bind_address
+ )
+ logger.info("Synapse now listening on port %d", port)
+
+ def start_listening(self):
+ config = self.get_config()
+
+ for listener in config.listeners:
+ if listener["type"] == "http":
+ self._listener_http(config, listener)
+ elif listener["type"] == "manhole":
+ f = twisted.manhole.telnet.ShellFactory()
+ f.username = "matrix"
+ f.password = "rabbithole"
+ f.namespace['hs'] = self
+ reactor.listenTCP(
+ listener["port"],
+ f,
+ interface=listener.get("bind_address", '127.0.0.1')
+ )
+ else:
+ logger.warn("Unrecognized listener type: %s", listener["type"])
+
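+    # Shape of an HTTP listener entry consumed by _listener_http above
+    # (keys inferred from this file; the values are illustrative):
+    #   {"type": "http", "port": 8448, "bind_address": "", "tls": True,
+    #    "x_forwarded": False,
+    #    "resources": [{"names": ["client", "federation"], "compress": True}]}
+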
+ def run_startup_checks(self, db_conn, database_engine):
+ all_users_native = are_all_users_on_domain(
+ db_conn.cursor(), database_engine, self.hostname
+ )
+ if not all_users_native:
+ quit_with_error(
+ "Found users in database not native to %s!\n"
+ "You cannot changed a synapse server_name after it's been configured"
+ % (self.hostname,)
+ )
+
+ try:
+ database_engine.check_database(db_conn.cursor())
+ except IncorrectDatabaseSetup as e:
+ quit_with_error(e.message)
+
+
+def quit_with_error(error_string):
+ message_lines = error_string.split("\n")
+    line_length = max([len(l) for l in message_lines if len(l) < 80] or [0]) + 2
+ sys.stderr.write("*" * line_length + '\n')
+ for line in message_lines:
+ sys.stderr.write(" %s\n" % (line.rstrip(),))
+ sys.stderr.write("*" * line_length + '\n')
+ sys.exit(1)
+
+
+def get_version_string():
+ try:
+ null = open(os.devnull, 'w')
+ cwd = os.path.dirname(os.path.abspath(__file__))
+ try:
+ git_branch = subprocess.check_output(
+ ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
+ stderr=null,
+ cwd=cwd,
+ ).strip()
+ git_branch = "b=" + git_branch
+ except subprocess.CalledProcessError:
+ git_branch = ""
+
+ try:
+ git_tag = subprocess.check_output(
+ ['git', 'describe', '--exact-match'],
+ stderr=null,
+ cwd=cwd,
+ ).strip()
+ git_tag = "t=" + git_tag
+ except subprocess.CalledProcessError:
+ git_tag = ""
+
+ try:
+ git_commit = subprocess.check_output(
+ ['git', 'rev-parse', '--short', 'HEAD'],
+ stderr=null,
+ cwd=cwd,
+ ).strip()
+ except subprocess.CalledProcessError:
+ git_commit = ""
+
+ try:
+ dirty_string = "-this_is_a_dirty_checkout"
+ is_dirty = subprocess.check_output(
+ ['git', 'describe', '--dirty=' + dirty_string],
+ stderr=null,
+ cwd=cwd,
+ ).strip().endswith(dirty_string)
+
+ git_dirty = "dirty" if is_dirty else ""
+ except subprocess.CalledProcessError:
+ git_dirty = ""
+
+ if git_branch or git_tag or git_commit or git_dirty:
+ git_version = ",".join(
+ s for s in
+ (git_branch, git_tag, git_commit, git_dirty,)
+ if s
+ )
+
+ return (
+ "Synapse/%s (%s)" % (
+ synapse.__version__, git_version,
+ )
+ ).encode("ascii")
+ except Exception as e:
+ logger.info("Failed to check for git repository: %s", e)
+
+ return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")
+
+
+def change_resource_limit(soft_file_no):
+ try:
+ soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+ if not soft_file_no:
+ soft_file_no = hard
+
+ resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
+
+ logger.info("Set file limit to: %d", soft_file_no)
+ except (ValueError, resource.error) as e:
+ logger.warn("Failed to set file limit: %s", e)
+
+
+def setup(config_options):
+ """
+ Args:
+        config_options: The options passed to Synapse. Usually
+            `sys.argv[1:]`.
+
+ Returns:
+ HomeServer
+ """
+ config = HomeServerConfig.load_config(
+ "Synapse Homeserver",
+ config_options,
+ generate_section="Homeserver"
+ )
+
+ config.setup_logging()
+
+ # check any extra requirements we have now we have a config
+ check_requirements(config)
+
+ version_string = get_version_string()
+
+ logger.info("Server hostname: %s", config.server_name)
+ logger.info("Server version: %s", version_string)
+
+ events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+ tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+ database_engine = create_engine(config.database_config["name"])
+ config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
+
+ hs = SynapseHomeServer(
+ config.server_name,
+ db_config=config.database_config,
+ tls_server_context_factory=tls_server_context_factory,
+ config=config,
+ content_addr=config.content_addr,
+ version_string=version_string,
+ database_engine=database_engine,
+ )
+
+ logger.info("Preparing database: %s...", config.database_config['name'])
+
+ try:
+ db_conn = database_engine.module.connect(
+ **{
+ k: v for k, v in config.database_config.get("args", {}).items()
+ if not k.startswith("cp_")
+ }
+ )
+
+ database_engine.prepare_database(db_conn)
+ hs.run_startup_checks(db_conn, database_engine)
+
+ db_conn.commit()
+ except UpgradeDatabaseException:
+ sys.stderr.write(
+ "\nFailed to upgrade database.\n"
+ "Have you checked for version specific instructions in"
+ " UPGRADES.rst?\n"
+ )
+ sys.exit(1)
+
+ logger.info("Database prepared in %s.", config.database_config['name'])
+
+ hs.start_listening()
+
+ hs.get_pusherpool().start()
+ hs.get_state_handler().start_caching()
+ hs.get_datastore().start_profiling()
+ hs.get_datastore().start_doing_background_updates()
+ hs.get_replication_layer().start_get_pdu_cache()
+
+ return hs
+
+
+class SynapseService(service.Service):
+ """A twisted Service class that will start synapse. Used to run synapse
+ via twistd and a .tac.
+ """
+ def __init__(self, config):
+ self.config = config
+
+ def startService(self):
+ hs = setup(self.config)
+ change_resource_limit(hs.config.soft_file_limit)
+
+ def stopService(self):
+ return self._port.stopListening()
+
+
+class SynapseRequest(Request):
+ def __init__(self, site, *args, **kw):
+ Request.__init__(self, *args, **kw)
+ self.site = site
+ self.authenticated_entity = None
+ self.start_time = 0
+
+ def __repr__(self):
+        # We override this so that we don't log ``access_token``
+ return '<%s at 0x%x method=%s uri=%s clientproto=%s site=%s>' % (
+ self.__class__.__name__,
+ id(self),
+ self.method,
+ self.get_redacted_uri(),
+ self.clientproto,
+ self.site.site_tag,
+ )
+
+ def get_redacted_uri(self):
+ return re.sub(
+ r'(\?.*access_token=)[^&]*(.*)$',
+ r'\1<redacted>\2',
+ self.uri
+ )
+
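+    # Redaction example (illustrative URI): "/sync?access_token=secret&x=1"
+    # is logged as "/sync?access_token=<redacted>&x=1".
+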
+ def get_user_agent(self):
+ return self.requestHeaders.getRawHeaders("User-Agent", [None])[-1]
+
+ def started_processing(self):
+ self.site.access_logger.info(
+ "%s - %s - Received request: %s %s",
+ self.getClientIP(),
+ self.site.site_tag,
+ self.method,
+ self.get_redacted_uri()
+ )
+ self.start_time = int(time.time() * 1000)
+
+ def finished_processing(self):
+ self.site.access_logger.info(
+ "%s - %s - {%s}"
+ " Processed request: %dms %sB %s \"%s %s %s\" \"%s\"",
+ self.getClientIP(),
+ self.site.site_tag,
+ self.authenticated_entity,
+ int(time.time() * 1000) - self.start_time,
+ self.sentLength,
+ self.code,
+ self.method,
+ self.get_redacted_uri(),
+ self.clientproto,
+ self.get_user_agent(),
+ )
+
+ @contextlib.contextmanager
+ def processing(self):
+ self.started_processing()
+ yield
+ self.finished_processing()
+
+
+class XForwardedForRequest(SynapseRequest):
+    """
+    Add a layer on top of another request that only uses the value of an
+    X-Forwarded-For header as the result of C{getClientIP}.
+    """
+    def __init__(self, *args, **kw):
+        SynapseRequest.__init__(self, *args, **kw)
+
+ def getClientIP(self):
+ """
+ @return: The client address (the first address) in the value of the
+ I{X-Forwarded-For header}. If the header is not present, return
+ C{b"-"}.
+ """
+ return self.requestHeaders.getRawHeaders(
+ b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip()
+
+
+class SynapseRequestFactory(object):
+ def __init__(self, site, x_forwarded_for):
+ self.site = site
+ self.x_forwarded_for = x_forwarded_for
+
+ def __call__(self, *args, **kwargs):
+ if self.x_forwarded_for:
+ return XForwardedForRequest(self.site, *args, **kwargs)
+ else:
+ return SynapseRequest(self.site, *args, **kwargs)
+
+
+class SynapseSite(Site):
+ """
+ Subclass of a twisted http Site that does access logging with python's
+ standard logging
+ """
+ def __init__(self, logger_name, site_tag, config, resource, *args, **kwargs):
+ Site.__init__(self, resource, *args, **kwargs)
+
+ self.site_tag = site_tag
+
+ proxied = config.get("x_forwarded", False)
+ self.requestFactory = SynapseRequestFactory(self, proxied)
+ self.access_logger = logging.getLogger(logger_name)
+
+ def log(self, request):
+ pass
+
+
+def create_resource_tree(desired_tree, redirect_root_to_web_client=True):
+ """Create the resource tree for this Home Server.
+
+    This is unduly complicated because Twisted does not support putting
+    child resources more than 1 level deep at a time.
+
+    Args:
+        desired_tree (dict): A mapping of full URL paths to the Resource
+            objects that should serve them.
+        redirect_root_to_web_client (bool): True to redirect '/' to the
+            location of the web client. This does nothing if the web client
+            is not in desired_tree.
+ """
+ if redirect_root_to_web_client and WEB_CLIENT_PREFIX in desired_tree:
+ root_resource = RootRedirect(WEB_CLIENT_PREFIX)
+ else:
+ root_resource = Resource()
+
+ # ideally we'd just use getChild and putChild but getChild doesn't work
+ # unless you give it a Request object IN ADDITION to the name :/ So
+ # instead, we'll store a copy of this mapping so we can actually add
+ # extra resources to existing nodes. See self._resource_id for the key.
+ resource_mappings = {}
+ for full_path, res in desired_tree.items():
+ logger.info("Attaching %s to path %s", res, full_path)
+ last_resource = root_resource
+ for path_seg in full_path.split('/')[1:-1]:
+ if path_seg not in last_resource.listNames():
+ # resource doesn't exist, so make a "dummy resource"
+ child_resource = Resource()
+ last_resource.putChild(path_seg, child_resource)
+ res_id = _resource_id(last_resource, path_seg)
+ resource_mappings[res_id] = child_resource
+ last_resource = child_resource
+ else:
+ # we have an existing Resource, use that instead.
+ res_id = _resource_id(last_resource, path_seg)
+ last_resource = resource_mappings[res_id]
+
+ # ===========================
+ # now attach the actual desired resource
+ last_path_seg = full_path.split('/')[-1]
+
+ # if there is already a resource here, thieve its children and
+ # replace it
+ res_id = _resource_id(last_resource, last_path_seg)
+ if res_id in resource_mappings:
+ # there is a dummy resource at this path already, which needs
+ # to be replaced with the desired resource.
+ existing_dummy_resource = resource_mappings[res_id]
+ for child_name in existing_dummy_resource.listNames():
+ child_res_id = _resource_id(
+ existing_dummy_resource, child_name
+ )
+ child_resource = resource_mappings[child_res_id]
+ # steal the children
+ res.putChild(child_name, child_resource)
+
+ # finally, insert the desired resource in the right place
+ last_resource.putChild(last_path_seg, res)
+ res_id = _resource_id(last_resource, last_path_seg)
+ resource_mappings[res_id] = res
+
+ return root_resource
+
+
+def _resource_id(resource, path_seg):
+ """Construct an arbitrary resource ID so you can retrieve the mapping
+ later.
+
+ If you want to represent resource A putChild resource B with path C,
+    the mapping should look like _resource_id(A,C) = B.
+
+ Args:
+ resource (Resource): The *parent* Resource
+ path_seg (str): The name of the child Resource to be attached.
+ Returns:
+ str: A unique string which can be a key to the child Resource.
+ """
+ return "%s-%s" % (resource, path_seg)
+
+
+def run(hs):
+ PROFILE_SYNAPSE = False
+ if PROFILE_SYNAPSE:
+ def profile(func):
+ from cProfile import Profile
+ from threading import current_thread
+
+ def profiled(*args, **kargs):
+ profile = Profile()
+ profile.enable()
+ func(*args, **kargs)
+ profile.disable()
+ ident = current_thread().ident
+ profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
+ hs.hostname, func.__name__, ident
+ ))
+
+ return profiled
+
+ from twisted.python.threadpool import ThreadPool
+ ThreadPool._worker = profile(ThreadPool._worker)
+ reactor.run = profile(reactor.run)
+
+ start_time = hs.get_clock().time()
+
+ @defer.inlineCallbacks
+ def phone_stats_home():
+ now = int(hs.get_clock().time())
+ uptime = int(now - start_time)
+ if uptime < 0:
+ uptime = 0
+
+ stats = {}
+ stats["homeserver"] = hs.config.server_name
+ stats["timestamp"] = now
+ stats["uptime_seconds"] = uptime
+ stats["total_users"] = yield hs.get_datastore().count_all_users()
+
+ all_rooms = yield hs.get_datastore().get_rooms(False)
+ stats["total_room_count"] = len(all_rooms)
+
+ stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
+ daily_messages = yield hs.get_datastore().count_daily_messages()
+ if daily_messages is not None:
+ stats["daily_messages"] = daily_messages
+
+ logger.info("Reporting stats to matrix.org: %s" % (stats,))
+ try:
+ yield hs.get_simple_http_client().put_json(
+ "https://matrix.org/report-usage-stats/push",
+ stats
+ )
+ except Exception as e:
+ logger.warn("Error reporting stats: %s", e)
+
+ if hs.config.report_stats:
+ phone_home_task = task.LoopingCall(phone_stats_home)
+ phone_home_task.start(60 * 60 * 24, now=False)
+
+ def in_thread():
+ with LoggingContext("run"):
+ change_resource_limit(hs.config.soft_file_limit)
+ reactor.run()
+
+ if hs.config.daemonize:
+
+ if hs.config.print_pidfile:
+ print hs.config.pid_file
+
+ daemon = Daemonize(
+ app="synapse-homeserver",
+ pid=hs.config.pid_file,
+ action=lambda: in_thread(),
+ auto_close_fds=False,
+ verbose=True,
+ logger=logger,
+ )
+
+ daemon.start()
+ else:
+ in_thread()
+
+
+def main():
+ with LoggingContext("main"):
+ # check base requirements
+ check_requirements()
+ hs = setup(sys.argv[1:])
+ run(hs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py
new file mode 100755
index 00000000..5d82beed
--- /dev/null
+++ b/synapse/app/synctl.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import os
+import os.path
+import subprocess
+import signal
+import yaml
+
+SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
+
+GREEN = "\x1b[1;32m"
+RED = "\x1b[1;31m"
+NORMAL = "\x1b[m"
+
+
+def start(configfile):
+ print "Starting ...",
+ args = SYNAPSE
+ args.extend(["--daemonize", "-c", configfile])
+
+ try:
+ subprocess.check_call(args)
+ print GREEN + "started" + NORMAL
+ except subprocess.CalledProcessError as e:
+ print (
+ RED +
+ "error starting (exit code: %d); see above for logs" % e.returncode +
+ NORMAL
+ )
+
+
+def stop(pidfile):
+ if os.path.exists(pidfile):
+ pid = int(open(pidfile).read())
+ os.kill(pid, signal.SIGTERM)
+ print GREEN + "stopped" + NORMAL
+
+
+def main():
+ configfile = sys.argv[2] if len(sys.argv) == 3 else "homeserver.yaml"
+
+ if not os.path.exists(configfile):
+ sys.stderr.write(
+ "No config file found\n"
+ "To generate a config file, run '%s -c %s --generate-config"
+ " --server-name=<server name>'\n" % (
+ " ".join(SYNAPSE), configfile
+ )
+ )
+ sys.exit(1)
+
+ config = yaml.load(open(configfile))
+ pidfile = config["pid_file"]
+
+ action = sys.argv[1] if sys.argv[1:] else "usage"
+ if action == "start":
+ start(configfile)
+ elif action == "stop":
+ stop(pidfile)
+ elif action == "restart":
+ stop(pidfile)
+ start(configfile)
+ else:
+ sys.stderr.write("Usage: %s [start|stop|restart] [configfile]\n" % (sys.argv[0],))
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
new file mode 100644
index 00000000..e3ca45de
--- /dev/null
+++ b/synapse/appservice/__init__.py
@@ -0,0 +1,226 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.api.constants import EventTypes
+
+import logging
+import re
+
+logger = logging.getLogger(__name__)
+
+
+class ApplicationServiceState(object):
+ DOWN = "down"
+ UP = "up"
+
+
+class AppServiceTransaction(object):
+ """Represents an application service transaction."""
+
+ def __init__(self, service, id, events):
+ self.service = service
+ self.id = id
+ self.events = events
+
+ def send(self, as_api):
+ """Sends this transaction using the provided AS API interface.
+
+ Args:
+ as_api(ApplicationServiceApi): The API to use to send.
+ Returns:
+ A Deferred which resolves to True if the transaction was sent.
+ """
+ return as_api.push_bulk(
+ service=self.service,
+ events=self.events,
+ txn_id=self.id
+ )
+
+ def complete(self, store):
+ """Completes this transaction as successful.
+
+ Marks this transaction ID on the application service and removes the
+ transaction contents from the database.
+
+ Args:
+ store: The database store to operate on.
+ Returns:
+ A Deferred which resolves to True if the transaction was completed.
+ """
+ return store.complete_appservice_txn(
+ service=self.service,
+ txn_id=self.id
+ )
+
+
+class ApplicationService(object):
+ """Defines an application service. This definition is mostly what is
+ provided to the /register AS API.
+
+ Provides methods to check if this service is "interested" in events.
+ """
+ NS_USERS = "users"
+ NS_ALIASES = "aliases"
+ NS_ROOMS = "rooms"
+ # The ordering here is important as it is used to map database values (which
+ # are stored as ints representing the position in this list) to namespace
+ # values.
+ NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]
+
+ def __init__(self, token, url=None, namespaces=None, hs_token=None,
+ sender=None, id=None):
+ self.token = token
+ self.url = url
+ self.hs_token = hs_token
+ self.sender = sender
+ self.namespaces = self._check_namespaces(namespaces)
+ self.id = id
+
+ def _check_namespaces(self, namespaces):
+ # Sanity check that it is of the form:
+ # {
+ # users: [ {regex: "[A-z]+.*", exclusive: true}, ...],
+ # aliases: [ {regex: "[A-z]+.*", exclusive: true}, ...],
+ # rooms: [ {regex: "[A-z]+.*", exclusive: true}, ...],
+ # }
+ if not namespaces:
+ namespaces = {}
+
+ for ns in ApplicationService.NS_LIST:
+ if ns not in namespaces:
+ namespaces[ns] = []
+ continue
+
+ if type(namespaces[ns]) != list:
+ raise ValueError("Bad namespace value for '%s'" % ns)
+ for regex_obj in namespaces[ns]:
+ if not isinstance(regex_obj, dict):
+ raise ValueError("Expected dict regex for ns '%s'" % ns)
+ if not isinstance(regex_obj.get("exclusive"), bool):
+ raise ValueError(
+ "Expected bool for 'exclusive' in ns '%s'" % ns
+ )
+ if not isinstance(regex_obj.get("regex"), basestring):
+ raise ValueError(
+ "Expected string for 'regex' in ns '%s'" % ns
+ )
+ return namespaces
+
+ def _matches_regex(self, test_string, namespace_key, return_obj=False):
+ if not isinstance(test_string, basestring):
+ logger.error(
+ "Expected a string to test regex against, but got %s",
+ test_string
+ )
+ return False
+
+ for regex_obj in self.namespaces[namespace_key]:
+ if re.match(regex_obj["regex"], test_string):
+ if return_obj:
+ return regex_obj
+ return True
+ return False
+
+ def _is_exclusive(self, ns_key, test_string):
+ regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
+ if regex_obj:
+ return regex_obj["exclusive"]
+ return False
+
+ def _matches_user(self, event, member_list):
+ if (hasattr(event, "sender") and
+ self.is_interested_in_user(event.sender)):
+ return True
+ # also check m.room.member state key
+ if (hasattr(event, "type") and event.type == EventTypes.Member
+ and hasattr(event, "state_key")
+ and self.is_interested_in_user(event.state_key)):
+ return True
+ # check joined member events
+ for user_id in member_list:
+ if self.is_interested_in_user(user_id):
+ return True
+ return False
+
+ def _matches_room_id(self, event):
+ if hasattr(event, "room_id"):
+ return self.is_interested_in_room(event.room_id)
+ return False
+
+ def _matches_aliases(self, event, alias_list):
+ for alias in alias_list:
+ if self.is_interested_in_alias(alias):
+ return True
+ return False
+
+ def is_interested(self, event, restrict_to=None, aliases_for_event=None,
+ member_list=None):
+ """Check if this service is interested in this event.
+
+ Args:
+ event(Event): The event to check.
+ restrict_to(str): The namespace to restrict regex tests to.
+ aliases_for_event(list): A list of all the known room aliases for
+ this event.
+ member_list(list): A list of all joined user_ids in this room.
+ Returns:
+ bool: True if this service would like to know about this event.
+ """
+ if aliases_for_event is None:
+ aliases_for_event = []
+ if member_list is None:
+ member_list = []
+
+ if restrict_to and restrict_to not in ApplicationService.NS_LIST:
+ # this is a programming error, so fail early and raise a general
+ # exception
+ raise Exception("Unexpected restrict_to value: %s". restrict_to)
+
+ if not restrict_to:
+ return (self._matches_user(event, member_list)
+ or self._matches_aliases(event, aliases_for_event)
+ or self._matches_room_id(event))
+ elif restrict_to == ApplicationService.NS_ALIASES:
+ return self._matches_aliases(event, aliases_for_event)
+ elif restrict_to == ApplicationService.NS_ROOMS:
+ return self._matches_room_id(event)
+ elif restrict_to == ApplicationService.NS_USERS:
+ return self._matches_user(event, member_list)
+
+ def is_interested_in_user(self, user_id):
+ return (
+ self._matches_regex(user_id, ApplicationService.NS_USERS)
+ or user_id == self.sender
+ )
+
+ def is_interested_in_alias(self, alias):
+ return self._matches_regex(alias, ApplicationService.NS_ALIASES)
+
+ def is_interested_in_room(self, room_id):
+ return self._matches_regex(room_id, ApplicationService.NS_ROOMS)
+
+ def is_exclusive_user(self, user_id):
+ return (
+ self._is_exclusive(ApplicationService.NS_USERS, user_id)
+ or user_id == self.sender
+ )
+
+ def is_exclusive_alias(self, alias):
+ return self._is_exclusive(ApplicationService.NS_ALIASES, alias)
+
+ def is_exclusive_room(self, room_id):
+ return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)
+
+ def __str__(self):
+ return "ApplicationService: %s" % (self.__dict__,)
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
new file mode 100644
index 00000000..2a9beccc
--- /dev/null
+++ b/synapse/appservice/api.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import defer
+
+from synapse.api.errors import CodeMessageException
+from synapse.http.client import SimpleHttpClient
+from synapse.events.utils import serialize_event
+
+import logging
+import urllib
+
+logger = logging.getLogger(__name__)
+
+
+class ApplicationServiceApi(SimpleHttpClient):
+ """This class manages HS -> AS communications, including querying and
+ pushing.
+ """
+
+ def __init__(self, hs):
+ super(ApplicationServiceApi, self).__init__(hs)
+ self.clock = hs.get_clock()
+
+ @defer.inlineCallbacks
+ def query_user(self, service, user_id):
+ uri = service.url + ("/users/%s" % urllib.quote(user_id))
+ response = None
+ try:
+ response = yield self.get_json(uri, {
+ "access_token": service.hs_token
+ })
+ if response is not None: # just an empty json object
+ defer.returnValue(True)
+ except CodeMessageException as e:
+ if e.code == 404:
+ defer.returnValue(False)
+ return
+ logger.warning("query_user to %s received %s", uri, e.code)
+ except Exception as ex:
+ logger.warning("query_user to %s threw exception %s", uri, ex)
+ defer.returnValue(False)
+
+ @defer.inlineCallbacks
+ def query_alias(self, service, alias):
+ uri = service.url + ("/rooms/%s" % urllib.quote(alias))
+ response = None
+ try:
+ response = yield self.get_json(uri, {
+ "access_token": service.hs_token
+ })
+ if response is not None: # just an empty json object
+ defer.returnValue(True)
+ except CodeMessageException as e:
+ logger.warning("query_alias to %s received %s", uri, e.code)
+ if e.code == 404:
+ defer.returnValue(False)
+ return
+ except Exception as ex:
+ logger.warning("query_alias to %s threw exception %s", uri, ex)
+ defer.returnValue(False)
+
+ @defer.inlineCallbacks
+ def push_bulk(self, service, events, txn_id=None):
+ events = self._serialize(events)
+
+ if txn_id is None:
+ logger.warning("push_bulk: Missing txn ID sending events to %s",
+ service.url)
+ txn_id = str(0)
+ txn_id = str(txn_id)
+
+ uri = service.url + ("/transactions/%s" %
+ urllib.quote(txn_id))
+ try:
+ yield self.put_json(
+ uri=uri,
+ json_body={
+ "events": events
+ },
+ args={
+ "access_token": service.hs_token
+ })
+ defer.returnValue(True)
+ return
+ except CodeMessageException as e:
+ logger.warning("push_bulk to %s received %s", uri, e.code)
+ except Exception as ex:
+ logger.warning("push_bulk to %s threw exception %s", uri, ex)
+ defer.returnValue(False)
+
+ @defer.inlineCallbacks
+ def push(self, service, event, txn_id=None):
+ response = yield self.push_bulk(service, [event], txn_id)
+ defer.returnValue(response)
+
+ def _serialize(self, events):
+ time_now = self.clock.time_msec()
+ return [
+ serialize_event(e, time_now, as_client_event=True) for e in events
+ ]
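+
+
+# Wire-format sketch, as assembled by push_bulk above: the homeserver issues
+#   PUT <service.url>/transactions/<txn_id>?access_token=<service.hs_token>
+# with a JSON body of {"events": [...serialized client events...]}.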
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
new file mode 100644
index 00000000..44dc2c47
--- /dev/null
+++ b/synapse/appservice/scheduler.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This module controls the reliability for application service transactions.
+
+The nominal flow through this module looks like:
+ __________
+1---ASa[e]-->| Service |--> Queue ASa[f]
+2----ASb[e]->| Queuer |
+3--ASa[f]--->|__________|-----------+ ASa[e], ASb[e]
+ V
+ -````````- +------------+
+ |````````|<--StoreTxn-|Transaction |
+ |Database| | Controller |---> SEND TO AS
+ `--------` +------------+
+What happens on SEND TO AS depends on the state of the Application Service:
+ - If the AS is marked as DOWN, do nothing.
+ - If the AS is marked as UP, send the transaction.
+ * SUCCESS : Increment where the AS is up to txn-wise and nuke the txn
+ contents from the db.
+    * FAILURE : Mark AS as DOWN and start Recoverer.
+
+Recoverer attempts to recover ASes who have died. The flow for this looks like:
+ ,--------------------- backoff++ --------------.
+ V |
+ START ---> Wait exp ------> Get oldest txn ID from ----> FAILURE
+ backoff DB and try to send it
+ ^ |___________
+Mark AS as | V
+UP & quit +---------- YES SUCCESS
+ | | |
+ NO <--- Have more txns? <------ Mark txn success & nuke <-+
+ from db; incr AS pos.
+ Reset backoff.
+
+This is all tied together by the AppServiceScheduler which DIs the required
+components.
+"""
+
+from synapse.appservice import ApplicationServiceState
+from twisted.internet import defer
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class AppServiceScheduler(object):
+ """ Public facing API for this module. Does the required DI to tie the
+ components together. This also serves as the "event_pool", which in this
+ case is a simple array.
+ """
+
+ def __init__(self, clock, store, as_api):
+ self.clock = clock
+ self.store = store
+ self.as_api = as_api
+
+ def create_recoverer(service, callback):
+ return _Recoverer(clock, store, as_api, service, callback)
+
+ self.txn_ctrl = _TransactionController(
+ clock, store, as_api, create_recoverer
+ )
+ self.queuer = _ServiceQueuer(self.txn_ctrl)
+
+ @defer.inlineCallbacks
+ def start(self):
+ logger.info("Starting appservice scheduler")
+ # check for any DOWN ASes and start recoverers for them.
+ recoverers = yield _Recoverer.start(
+ self.clock, self.store, self.as_api, self.txn_ctrl.on_recovered
+ )
+ self.txn_ctrl.add_recoverers(recoverers)
+
+ def submit_event_for_as(self, service, event):
+ self.queuer.enqueue(service, event)
+
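+# Wiring sketch (editor's illustration; the hs accessors are assumptions):
+#
+#   from synapse.appservice.api import ApplicationServiceApi
+#   scheduler = AppServiceScheduler(hs.get_clock(), hs.get_datastore(),
+#                                   ApplicationServiceApi(hs))
+#   scheduler.start()
+#   scheduler.submit_event_for_as(service, event)
+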
+
+class _ServiceQueuer(object):
+ """Queues events for the same application service together, sending
+ transactions as soon as possible. Once a transaction is sent successfully,
+ this schedules any other events in the queue to run.
+ """
+
+ def __init__(self, txn_ctrl):
+ self.queued_events = {} # dict of {service_id: [events]}
+ self.pending_requests = {} # dict of {service_id: Deferred}
+ self.txn_ctrl = txn_ctrl
+
+ def enqueue(self, service, event):
+ # if this service isn't being sent something
+ if not self.pending_requests.get(service.id):
+ self._send_request(service, [event])
+ else:
+ # add to queue for this service
+ if service.id not in self.queued_events:
+ self.queued_events[service.id] = []
+ self.queued_events[service.id].append(event)
+
+ def _send_request(self, service, events):
+ # send request and add callbacks
+ d = self.txn_ctrl.send(service, events)
+ d.addBoth(self._on_request_finish)
+ d.addErrback(self._on_request_fail)
+ self.pending_requests[service.id] = d
+
+ def _on_request_finish(self, service):
+ self.pending_requests[service.id] = None
+ # if there are queued events, then send them.
+ if (service.id in self.queued_events
+ and len(self.queued_events[service.id]) > 0):
+ self._send_request(service, self.queued_events[service.id])
+ self.queued_events[service.id] = []
+
+ def _on_request_fail(self, err):
+ logger.error("AS request failed: %s", err)
+
+
+class _TransactionController(object):
+
+ def __init__(self, clock, store, as_api, recoverer_fn):
+ self.clock = clock
+ self.store = store
+ self.as_api = as_api
+ self.recoverer_fn = recoverer_fn
+ # keep track of how many recoverers there are
+ self.recoverers = []
+
+ @defer.inlineCallbacks
+ def send(self, service, events):
+ try:
+ txn = yield self.store.create_appservice_txn(
+ service=service,
+ events=events
+ )
+ service_is_up = yield self._is_service_up(service)
+ if service_is_up:
+ sent = yield txn.send(self.as_api)
+ if sent:
+ txn.complete(self.store)
+ else:
+ self._start_recoverer(service)
+ except Exception as e:
+ logger.exception(e)
+ self._start_recoverer(service)
+ # request has finished
+ defer.returnValue(service)
+
+ @defer.inlineCallbacks
+ def on_recovered(self, recoverer):
+ self.recoverers.remove(recoverer)
+ logger.info("Successfully recovered application service AS ID %s",
+ recoverer.service.id)
+ logger.info("Remaining active recoverers: %s", len(self.recoverers))
+ yield self.store.set_appservice_state(
+ recoverer.service,
+ ApplicationServiceState.UP
+ )
+
+ def add_recoverers(self, recoverers):
+ for r in recoverers:
+ self.recoverers.append(r)
+ if len(recoverers) > 0:
+ logger.info("New active recoverers: %s", len(self.recoverers))
+
+ @defer.inlineCallbacks
+ def _start_recoverer(self, service):
+ yield self.store.set_appservice_state(
+ service,
+ ApplicationServiceState.DOWN
+ )
+ logger.info(
+ "Application service falling behind. Starting recoverer. AS ID %s",
+ service.id
+ )
+ recoverer = self.recoverer_fn(service, self.on_recovered)
+ self.add_recoverers([recoverer])
+ recoverer.recover()
+
+ @defer.inlineCallbacks
+ def _is_service_up(self, service):
+ state = yield self.store.get_appservice_state(service)
+ defer.returnValue(state == ApplicationServiceState.UP or state is None)
+
+
+class _Recoverer(object):
+
+ @staticmethod
+ @defer.inlineCallbacks
+ def start(clock, store, as_api, callback):
+ services = yield store.get_appservices_by_state(
+ ApplicationServiceState.DOWN
+ )
+ recoverers = [
+ _Recoverer(clock, store, as_api, s, callback) for s in services
+ ]
+ for r in recoverers:
+ logger.info("Starting recoverer for AS ID %s which was marked as "
+ "DOWN", r.service.id)
+ r.recover()
+ defer.returnValue(recoverers)
+
+ def __init__(self, clock, store, as_api, service, callback):
+ self.clock = clock
+ self.store = store
+ self.as_api = as_api
+ self.service = service
+ self.callback = callback
+ self.backoff_counter = 1
+
+ def recover(self):
+ self.clock.call_later((2 ** self.backoff_counter), self.retry)
+
+ def _backoff(self):
+ # cap the backoff to be around 8.5min => (2^9) = 512 secs
+ if self.backoff_counter < 9:
+ self.backoff_counter += 1
+ self.recover()
+
+ @defer.inlineCallbacks
+ def retry(self):
+ try:
+ txn = yield self.store.get_oldest_unsent_txn(self.service)
+ if txn:
+ logger.info("Retrying transaction %s for AS ID %s",
+ txn.id, txn.service.id)
+ sent = yield txn.send(self.as_api)
+ if sent:
+ yield txn.complete(self.store)
+ # reset the backoff counter and retry immediately
+ self.backoff_counter = 1
+ yield self.retry()
+ else:
+ self._backoff()
+ else:
+ self._set_service_recovered()
+ except Exception as e:
+ logger.exception(e)
+ self._backoff()
+
+ def _set_service_recovered(self):
+ self.callback(self)
diff --git a/synapse/config/__init__.py b/synapse/config/__init__.py
new file mode 100644
index 00000000..c488b10d
--- /dev/null
+++ b/synapse/config/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py
new file mode 100644
index 00000000..f822d120
--- /dev/null
+++ b/synapse/config/__main__.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if __name__ == "__main__":
+ import sys
+ from homeserver import HomeServerConfig
+
+ action = sys.argv[1]
+
+ if action == "read":
+ key = sys.argv[2]
+ config = HomeServerConfig.load_config("", sys.argv[3:])
+
+ print getattr(config, key)
+ sys.exit(0)
+ else:
+ sys.stderr.write("Unknown command %r\n" % (action,))
+ sys.exit(1)
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
new file mode 100644
index 00000000..c18e0bdb
--- /dev/null
+++ b/synapse/config/_base.py
@@ -0,0 +1,317 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import errno
+import os
+import yaml
+import sys
+from textwrap import dedent
+
+
+class ConfigError(Exception):
+ pass
+
+
+class Config(object):
+
+ stats_reporting_begging_spiel = (
+ "We would really appreciate it if you could help our project out by"
+ " reporting anonymized usage statistics from your homeserver. Only very"
+ " basic aggregate data (e.g. number of users) will be reported, but it"
+ " helps us to track the growth of the Matrix community, and helps us to"
+ " make Matrix a success, as well as to convince other networks that they"
+ " should peer with us."
+ "\nThank you."
+ )
+
+ @staticmethod
+ def parse_size(value):
+ if isinstance(value, int) or isinstance(value, long):
+ return value
+ sizes = {"K": 1024, "M": 1024 * 1024}
+ size = 1
+ suffix = value[-1]
+ if suffix in sizes:
+ value = value[:-1]
+ size = sizes[suffix]
+ return int(value) * size
+
+ @staticmethod
+ def parse_duration(value):
+ if isinstance(value, int) or isinstance(value, long):
+ return value
+ second = 1000
+ hour = 60 * 60 * second
+ day = 24 * hour
+ week = 7 * day
+ year = 365 * day
+ sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
+ size = 1
+ suffix = value[-1]
+ if suffix in sizes:
+ value = value[:-1]
+ size = sizes[suffix]
+ return int(value) * size
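+
+    # Worked examples (derived from the code above): parse_size("10K")
+    # returns 10240 bytes, and parse_duration("2h") returns 7200000,
+    # i.e. durations are expressed in milliseconds.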
+
+ @staticmethod
+ def abspath(file_path):
+ return os.path.abspath(file_path) if file_path else file_path
+
+ @classmethod
+ def check_file(cls, file_path, config_name):
+ if file_path is None:
+ raise ConfigError(
+ "Missing config for %s."
+ " You must specify a path for the config file. You can "
+ "do this with the -c or --config-path option. "
+ "Adding --generate-config along with --server-name "
+ "<server name> will generate a config file at the given path."
+ % (config_name,)
+ )
+ if not os.path.exists(file_path):
+ raise ConfigError(
+ "File %s config for %s doesn't exist."
+ " Try running again with --generate-config"
+ % (file_path, config_name,)
+ )
+ return cls.abspath(file_path)
+
+ @classmethod
+ def ensure_directory(cls, dir_path):
+ dir_path = cls.abspath(dir_path)
+ try:
+ os.makedirs(dir_path)
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ if not os.path.isdir(dir_path):
+ raise ConfigError(
+ "%s is not a directory" % (dir_path,)
+ )
+ return dir_path
+
+ @classmethod
+ def read_file(cls, file_path, config_name):
+ cls.check_file(file_path, config_name)
+ with open(file_path) as file_stream:
+ return file_stream.read()
+
+ @staticmethod
+ def default_path(name):
+ return os.path.abspath(os.path.join(os.path.curdir, name))
+
+ @staticmethod
+ def read_config_file(file_path):
+ with open(file_path) as file_stream:
+ return yaml.load(file_stream)
+
+ def invoke_all(self, name, *args, **kargs):
+ results = []
+ for cls in type(self).mro():
+ if name in cls.__dict__:
+ results.append(getattr(cls, name)(self, *args, **kargs))
+ return results
+
+ def generate_config(self, config_dir_path, server_name, report_stats=None):
+ default_config = "# vim:ft=yaml\n"
+
+ default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
+ "default_config",
+ config_dir_path=config_dir_path,
+ server_name=server_name,
+ report_stats=report_stats,
+ ))
+
+ config = yaml.load(default_config)
+
+ return default_config, config
+
+ @classmethod
+ def load_config(cls, description, argv, generate_section=None):
+ obj = cls()
+
+ config_parser = argparse.ArgumentParser(add_help=False)
+ config_parser.add_argument(
+ "-c", "--config-path",
+ action="append",
+ metavar="CONFIG_FILE",
+ help="Specify config file. Can be given multiple times and"
+ " may specify directories containing *.yaml files."
+ )
+ config_parser.add_argument(
+ "--generate-config",
+ action="store_true",
+ help="Generate a config file for the server name"
+ )
+ config_parser.add_argument(
+ "--report-stats",
+ action="store",
+ help="Stuff",
+ choices=["yes", "no"]
+ )
+ config_parser.add_argument(
+ "--generate-keys",
+ action="store_true",
+ help="Generate any missing key files then exit"
+ )
+ config_parser.add_argument(
+ "--keys-directory",
+ metavar="DIRECTORY",
+ help="Used with 'generate-*' options to specify where files such as"
+ " certs and signing keys should be stored in, unless explicitly"
+ " specified in the config."
+ )
+ config_parser.add_argument(
+ "-H", "--server-name",
+ help="The server name to generate a config file for"
+ )
+ config_args, remaining_args = config_parser.parse_known_args(argv)
+
+ generate_keys = config_args.generate_keys
+
+ config_files = []
+ if config_args.config_path:
+ for config_path in config_args.config_path:
+ if os.path.isdir(config_path):
+ # We accept specifying directories as config paths, we search
+ # inside that directory for all files matching *.yaml, and then
+ # we apply them in *sorted* order.
+ files = []
+ for entry in os.listdir(config_path):
+ entry_path = os.path.join(config_path, entry)
+ if not os.path.isfile(entry_path):
+ print (
+ "Found subdirectory in config directory: %r. IGNORING."
+ ) % (entry_path, )
+ continue
+
+ if not entry.endswith(".yaml"):
+ print (
+ "Found file in config directory that does not"
+ " end in '.yaml': %r. IGNORING."
+ ) % (entry_path, )
+ continue
+
+ files.append(entry_path)
+
+ config_files.extend(sorted(files))
+ else:
+ config_files.append(config_path)
+
+ if config_args.generate_config:
+ if config_args.report_stats is None:
+ config_parser.error(
+ "Please specify either --report-stats=yes or --report-stats=no\n\n" +
+ cls.stats_reporting_begging_spiel
+ )
+ if not config_files:
+ config_parser.error(
+ "Must supply a config file.\nA config file can be automatically"
+ " generated using \"--generate-config -H SERVER_NAME"
+ " -c CONFIG-FILE\""
+ )
+ (config_path,) = config_files
+ if not os.path.exists(config_path):
+ if config_args.keys_directory:
+ config_dir_path = config_args.keys_directory
+ else:
+ config_dir_path = os.path.dirname(config_path)
+ config_dir_path = os.path.abspath(config_dir_path)
+
+ server_name = config_args.server_name
+ if not server_name:
+ print "Must specify a server_name to a generate config for."
+ sys.exit(1)
+ if not os.path.exists(config_dir_path):
+ os.makedirs(config_dir_path)
+ with open(config_path, "wb") as config_file:
+ config_bytes, config = obj.generate_config(
+ config_dir_path=config_dir_path,
+ server_name=server_name,
+ report_stats=(config_args.report_stats == "yes"),
+ )
+ obj.invoke_all("generate_files", config)
+ config_file.write(config_bytes)
+ print (
+ "A config file has been generated in %r for server name"
+ " %r with corresponding SSL keys and self-signed"
+ " certificates. Please review this file and customise it"
+ " to your needs."
+ ) % (config_path, server_name)
+ print (
+ "If this server name is incorrect, you will need to"
+ " regenerate the SSL certificates"
+ )
+ sys.exit(0)
+ else:
+ print (
+ "Config file %r already exists. Generating any missing key"
+ " files."
+ ) % (config_path,)
+ generate_keys = True
+
+ parser = argparse.ArgumentParser(
+ parents=[config_parser],
+ description=description,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+
+ obj.invoke_all("add_arguments", parser)
+ args = parser.parse_args(remaining_args)
+
+ if not config_files:
+ config_parser.error(
+ "Must supply a config file.\nA config file can be automatically"
+ " generated using \"--generate-config -H SERVER_NAME"
+ " -c CONFIG-FILE\""
+ )
+
+ if config_args.keys_directory:
+ config_dir_path = config_args.keys_directory
+ else:
+ config_dir_path = os.path.dirname(config_args.config_path[-1])
+ config_dir_path = os.path.abspath(config_dir_path)
+
+ specified_config = {}
+ for config_file in config_files:
+ yaml_config = cls.read_config_file(config_file)
+ specified_config.update(yaml_config)
+
+ server_name = specified_config["server_name"]
+ _, config = obj.generate_config(
+ config_dir_path=config_dir_path,
+ server_name=server_name
+ )
+ config.pop("log_config")
+ config.update(specified_config)
+ if "report_stats" not in config:
+ sys.stderr.write(
+ "Please opt in or out of reporting anonymized homeserver usage "
+ "statistics, by setting the report_stats key in your config file "
+ " ( " + config_path + " ) " +
+ "to either True or False.\n\n" +
+ Config.stats_reporting_begging_spiel + "\n")
+ sys.exit(1)
+
+ if generate_keys:
+ obj.invoke_all("generate_files", config)
+ sys.exit(0)
+
+ obj.invoke_all("read_config", config)
+
+ obj.invoke_all("read_arguments", args)
+
+ return obj
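+
+
+# Command-line sketch (flags defined above; the server name is illustrative):
+#
+#   python -m synapse.app.homeserver --generate-config -H example.com \
+#       -c homeserver.yaml --report-stats=no
+#
+# writes homeserver.yaml plus any missing key files and exits; a later run
+# with just "-c homeserver.yaml" loads that config.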
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
new file mode 100644
index 00000000..b8d30199
--- /dev/null
+++ b/synapse/config/appservice.py
@@ -0,0 +1,27 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class AppServiceConfig(Config):
+
+ def read_config(self, config):
+ self.app_service_config_files = config.get("app_service_config_files", [])
+
+ def default_config(cls, **kwargs):
+ return """\
+        # A list of application service config files to use
+ app_service_config_files: []
+ """
diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py
new file mode 100644
index 00000000..dd92fcd0
--- /dev/null
+++ b/synapse/config/captcha.py
@@ -0,0 +1,47 @@
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class CaptchaConfig(Config):
+
+ def read_config(self, config):
+ self.recaptcha_private_key = config["recaptcha_private_key"]
+ self.recaptcha_public_key = config["recaptcha_public_key"]
+ self.enable_registration_captcha = config["enable_registration_captcha"]
+ self.captcha_bypass_secret = config.get("captcha_bypass_secret")
+ self.recaptcha_siteverify_api = config["recaptcha_siteverify_api"]
+
+ def default_config(self, **kwargs):
+ return """\
+ ## Captcha ##
+
+        # This Home Server's ReCAPTCHA private key.
+        recaptcha_private_key: "YOUR_PRIVATE_KEY"
+
+        # This Home Server's ReCAPTCHA public key.
+        recaptcha_public_key: "YOUR_PUBLIC_KEY"
+
+ # Enables ReCaptcha checks when registering, preventing signup
+ # unless a captcha is answered. Requires a valid ReCaptcha
+ # public/private key.
+ enable_registration_captcha: False
+
+ # A secret key used to bypass the captcha test entirely.
+ #captcha_bypass_secret: "YOUR_SECRET_HERE"
+
+ # The API endpoint to use for verifying m.login.recaptcha responses.
+ recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
+ """
diff --git a/synapse/config/cas.py b/synapse/config/cas.py
new file mode 100644
index 00000000..326e4058
--- /dev/null
+++ b/synapse/config/cas.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class CasConfig(Config):
+    """CAS Configuration
+
+ cas_server_url: URL of CAS server
+ """
+
+ def read_config(self, config):
+ cas_config = config.get("cas_config", None)
+ if cas_config:
+ self.cas_enabled = cas_config.get("enabled", True)
+ self.cas_server_url = cas_config["server_url"]
+ self.cas_service_url = cas_config["service_url"]
+ self.cas_required_attributes = cas_config.get("required_attributes", {})
+ else:
+ self.cas_enabled = False
+ self.cas_server_url = None
+ self.cas_service_url = None
+ self.cas_required_attributes = {}
+
+ def default_config(self, config_dir_path, server_name, **kwargs):
+ return """
+ # Enable CAS for registration and login.
+ #cas_config:
+ # enabled: true
+ # server_url: "https://cas-server.com"
+        #   service_url: "https://homeserver.domain.com:8448"
+ # #required_attributes:
+ # # name: value
+ """
diff --git a/synapse/config/database.py b/synapse/config/database.py
new file mode 100644
index 00000000..baeda8f3
--- /dev/null
+++ b/synapse/config/database.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class DatabaseConfig(Config):
+
+ def read_config(self, config):
+ self.event_cache_size = self.parse_size(
+ config.get("event_cache_size", "10K")
+ )
+
+ self.database_config = config.get("database")
+
+ if self.database_config is None:
+ self.database_config = {
+ "name": "sqlite3",
+ "args": {},
+ }
+
+ name = self.database_config.get("name", None)
+ if name == "psycopg2":
+ pass
+ elif name == "sqlite3":
+ self.database_config.setdefault("args", {}).update({
+ "cp_min": 1,
+ "cp_max": 1,
+ "check_same_thread": False,
+ })
+ else:
+ raise RuntimeError("Unsupported database type '%s'" % (name,))
+
+ self.set_databasepath(config.get("database_path"))
+
+ def default_config(self, **kwargs):
+ database_path = self.abspath("homeserver.db")
+ return """\
+ # Database configuration
+ database:
+ # The database engine name
+ name: "sqlite3"
+ # Arguments to pass to the engine
+ args:
+ # Path to the database
+ database: "%(database_path)s"
+
+ # Number of events to cache in memory.
+ event_cache_size: "10K"
+ """ % locals()
+
+ def read_arguments(self, args):
+ self.set_databasepath(args.database_path)
+
+ def set_databasepath(self, database_path):
+ if database_path != ":memory:":
+ database_path = self.abspath(database_path)
+ if self.database_config.get("name", None) == "sqlite3":
+ if database_path is not None:
+ self.database_config["args"]["database"] = database_path
+
+ def add_arguments(self, parser):
+ db_group = parser.add_argument_group("database")
+ db_group.add_argument(
+ "-d", "--database-path", metavar="SQLITE_DATABASE_PATH",
+ help="The path to a sqlite database to use."
+ )
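
Editor's note: for reference, this is what the sqlite3 branch of read_config() above leaves in database_config; the single-connection pool arguments are presumably forced because SQLite copes badly with concurrent writers (toy dict, not upstream code):

    config = {
        "database": {"name": "sqlite3", "args": {"database": "/data/homeserver.db"}},
    }
    database_config = config["database"]
    if database_config.get("name") == "sqlite3":
        database_config.setdefault("args", {}).update({
            "cp_min": 1,                  # connection pool holds exactly
            "cp_max": 1,                  # one connection...
            "check_same_thread": False,   # ...shared across pool threads
        })
    print(database_config["args"])
    # {'database': '/data/homeserver.db', 'cp_min': 1, 'cp_max': 1,
    #  'check_same_thread': False}
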
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
new file mode 100644
index 00000000..4743e6ab
--- /dev/null
+++ b/synapse/config/homeserver.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .tls import TlsConfig
+from .server import ServerConfig
+from .logger import LoggingConfig
+from .database import DatabaseConfig
+from .ratelimiting import RatelimitConfig
+from .repository import ContentRepositoryConfig
+from .captcha import CaptchaConfig
+from .voip import VoipConfig
+from .registration import RegistrationConfig
+from .metrics import MetricsConfig
+from .appservice import AppServiceConfig
+from .key import KeyConfig
+from .saml2 import SAML2Config
+from .cas import CasConfig
+from .password import PasswordConfig
+
+
+class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
+ RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
+ VoipConfig, RegistrationConfig, MetricsConfig,
+ AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
+ PasswordConfig,):
+ pass
+
+
+if __name__ == '__main__':
+ import sys
+ sys.stdout.write(
+ HomeServerConfig().generate_config(sys.argv[1], sys.argv[2])[0]
+ )
diff --git a/synapse/config/key.py b/synapse/config/key.py
new file mode 100644
index 00000000..2c187065
--- /dev/null
+++ b/synapse/config/key.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config, ConfigError
+
+from synapse.util.stringutils import random_string
+from signedjson.key import (
+ generate_signing_key, is_signing_algorithm_supported,
+ decode_signing_key_base64, decode_verify_key_bytes,
+ read_signing_keys, write_signing_keys, NACL_ED25519
+)
+from unpaddedbase64 import decode_base64
+
+import os
+
+
+class KeyConfig(Config):
+
+ def read_config(self, config):
+ self.signing_key = self.read_signing_key(config["signing_key_path"])
+ self.old_signing_keys = self.read_old_signing_keys(
+ config["old_signing_keys"]
+ )
+ self.key_refresh_interval = self.parse_duration(
+ config["key_refresh_interval"]
+ )
+ self.perspectives = self.read_perspectives(
+ config["perspectives"]
+ )
+
+ def default_config(self, config_dir_path, server_name, **kwargs):
+ base_key_name = os.path.join(config_dir_path, server_name)
+ return """\
+ ## Signing Keys ##
+
+ # Path to the signing key to sign messages with
+ signing_key_path: "%(base_key_name)s.signing.key"
+
+ # The keys that the server used to sign messages with but won't use
+        # to sign new messages, e.g. because it has lost its private key.
+ old_signing_keys: {}
+ # "ed25519:auto":
+ # # Base64 encoded public key
+ # key: "The public part of your old signing key."
+ # # Millisecond POSIX timestamp when the key expired.
+ # expired_ts: 123456789123
+
+        # How long key responses published by this server are valid for.
+ # Used to set the valid_until_ts in /key/v2 APIs.
+ # Determines how quickly servers will query to check which keys
+ # are still valid.
+ key_refresh_interval: "1d" # 1 Day.
+
+ # The trusted servers to download signing keys from.
+ perspectives:
+ servers:
+ "matrix.org":
+ verify_keys:
+ "ed25519:auto":
+ key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+ """ % locals()
+
+ def read_perspectives(self, perspectives_config):
+ servers = {}
+ for server_name, server_config in perspectives_config["servers"].items():
+ for key_id, key_data in server_config["verify_keys"].items():
+ if is_signing_algorithm_supported(key_id):
+ key_base64 = key_data["key"]
+ key_bytes = decode_base64(key_base64)
+ verify_key = decode_verify_key_bytes(key_id, key_bytes)
+ servers.setdefault(server_name, {})[key_id] = verify_key
+ return servers
+
+ def read_signing_key(self, signing_key_path):
+ signing_keys = self.read_file(signing_key_path, "signing_key")
+ try:
+ return read_signing_keys(signing_keys.splitlines(True))
+ except Exception:
+ raise ConfigError(
+ "Error reading signing_key."
+ " Try running again with --generate-config"
+ )
+
+ def read_old_signing_keys(self, old_signing_keys):
+ keys = {}
+ for key_id, key_data in old_signing_keys.items():
+ if is_signing_algorithm_supported(key_id):
+ key_base64 = key_data["key"]
+ key_bytes = decode_base64(key_base64)
+ verify_key = decode_verify_key_bytes(key_id, key_bytes)
+ verify_key.expired_ts = key_data["expired_ts"]
+ keys[key_id] = verify_key
+ else:
+ raise ConfigError(
+ "Unsupported signing algorithm for old key: %r" % (key_id,)
+ )
+ return keys
+
+ def generate_files(self, config):
+ signing_key_path = config["signing_key_path"]
+ if not os.path.exists(signing_key_path):
+ with open(signing_key_path, "w") as signing_key_file:
+ key_id = "a_" + random_string(4)
+ write_signing_keys(
+ signing_key_file, (generate_signing_key(key_id),),
+ )
+ else:
+ signing_keys = self.read_file(signing_key_path, "signing_key")
+ if len(signing_keys.split("\n")[0].split()) == 1:
+ # handle keys in the old format.
+ key_id = "a_" + random_string(4)
+ key = decode_signing_key_base64(
+ NACL_ED25519, key_id, signing_keys.split("\n")[0]
+ )
+ with open(signing_key_path, "w") as signing_key_file:
+ write_signing_keys(
+ signing_key_file, (key,),
+ )
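
Editor's note: a standalone sketch of the key generation that generate_files() performs, using the same signedjson helpers imported above ("a_demo" and the file path are placeholders):

    from signedjson.key import generate_signing_key, write_signing_keys

    signing_key = generate_signing_key("a_demo")  # key version "a_demo"
    with open("demo.signing.key", "w") as key_file:
        write_signing_keys(key_file, (signing_key,))
    # The file holds one "<algorithm> <version> <base64 seed>" line,
    # which read_signing_keys() above can load back.
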
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
new file mode 100644
index 00000000..a13dc170
--- /dev/null
+++ b/synapse/config/logger.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+from synapse.util.logcontext import LoggingContextFilter
+from twisted.python.log import PythonLoggingObserver
+import logging
+import logging.config
+import yaml
+from string import Template
+import os
+import signal
+from synapse.util.debug import debug_deferreds
+
+
+DEFAULT_LOG_CONFIG = Template("""
+version: 1
+
+formatters:
+ precise:
+ format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s\
+- %(message)s'
+
+filters:
+ context:
+ (): synapse.util.logcontext.LoggingContextFilter
+ request: ""
+
+handlers:
+ file:
+ class: logging.handlers.RotatingFileHandler
+ formatter: precise
+ filename: ${log_file}
+ maxBytes: 104857600
+ backupCount: 10
+ filters: [context]
+ level: INFO
+ console:
+ class: logging.StreamHandler
+ formatter: precise
+
+loggers:
+ synapse:
+ level: INFO
+
+ synapse.storage.SQL:
+ level: INFO
+
+root:
+ level: INFO
+ handlers: [file, console]
+""")
+
+
+class LoggingConfig(Config):
+
+ def read_config(self, config):
+ self.verbosity = config.get("verbose", 0)
+ self.log_config = self.abspath(config.get("log_config"))
+ self.log_file = self.abspath(config.get("log_file"))
+ if config.get("full_twisted_stacktraces"):
+ debug_deferreds()
+
+ def default_config(self, config_dir_path, server_name, **kwargs):
+ log_file = self.abspath("homeserver.log")
+ log_config = self.abspath(
+ os.path.join(config_dir_path, server_name + ".log.config")
+ )
+ return """
+ # Logging verbosity level.
+ verbose: 0
+
+ # File to write logging to
+ log_file: "%(log_file)s"
+
+ # A yaml python logging config file
+ log_config: "%(log_config)s"
+
+ # Stop twisted from discarding the stack traces of exceptions in
+ # deferreds by waiting a reactor tick before running a deferred's
+ # callbacks.
+ # full_twisted_stacktraces: true
+ """ % locals()
+
+ def read_arguments(self, args):
+ if args.verbose is not None:
+ self.verbosity = args.verbose
+ if args.log_config is not None:
+ self.log_config = args.log_config
+ if args.log_file is not None:
+ self.log_file = args.log_file
+
+    def add_arguments(self, parser):
+ logging_group = parser.add_argument_group("logging")
+ logging_group.add_argument(
+ '-v', '--verbose', dest="verbose", action='count',
+ help="The verbosity level."
+ )
+ logging_group.add_argument(
+ '-f', '--log-file', dest="log_file",
+ help="File to log to."
+ )
+ logging_group.add_argument(
+ '--log-config', dest="log_config", default=None,
+ help="Python logging config file"
+ )
+
+ def generate_files(self, config):
+ log_config = config.get("log_config")
+ if log_config and not os.path.exists(log_config):
+ with open(log_config, "wb") as log_config_file:
+ log_config_file.write(
+ DEFAULT_LOG_CONFIG.substitute(log_file=config["log_file"])
+ )
+
+ def setup_logging(self):
+ log_format = (
+ "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
+ " - %(message)s"
+ )
+ if self.log_config is None:
+
+ level = logging.INFO
+ level_for_storage = logging.INFO
+ if self.verbosity:
+ level = logging.DEBUG
+ if self.verbosity > 1:
+ level_for_storage = logging.DEBUG
+
+ # FIXME: we need a logging.WARN for a -q quiet option
+ logger = logging.getLogger('')
+ logger.setLevel(level)
+
+ logging.getLogger('synapse.storage').setLevel(level_for_storage)
+
+ formatter = logging.Formatter(log_format)
+ if self.log_file:
+ # TODO: Customisable file size / backup count
+ handler = logging.handlers.RotatingFileHandler(
+ self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
+ )
+
+ def sighup(signum, stack):
+ logger.info("Closing log file due to SIGHUP")
+ handler.doRollover()
+ logger.info("Opened new log file due to SIGHUP")
+
+ # TODO(paul): obviously this is a terrible mechanism for
+ # stealing SIGHUP, because it means no other part of synapse
+ # can use it instead. If we want to catch SIGHUP anywhere
+ # else as well, I'd suggest we find a nicer way to broadcast
+ # it around.
+                if getattr(signal, "SIGHUP", None):
+ signal.signal(signal.SIGHUP, sighup)
+ else:
+ handler = logging.StreamHandler()
+ handler.setFormatter(formatter)
+
+ handler.addFilter(LoggingContextFilter(request=""))
+
+ logger.addHandler(handler)
+ else:
+ with open(self.log_config, 'r') as f:
+ logging.config.dictConfig(yaml.load(f))
+
+ observer = PythonLoggingObserver()
+ observer.start()
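
Editor's note: generate_files() above just fills the YAML template; a sketch of rendering it and feeding the result straight to the stdlib dict-config loader (placeholder log path; assumes synapse is importable, since the template references synapse.util.logcontext.LoggingContextFilter):

    import logging
    import logging.config

    import yaml

    from synapse.config.logger import DEFAULT_LOG_CONFIG

    rendered = DEFAULT_LOG_CONFIG.substitute(log_file="/tmp/homeserver.log")
    logging.config.dictConfig(yaml.safe_load(rendered))
    logging.getLogger("synapse").info("dict-config logging active")
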
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
new file mode 100644
index 00000000..825fec9a
--- /dev/null
+++ b/synapse/config/metrics.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class MetricsConfig(Config):
+ def read_config(self, config):
+ self.enable_metrics = config["enable_metrics"]
+ self.report_stats = config.get("report_stats", None)
+ self.metrics_port = config.get("metrics_port")
+ self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")
+
+ def default_config(self, report_stats=None, **kwargs):
+ suffix = "" if report_stats is None else "report_stats: %(report_stats)s\n"
+ return ("""\
+        ## Metrics ##
+
+ # Enable collection and rendering of performance metrics
+ enable_metrics: False
+ """ + suffix) % locals()
diff --git a/synapse/config/password.py b/synapse/config/password.py
new file mode 100644
index 00000000..1a3e2784
--- /dev/null
+++ b/synapse/config/password.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class PasswordConfig(Config):
+ """Password login configuration
+ """
+
+ def read_config(self, config):
+ password_config = config.get("password_config", {})
+ self.password_enabled = password_config.get("enabled", True)
+
+ def default_config(self, config_dir_path, server_name, **kwargs):
+ return """
+ # Enable password for login.
+ password_config:
+ enabled: true
+ """
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
new file mode 100644
index 00000000..611b598e
--- /dev/null
+++ b/synapse/config/ratelimiting.py
@@ -0,0 +1,58 @@
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class RatelimitConfig(Config):
+
+ def read_config(self, config):
+ self.rc_messages_per_second = config["rc_messages_per_second"]
+ self.rc_message_burst_count = config["rc_message_burst_count"]
+
+ self.federation_rc_window_size = config["federation_rc_window_size"]
+ self.federation_rc_sleep_limit = config["federation_rc_sleep_limit"]
+ self.federation_rc_sleep_delay = config["federation_rc_sleep_delay"]
+ self.federation_rc_reject_limit = config["federation_rc_reject_limit"]
+ self.federation_rc_concurrent = config["federation_rc_concurrent"]
+
+ def default_config(self, **kwargs):
+ return """\
+ ## Ratelimiting ##
+
+ # Number of messages a client can send per second
+ rc_messages_per_second: 0.2
+
+        # Number of messages a client can send before being throttled
+ rc_message_burst_count: 10.0
+
+ # The federation window size in milliseconds
+ federation_rc_window_size: 1000
+
+ # The number of federation requests from a single server in a window
+ # before the server will delay processing the request.
+ federation_rc_sleep_limit: 10
+
+ # The duration in milliseconds to delay processing events from
+ # remote servers by if they go over the sleep limit.
+ federation_rc_sleep_delay: 500
+
+ # The maximum number of concurrent federation requests allowed
+ # from a single server
+ federation_rc_reject_limit: 50
+
+ # The number of federation requests to concurrently process from a
+ # single server
+ federation_rc_concurrent: 3
+ """
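
Editor's note: to make the two client-message knobs concrete, here is the arithmetic of a simple token bucket with rate 0.2 msg/s and burst 10. This only illustrates how such limits typically interact; it is not Synapse's actual Ratelimiter:

    RC_MESSAGES_PER_SECOND = 0.2
    RC_MESSAGE_BURST_COUNT = 10.0

    def try_send(tokens, now, last):
        """Refill the bucket for elapsed time, then try to spend one token."""
        tokens = min(RC_MESSAGE_BURST_COUNT,
                     tokens + (now - last) * RC_MESSAGES_PER_SECOND)
        if tokens >= 1.0:
            return True, tokens - 1.0
        return False, tokens

    # A full bucket allows a burst of 10 back-to-back messages, after
    # which sustained throughput settles at one message per 5 seconds.
    tokens = RC_MESSAGE_BURST_COUNT
    for i in range(12):
        allowed, tokens = try_send(tokens, now=0.0, last=0.0)
        print(i, allowed)   # True ten times, then False
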
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
new file mode 100644
index 00000000..dca391f7
--- /dev/null
+++ b/synapse/config/registration.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+from synapse.util.stringutils import random_string_with_symbols
+
+from distutils.util import strtobool
+
+
+class RegistrationConfig(Config):
+
+ def read_config(self, config):
+ self.disable_registration = not bool(
+ strtobool(str(config["enable_registration"]))
+ )
+ if "disable_registration" in config:
+ self.disable_registration = bool(
+ strtobool(str(config["disable_registration"]))
+ )
+
+ self.registration_shared_secret = config.get("registration_shared_secret")
+ self.macaroon_secret_key = config.get("macaroon_secret_key")
+ self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
+ self.allow_guest_access = config.get("allow_guest_access", False)
+
+ def default_config(self, **kwargs):
+ registration_shared_secret = random_string_with_symbols(50)
+ macaroon_secret_key = random_string_with_symbols(50)
+ return """\
+ ## Registration ##
+
+ # Enable registration for new users.
+ enable_registration: False
+
+ # If set, allows registration by anyone who also has the shared
+ # secret, even if registration is otherwise disabled.
+ registration_shared_secret: "%(registration_shared_secret)s"
+
+ macaroon_secret_key: "%(macaroon_secret_key)s"
+
+        # Set the number of bcrypt rounds used to generate password hashes.
+ # Larger numbers increase the work factor needed to generate the hash.
+ # The default number of rounds is 12.
+ bcrypt_rounds: 12
+
+ # Allows users to register as guests without a password/email/etc, and
+ # participate in rooms hosted on this server which have been made
+ # accessible to anonymous users.
+ allow_guest_access: False
+ """ % locals()
+
+ def add_arguments(self, parser):
+ reg_group = parser.add_argument_group("registration")
+ reg_group.add_argument(
+ "--enable-registration", action="store_true", default=None,
+ help="Enable registration for new users."
+ )
+
+ def read_arguments(self, args):
+ if args.enable_registration is not None:
+ self.disable_registration = not bool(
+ strtobool(str(args.enable_registration))
+ )
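
Editor's note: a worked example of the precedence implemented in read_config() above; when the legacy disable_registration key is present it wins over enable_registration (toy dict, not upstream code):

    from distutils.util import strtobool

    config = {"enable_registration": "False", "disable_registration": "false"}

    disable = not bool(strtobool(str(config["enable_registration"])))
    if "disable_registration" in config:
        disable = bool(strtobool(str(config["disable_registration"])))

    print(disable)  # False: the legacy key overrides, registration stays on
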
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
new file mode 100644
index 00000000..2fcf8724
--- /dev/null
+++ b/synapse/config/repository.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 matrix.org
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+from collections import namedtuple
+
+ThumbnailRequirement = namedtuple(
+ "ThumbnailRequirement", ["width", "height", "method", "media_type"]
+)
+
+
+def parse_thumbnail_requirements(thumbnail_sizes):
+ """ Takes a list of dictionaries with "width", "height", and "method" keys
+    and creates a map from image media types to the thumbnail size, thumbnailing
+ method, and thumbnail media type to precalculate
+
+ Args:
+ thumbnail_sizes(list): List of dicts with "width", "height", and
+ "method" keys
+ Returns:
+ Dictionary mapping from media type string to list of
+ ThumbnailRequirement tuples.
+ """
+ requirements = {}
+ for size in thumbnail_sizes:
+ width = size["width"]
+ height = size["height"]
+ method = size["method"]
+ jpeg_thumbnail = ThumbnailRequirement(width, height, method, "image/jpeg")
+ png_thumbnail = ThumbnailRequirement(width, height, method, "image/png")
+ requirements.setdefault("image/jpeg", []).append(jpeg_thumbnail)
+ requirements.setdefault("image/gif", []).append(png_thumbnail)
+ requirements.setdefault("image/png", []).append(png_thumbnail)
+ return {
+ media_type: tuple(thumbnails)
+ for media_type, thumbnails in requirements.items()
+ }
+
+
+class ContentRepositoryConfig(Config):
+ def read_config(self, config):
+ self.max_upload_size = self.parse_size(config["max_upload_size"])
+ self.max_image_pixels = self.parse_size(config["max_image_pixels"])
+ self.media_store_path = self.ensure_directory(config["media_store_path"])
+ self.uploads_path = self.ensure_directory(config["uploads_path"])
+ self.dynamic_thumbnails = config["dynamic_thumbnails"]
+ self.thumbnail_requirements = parse_thumbnail_requirements(
+ config["thumbnail_sizes"]
+ )
+
+ def default_config(self, **kwargs):
+ media_store = self.default_path("media_store")
+ uploads_path = self.default_path("uploads")
+ return """
+ # Directory where uploaded images and attachments are stored.
+ media_store_path: "%(media_store)s"
+
+ # Directory where in-progress uploads are stored.
+ uploads_path: "%(uploads_path)s"
+
+ # The largest allowed upload size in bytes
+ max_upload_size: "10M"
+
+ # Maximum number of pixels that will be thumbnailed
+ max_image_pixels: "32M"
+
+ # Whether to generate new thumbnails on the fly to precisely match
+ # the resolution requested by the client. If true then whenever
+ # a new resolution is requested by the client the server will
+ # generate a new thumbnail. If false the server will pick a thumbnail
+        # from a precalculated list.
+ dynamic_thumbnails: false
+
+        # List of thumbnail sizes to precalculate when an image is uploaded.
+ thumbnail_sizes:
+ - width: 32
+ height: 32
+ method: crop
+ - width: 96
+ height: 96
+ method: crop
+ - width: 320
+ height: 240
+ method: scale
+ - width: 640
+ height: 480
+ method: scale
+ """ % locals()
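
Editor's note: usage sketch for parse_thumbnail_requirements() above (assumes synapse is on the import path):

    from synapse.config.repository import parse_thumbnail_requirements

    requirements = parse_thumbnail_requirements([
        {"width": 32, "height": 32, "method": "crop"},
        {"width": 320, "height": 240, "method": "scale"},
    ])
    for media_type, thumbnails in sorted(requirements.items()):
        print(media_type, [(t.width, t.height, t.method) for t in thumbnails])
    # Note that "image/gif" maps to the PNG requirements: GIF uploads are
    # thumbnailed as PNG.
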
diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py
new file mode 100644
index 00000000..8d7f4430
--- /dev/null
+++ b/synapse/config/saml2.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class SAML2Config(Config):
+ """SAML2 Configuration
+    Synapse uses the pysaml2 library to provide SAML2 support.
+
+ config_path: Path to the sp_conf.py configuration file
+ idp_redirect_url: Identity provider URL which will redirect
+ the user back to /login/saml2 with proper info.
+
+    The sp_conf.py file looks something like:
+ https://github.com/rohe/pysaml2/blob/master/example/sp-repoze/sp_conf.py.example
+
+ More information: https://pythonhosted.org/pysaml2/howto/config.html
+ """
+
+ def read_config(self, config):
+ saml2_config = config.get("saml2_config", None)
+ if saml2_config:
+ self.saml2_enabled = saml2_config.get("enabled", True)
+ self.saml2_config_path = saml2_config["config_path"]
+ self.saml2_idp_redirect_url = saml2_config["idp_redirect_url"]
+ else:
+ self.saml2_enabled = False
+ self.saml2_config_path = None
+ self.saml2_idp_redirect_url = None
+
+ def default_config(self, config_dir_path, server_name, **kwargs):
+ return """
+ # Enable SAML2 for registration and login. Uses pysaml2
+ # config_path: Path to the sp_conf.py configuration file
+ # idp_redirect_url: Identity provider URL which will redirect
+ # the user back to /login/saml2 with proper info.
+ # See pysaml2 docs for format of config.
+ #saml2_config:
+ # enabled: true
+ # config_path: "%s/sp_conf.py"
+ # idp_redirect_url: "http://%s/idp"
+ """ % (config_dir_path, server_name)
diff --git a/synapse/config/server.py b/synapse/config/server.py
new file mode 100644
index 00000000..5c2d6bfe
--- /dev/null
+++ b/synapse/config/server.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class ServerConfig(Config):
+
+ def read_config(self, config):
+ self.server_name = config["server_name"]
+ self.pid_file = self.abspath(config.get("pid_file"))
+ self.web_client = config["web_client"]
+ self.web_client_location = config.get("web_client_location", None)
+ self.soft_file_limit = config["soft_file_limit"]
+ self.daemonize = config.get("daemonize")
+ self.print_pidfile = config.get("print_pidfile")
+ self.user_agent_suffix = config.get("user_agent_suffix")
+ self.use_frozen_dicts = config.get("use_frozen_dicts", True)
+
+ self.listeners = config.get("listeners", [])
+
+ bind_port = config.get("bind_port")
+ if bind_port:
+ self.listeners = []
+ bind_host = config.get("bind_host", "")
+ gzip_responses = config.get("gzip_responses", True)
+
+ names = ["client", "webclient"] if self.web_client else ["client"]
+
+ self.listeners.append({
+ "port": bind_port,
+ "bind_address": bind_host,
+ "tls": True,
+ "type": "http",
+ "resources": [
+ {
+ "names": names,
+ "compress": gzip_responses,
+ },
+ {
+ "names": ["federation"],
+ "compress": False,
+ }
+ ]
+ })
+
+ unsecure_port = config.get("unsecure_port", bind_port - 400)
+ if unsecure_port:
+ self.listeners.append({
+ "port": unsecure_port,
+ "bind_address": bind_host,
+ "tls": False,
+ "type": "http",
+ "resources": [
+ {
+ "names": names,
+ "compress": gzip_responses,
+ },
+ {
+ "names": ["federation"],
+ "compress": False,
+ }
+ ]
+ })
+
+ manhole = config.get("manhole")
+ if manhole:
+ self.listeners.append({
+ "port": manhole,
+ "bind_address": "127.0.0.1",
+ "type": "manhole",
+ })
+
+ metrics_port = config.get("metrics_port")
+ if metrics_port:
+ self.listeners.append({
+ "port": metrics_port,
+ "bind_address": config.get("metrics_bind_host", "127.0.0.1"),
+ "tls": False,
+ "type": "http",
+ "resources": [
+ {
+ "names": ["metrics"],
+ "compress": False,
+ },
+ ]
+ })
+
+        # Attempt to guess the content_addr for the v0 content repository
+ content_addr = config.get("content_addr")
+ if not content_addr:
+ for listener in self.listeners:
+ if listener["type"] == "http" and not listener.get("tls", False):
+ unsecure_port = listener["port"]
+ break
+ else:
+ raise RuntimeError("Could not determine 'content_addr'")
+
+            # Strip any explicit port from the server_name and use the
+            # unsecure port instead.
+            host = "%s:%d" % (self.server_name.split(':')[0], unsecure_port)
+ content_addr = "http://%s" % (host,)
+
+ self.content_addr = content_addr
+
+ def default_config(self, server_name, **kwargs):
+ if ":" in server_name:
+ bind_port = int(server_name.split(":")[1])
+ unsecure_port = bind_port - 400
+ else:
+ bind_port = 8448
+ unsecure_port = 8008
+
+ pid_file = self.abspath("homeserver.pid")
+ return """\
+ ## Server ##
+
+ # The domain name of the server, with optional explicit port.
+ # This is used by remote servers to connect to this server,
+ # e.g. matrix.org, localhost:8080, etc.
+ server_name: "%(server_name)s"
+
+ # When running as a daemon, the file to store the pid in
+ pid_file: %(pid_file)s
+
+ # Whether to serve a web client from the HTTP/HTTPS root resource.
+ web_client: True
+
+ # Set the soft limit on the number of file descriptors synapse can use
+ # Zero is used to indicate synapse should set the soft limit to the
+ # hard limit.
+ soft_file_limit: 0
+
+ # List of ports that Synapse should listen on, their purpose and their
+ # configuration.
+ listeners:
+ # Main HTTPS listener
+ # For when matrix traffic is sent directly to synapse.
+ -
+ # The port to listen for HTTPS requests on.
+ port: %(bind_port)s
+
+ # Local interface to listen on.
+ # The empty string will cause synapse to listen on all interfaces.
+ bind_address: ''
+
+ # This is a 'http' listener, allows us to specify 'resources'.
+ type: http
+
+ tls: true
+
+ # Use the X-Forwarded-For (XFF) header as the client IP and not the
+ # actual client IP.
+ x_forwarded: false
+
+ # List of HTTP resources to serve on this listener.
+ resources:
+ -
+ # List of resources to host on this listener.
+ names:
+ - client # The client-server APIs, both v1 and v2
+ - webclient # The bundled webclient.
+
+ # Should synapse compress HTTP responses to clients that support it?
+ # This should be disabled if running synapse behind a load balancer
+ # that can do automatic compression.
+ compress: true
+
+ - names: [federation] # Federation APIs
+ compress: false
+
+          # Unsecure HTTP listener.
+          # For when matrix traffic passes through a load balancer that unwraps TLS.
+ - port: %(unsecure_port)s
+ tls: false
+ bind_address: ''
+ type: http
+
+ x_forwarded: false
+
+ resources:
+ - names: [client, webclient]
+ compress: true
+ - names: [federation]
+ compress: false
+
+ # Turn on the twisted telnet manhole service on localhost on the given
+ # port.
+ # - port: 9000
+ # bind_address: 127.0.0.1
+ # type: manhole
+ """ % locals()
+
+ def read_arguments(self, args):
+ if args.manhole is not None:
+ self.manhole = args.manhole
+ if args.daemonize is not None:
+ self.daemonize = args.daemonize
+ if args.print_pidfile is not None:
+ self.print_pidfile = args.print_pidfile
+
+ def add_arguments(self, parser):
+ server_group = parser.add_argument_group("server")
+ server_group.add_argument("-D", "--daemonize", action='store_true',
+ default=None,
+ help="Daemonize the home server")
+ server_group.add_argument("--print-pidfile", action='store_true',
+ default=None,
+ help="Print the path to the pidfile just"
+ " before daemonizing")
+ server_group.add_argument("--manhole", metavar="PORT", dest="manhole",
+ type=int,
+ help="Turn on the twisted telnet manhole"
+ " service on the given port.")
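
Editor's note: to make the backwards-compatibility branch in read_config() concrete, this is roughly the listeners list that a legacy "bind_port: 8448" config expands into (toy values, not upstream code):

    bind_port = 8448
    unsecure_port = bind_port - 400   # unless "unsecure_port" is set explicitly
    resources = [
        {"names": ["client", "webclient"], "compress": True},
        {"names": ["federation"], "compress": False},
    ]
    listeners = [
        {"port": bind_port, "bind_address": "", "tls": True,
         "type": "http", "resources": resources},
        {"port": unsecure_port, "bind_address": "", "tls": False,
         "type": "http", "resources": resources},
    ]
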
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
new file mode 100644
index 00000000..0ac26982
--- /dev/null
+++ b/synapse/config/tls.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+from OpenSSL import crypto
+import subprocess
+import os
+
+GENERATE_DH_PARAMS = False
+
+
+class TlsConfig(Config):
+ def read_config(self, config):
+ self.tls_certificate = self.read_tls_certificate(
+ config.get("tls_certificate_path")
+ )
+ self.tls_certificate_file = config.get("tls_certificate_path")
+
+ self.no_tls = config.get("no_tls", False)
+
+ if self.no_tls:
+ self.tls_private_key = None
+ else:
+ self.tls_private_key = self.read_tls_private_key(
+ config.get("tls_private_key_path")
+ )
+
+ self.tls_dh_params_path = self.check_file(
+ config.get("tls_dh_params_path"), "tls_dh_params"
+ )
+
+ # This config option applies to non-federation HTTP clients
+ # (e.g. for talking to recaptcha, identity servers, and such)
+ # It should never be used in production, and is intended for
+ # use only when running tests.
+ self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
+ "use_insecure_ssl_client_just_for_testing_do_not_use"
+ )
+
+ def default_config(self, config_dir_path, server_name, **kwargs):
+ base_key_name = os.path.join(config_dir_path, server_name)
+
+ tls_certificate_path = base_key_name + ".tls.crt"
+ tls_private_key_path = base_key_name + ".tls.key"
+ tls_dh_params_path = base_key_name + ".tls.dh"
+
+ return """\
+ # PEM encoded X509 certificate for TLS.
+ # You can replace the self-signed certificate that synapse
+ # autogenerates on launch with your own SSL certificate + key pair
+ # if you like. Any required intermediary certificates can be
+ # appended after the primary certificate in hierarchical order.
+ tls_certificate_path: "%(tls_certificate_path)s"
+
+ # PEM encoded private key for TLS
+ tls_private_key_path: "%(tls_private_key_path)s"
+
+ # PEM dh parameters for ephemeral keys
+ tls_dh_params_path: "%(tls_dh_params_path)s"
+
+ # Don't bind to the https port
+ no_tls: False
+ """ % locals()
+
+ def read_tls_certificate(self, cert_path):
+ cert_pem = self.read_file(cert_path, "tls_certificate")
+ return crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
+
+ def read_tls_private_key(self, private_key_path):
+ private_key_pem = self.read_file(private_key_path, "tls_private_key")
+ return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
+
+ def generate_files(self, config):
+ tls_certificate_path = config["tls_certificate_path"]
+ tls_private_key_path = config["tls_private_key_path"]
+ tls_dh_params_path = config["tls_dh_params_path"]
+
+ if not os.path.exists(tls_private_key_path):
+ with open(tls_private_key_path, "w") as private_key_file:
+ tls_private_key = crypto.PKey()
+ tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
+ private_key_pem = crypto.dump_privatekey(
+ crypto.FILETYPE_PEM, tls_private_key
+ )
+ private_key_file.write(private_key_pem)
+ else:
+ with open(tls_private_key_path) as private_key_file:
+ private_key_pem = private_key_file.read()
+ tls_private_key = crypto.load_privatekey(
+ crypto.FILETYPE_PEM, private_key_pem
+ )
+
+ if not os.path.exists(tls_certificate_path):
+ with open(tls_certificate_path, "w") as certificate_file:
+ cert = crypto.X509()
+ subject = cert.get_subject()
+ subject.CN = config["server_name"]
+
+ cert.set_serial_number(1000)
+ cert.gmtime_adj_notBefore(0)
+ cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
+ cert.set_issuer(cert.get_subject())
+ cert.set_pubkey(tls_private_key)
+
+ cert.sign(tls_private_key, 'sha256')
+
+ cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
+
+ certificate_file.write(cert_pem)
+
+ if not os.path.exists(tls_dh_params_path):
+ if GENERATE_DH_PARAMS:
+ subprocess.check_call([
+ "openssl", "dhparam",
+ "-outform", "PEM",
+ "-out", tls_dh_params_path,
+ "2048"
+ ])
+ else:
+ with open(tls_dh_params_path, "w") as dh_params_file:
+ dh_params_file.write(
+ "2048-bit DH parameters taken from rfc3526\n"
+ "-----BEGIN DH PARAMETERS-----\n"
+ "MIIBCAKCAQEA///////////JD9qiIWjC"
+ "NMTGYouA3BzRKQJOCIpnzHQCC76mOxOb\n"
+ "IlFKCHmONATd75UZs806QxswKwpt8l8U"
+ "N0/hNW1tUcJF5IW1dmJefsb0TELppjft\n"
+ "awv/XLb0Brft7jhr+1qJn6WunyQRfEsf"
+ "5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT\n"
+ "mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVS"
+ "u57VKQdwlpZtZww1Tkq8mATxdGwIyhgh\n"
+ "fDKQXkYuNs474553LBgOhgObJ4Oi7Aei"
+ "j7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq\n"
+ "5RXSJhiY+gUQFXKOWoqsqmj/////////"
+ "/wIBAg==\n"
+ "-----END DH PARAMETERS-----\n"
+ )
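
Editor's note: a quick way to inspect what generate_files() above wrote, using the same pyOpenSSL API (placeholder path):

    from OpenSSL import crypto

    with open("demo.tls.crt") as cert_file:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_file.read())

    print(cert.get_subject().CN)   # the configured server_name
    print(cert.get_notAfter())     # roughly ten years out, per the code above
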
diff --git a/synapse/config/voip.py b/synapse/config/voip.py
new file mode 100644
index 00000000..a093354c
--- /dev/null
+++ b/synapse/config/voip.py
@@ -0,0 +1,37 @@
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class VoipConfig(Config):
+
+ def read_config(self, config):
+ self.turn_uris = config.get("turn_uris", [])
+ self.turn_shared_secret = config["turn_shared_secret"]
+ self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
+
+ def default_config(self, **kwargs):
+ return """\
+ ## Turn ##
+
+ # The public URIs of the TURN server to give to clients
+ turn_uris: []
+
+ # The shared secret used to compute passwords for the TURN server
+ turn_shared_secret: "YOUR_SHARED_SECRET"
+
+ # How long generated TURN credentials last
+ turn_user_lifetime: "1h"
+ """
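
Editor's note: on how turn_shared_secret and turn_user_lifetime are typically consumed. The handler (which lives outside this file) mints time-limited credentials following the common "TURN REST API" convention; a hedged sketch of that scheme, with placeholder user and secret:

    import base64
    import hashlib
    import hmac
    import time

    turn_shared_secret = b"YOUR_SHARED_SECRET"
    lifetime_seconds = 3600                      # turn_user_lifetime: "1h"

    expiry = int(time.time()) + lifetime_seconds
    username = "%d:@alice:example.com" % expiry  # expiry-prefixed user ID
    password = base64.b64encode(
        hmac.new(turn_shared_secret, username.encode("ascii"),
                 hashlib.sha1).digest()
    )
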
diff --git a/synapse/crypto/__init__.py b/synapse/crypto/__init__.py
new file mode 100644
index 00000000..c488b10d
--- /dev/null
+++ b/synapse/crypto/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
new file mode 100644
index 00000000..c4390f3b
--- /dev/null
+++ b/synapse/crypto/context_factory.py
@@ -0,0 +1,49 @@
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import ssl
+from OpenSSL import SSL
+from twisted.internet._sslverify import _OpenSSLECCurve, _defaultCurveName
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class ServerContextFactory(ssl.ContextFactory):
+ """Factory for PyOpenSSL SSL contexts that are used to handle incoming
+ connections and to make connections to remote servers."""
+
+ def __init__(self, config):
+ self._context = SSL.Context(SSL.SSLv23_METHOD)
+ self.configure_context(self._context, config)
+
+ @staticmethod
+ def configure_context(context, config):
+ try:
+ _ecCurve = _OpenSSLECCurve(_defaultCurveName)
+ _ecCurve.addECKeyToContext(context)
+        except Exception:
+ logger.exception("Failed to enable elliptic curve for TLS")
+ context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
+ context.use_certificate_chain_file(config.tls_certificate_file)
+
+ if not config.no_tls:
+ context.use_privatekey(config.tls_private_key)
+
+ context.load_tmp_dh(config.tls_dh_params_path)
+ context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
+
+ def getContext(self):
+ return self._context
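
Editor's note: anything implementing twisted's ssl.ContextFactory can be handed to listenSSL, which is where this factory plugs in. A sketch with a stand-in settings object exposing just the four attributes configure_context() reads; the referenced files must already exist (e.g. from --generate-config):

    from twisted.internet import reactor
    from twisted.web.resource import Resource
    from twisted.web.server import Site

    from synapse.crypto.context_factory import ServerContextFactory

    class TlsSettings(object):
        tls_certificate_file = "demo.tls.crt"  # placeholder paths
        tls_dh_params_path = "demo.tls.dh"
        tls_private_key = None
        no_tls = True                          # skips use_privatekey()

    reactor.listenSSL(8448, Site(Resource()), ServerContextFactory(TlsSettings()))
    # reactor.run()
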
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
new file mode 100644
index 00000000..64e40864
--- /dev/null
+++ b/synapse/crypto/event_signing.py
@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from synapse.api.errors import SynapseError, Codes
+from synapse.events.utils import prune_event
+
+from canonicaljson import encode_canonical_json
+from unpaddedbase64 import encode_base64, decode_base64
+from signedjson.sign import sign_json
+
+import hashlib
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
+ """Check whether the hash for this PDU matches the contents"""
+ name, expected_hash = compute_content_hash(event, hash_algorithm)
+ logger.debug("Expecting hash: %s", encode_base64(expected_hash))
+ if name not in event.hashes:
+ raise SynapseError(
+ 400,
+ "Algorithm %s not in hashes %s" % (
+ name, list(event.hashes),
+ ),
+ Codes.UNAUTHORIZED,
+ )
+ message_hash_base64 = event.hashes[name]
+ try:
+ message_hash_bytes = decode_base64(message_hash_base64)
+    except Exception:
+ raise SynapseError(
+ 400,
+ "Invalid base64: %s" % (message_hash_base64,),
+ Codes.UNAUTHORIZED,
+ )
+ return message_hash_bytes == expected_hash
+
+
+def compute_content_hash(event, hash_algorithm):
+ event_json = event.get_pdu_json()
+ event_json.pop("age_ts", None)
+ event_json.pop("unsigned", None)
+ event_json.pop("signatures", None)
+ event_json.pop("hashes", None)
+ event_json.pop("outlier", None)
+ event_json.pop("destinations", None)
+
+ event_json_bytes = encode_canonical_json(event_json)
+
+ hashed = hash_algorithm(event_json_bytes)
+ return (hashed.name, hashed.digest())
+
+
+def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
+ tmp_event = prune_event(event)
+ event_json = tmp_event.get_pdu_json()
+ event_json.pop("signatures", None)
+ event_json.pop("age_ts", None)
+ event_json.pop("unsigned", None)
+ event_json_bytes = encode_canonical_json(event_json)
+ hashed = hash_algorithm(event_json_bytes)
+ return (hashed.name, hashed.digest())
+
+
+def compute_event_signature(event, signature_name, signing_key):
+ tmp_event = prune_event(event)
+ redact_json = tmp_event.get_pdu_json()
+ redact_json.pop("age_ts", None)
+ redact_json.pop("unsigned", None)
+ logger.debug("Signing event: %s", encode_canonical_json(redact_json))
+ redact_json = sign_json(redact_json, signature_name, signing_key)
+ logger.debug("Signed event: %s", encode_canonical_json(redact_json))
+ return redact_json["signatures"]
+
+
+def add_hashes_and_signatures(event, signature_name, signing_key,
+ hash_algorithm=hashlib.sha256):
+ # if hasattr(event, "old_state_events"):
+ # state_json_bytes = encode_canonical_json(
+ # [e.event_id for e in event.old_state_events.values()]
+ # )
+ # hashed = hash_algorithm(state_json_bytes)
+ # event.state_hash = {
+ # hashed.name: encode_base64(hashed.digest())
+ # }
+
+ name, digest = compute_content_hash(event, hash_algorithm=hash_algorithm)
+
+ if not hasattr(event, "hashes"):
+ event.hashes = {}
+ event.hashes[name] = encode_base64(digest)
+
+ event.signatures = compute_event_signature(
+ event,
+ signature_name=signature_name,
+ signing_key=signing_key,
+ )
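
Editor's note: compute_content_hash() above reduces to canonical JSON plus SHA-256; a standalone re-derivation on a toy event dict (illustrative field values only):

    import hashlib

    from canonicaljson import encode_canonical_json
    from unpaddedbase64 import encode_base64

    event_json = {
        "type": "m.room.message",
        "content": {"body": "hello"},
        "signatures": {},            # stripped before hashing, as above
        "hashes": {},
    }
    for key in ("age_ts", "unsigned", "signatures", "hashes",
                "outlier", "destinations"):
        event_json.pop(key, None)

    hashed = hashlib.sha256(encode_canonical_json(event_json))
    print(hashed.name, encode_base64(hashed.digest()))
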
diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py
new file mode 100644
index 00000000..24f15f31
--- /dev/null
+++ b/synapse/crypto/keyclient.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.web.http import HTTPClient
+from twisted.internet.protocol import Factory
+from twisted.internet import defer, reactor
+from synapse.http.endpoint import matrix_federation_endpoint
+from synapse.util.logcontext import (
+ preserve_context_over_fn, preserve_context_over_deferred
+)
+import simplejson as json
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+KEY_API_V1 = b"/_matrix/key/v1/"
+
+
+@defer.inlineCallbacks
+def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):
+ """Fetch the keys for a remote server."""
+
+ factory = SynapseKeyClientFactory()
+ factory.path = path
+ endpoint = matrix_federation_endpoint(
+ reactor, server_name, ssl_context_factory, timeout=30
+ )
+
+ for i in range(5):
+ try:
+ protocol = yield preserve_context_over_fn(
+ endpoint.connect, factory
+ )
+ server_response, server_certificate = yield preserve_context_over_deferred(
+ protocol.remote_key
+ )
+            defer.returnValue((server_response, server_certificate))
+ except SynapseKeyClientError as e:
+ logger.exception("Error getting key for %r" % (server_name,))
+ if e.status.startswith("4"):
+ # Don't retry for 4xx responses.
+ raise IOError("Cannot get key for %r" % server_name)
+ except Exception as e:
+ logger.exception(e)
+ raise IOError("Cannot get key for %r" % server_name)
+
+
+class SynapseKeyClientError(Exception):
+ """The key wasn't retrieved from the remote server."""
+    status = None
+
+
+class SynapseKeyClientProtocol(HTTPClient):
+ """Low level HTTPS client which retrieves an application/json response from
+ the server and extracts the X.509 certificate for the remote peer from the
+ SSL connection."""
+
+ timeout = 30
+
+ def __init__(self):
+ self.remote_key = defer.Deferred()
+ self.host = None
+
+ def connectionMade(self):
+ self.host = self.transport.getHost()
+ logger.debug("Connected to %s", self.host)
+ self.sendCommand(b"GET", self.path)
+ self.endHeaders()
+ self.timer = reactor.callLater(
+ self.timeout,
+ self.on_timeout
+ )
+
+ def errback(self, error):
+ if not self.remote_key.called:
+ self.remote_key.errback(error)
+
+ def callback(self, result):
+ if not self.remote_key.called:
+ self.remote_key.callback(result)
+
+ def handleStatus(self, version, status, message):
+ if status != b"200":
+ # logger.info("Non-200 response from %s: %s %s",
+ # self.transport.getHost(), status, message)
+ error = SynapseKeyClientError(
+ "Non-200 response %r from %r" % (status, self.host)
+ )
+ error.status = status
+ self.errback(error)
+ self.transport.abortConnection()
+
+ def handleResponse(self, response_body_bytes):
+ try:
+ json_response = json.loads(response_body_bytes)
+ except ValueError:
+ # logger.info("Invalid JSON response from %s",
+ # self.transport.getHost())
+ self.transport.abortConnection()
+ return
+
+ certificate = self.transport.getPeerCertificate()
+ self.callback((json_response, certificate))
+ self.transport.abortConnection()
+ self.timer.cancel()
+
+ def on_timeout(self):
+ logger.debug("Timeout waiting for response from %s", self.host)
+ self.errback(IOError("Timeout waiting for response"))
+ self.transport.abortConnection()
+
+
+class SynapseKeyClientFactory(Factory):
+ def protocol(self):
+ protocol = SynapseKeyClientProtocol()
+ protocol.path = self.path
+ return protocol
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
new file mode 100644
index 00000000..8b6a5986
--- /dev/null
+++ b/synapse/crypto/keyring.py
@@ -0,0 +1,686 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.crypto.keyclient import fetch_server_key
+from synapse.api.errors import SynapseError, Codes
+from synapse.util.retryutils import get_retry_limiter
+from synapse.util import unwrapFirstError
+from synapse.util.async import ObservableDeferred
+
+from twisted.internet import defer
+
+from signedjson.sign import (
+ verify_signed_json, signature_ids, sign_json, encode_canonical_json
+)
+from signedjson.key import (
+ is_signing_algorithm_supported, decode_verify_key_bytes
+)
+from unpaddedbase64 import decode_base64, encode_base64
+
+from OpenSSL import crypto
+
+from collections import namedtuple
+import urllib
+import hashlib
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+KeyGroup = namedtuple("KeyGroup", ("server_name", "group_id", "key_ids"))
+
+
+class Keyring(object):
+ def __init__(self, hs):
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+ self.client = hs.get_http_client()
+ self.config = hs.get_config()
+ self.perspective_servers = self.config.perspectives
+ self.hs = hs
+
+ self.key_downloads = {}
+
+ def verify_json_for_server(self, server_name, json_object):
+ return self.verify_json_objects_for_server(
+ [(server_name, json_object)]
+ )[0]
+
+ def verify_json_objects_for_server(self, server_and_json):
+        """Bulk verifies signatures of json objects, bulk fetching keys as
+ necessary.
+
+ Args:
+ server_and_json (list): List of pairs of (server_name, json_object)
+
+ Returns:
+ list of deferreds indicating success or failure to verify each
+ json object's signature for the given server_name.
+ """
+ group_id_to_json = {}
+ group_id_to_group = {}
+ group_ids = []
+
+ next_group_id = 0
+ deferreds = {}
+
+ for server_name, json_object in server_and_json:
+ logger.debug("Verifying for %s", server_name)
+ group_id = next_group_id
+ next_group_id += 1
+ group_ids.append(group_id)
+
+ key_ids = signature_ids(json_object, server_name)
+ if not key_ids:
+ deferreds[group_id] = defer.fail(SynapseError(
+ 400,
+ "Not signed with a supported algorithm",
+ Codes.UNAUTHORIZED,
+ ))
+ else:
+ deferreds[group_id] = defer.Deferred()
+
+ group = KeyGroup(server_name, group_id, key_ids)
+
+ group_id_to_group[group_id] = group
+ group_id_to_json[group_id] = json_object
+
+ @defer.inlineCallbacks
+ def handle_key_deferred(group, deferred):
+ server_name = group.server_name
+ try:
+ _, _, key_id, verify_key = yield deferred
+ except IOError as e:
+ logger.warn(
+ "Got IOError when downloading keys for %s: %s %s",
+ server_name, type(e).__name__, str(e.message),
+ )
+ raise SynapseError(
+ 502,
+ "Error downloading keys for %s" % (server_name,),
+ Codes.UNAUTHORIZED,
+ )
+ except Exception as e:
+ logger.exception(
+ "Got Exception when downloading keys for %s: %s %s",
+ server_name, type(e).__name__, str(e.message),
+ )
+ raise SynapseError(
+ 401,
+                    "No key for %s with id %s" % (server_name, group.key_ids),
+ Codes.UNAUTHORIZED,
+ )
+
+ json_object = group_id_to_json[group.group_id]
+
+ try:
+ verify_signed_json(json_object, server_name, verify_key)
+ except:
+ raise SynapseError(
+ 401,
+ "Invalid signature for server %s with key %s:%s" % (
+ server_name, verify_key.alg, verify_key.version
+ ),
+ Codes.UNAUTHORIZED,
+ )
+
+ server_to_deferred = {
+ server_name: defer.Deferred()
+ for server_name, _ in server_and_json
+ }
+
+ # We want to wait for any previous lookups to complete before
+ # proceeding.
+ wait_on_deferred = self.wait_for_previous_lookups(
+ [server_name for server_name, _ in server_and_json],
+ server_to_deferred,
+ )
+
+ # Actually start fetching keys.
+ wait_on_deferred.addBoth(
+ lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
+ )
+
+ # When we've finished fetching all the keys for a given server_name,
+ # resolve the deferred passed to `wait_for_previous_lookups` so that
+ # any lookups waiting will proceed.
+ server_to_gids = {}
+
+ def remove_deferreds(res, server_name, group_id):
+ server_to_gids[server_name].discard(group_id)
+ if not server_to_gids[server_name]:
+ d = server_to_deferred.pop(server_name, None)
+ if d:
+ d.callback(None)
+ return res
+
+ for g_id, deferred in deferreds.items():
+ server_name = group_id_to_group[g_id].server_name
+ server_to_gids.setdefault(server_name, set()).add(g_id)
+ deferred.addBoth(remove_deferreds, server_name, g_id)
+
+ # Pass those keys to handle_key_deferred so that the json object
+ # signatures can be verified
+ return [
+ handle_key_deferred(
+ group_id_to_group[g_id],
+ deferreds[g_id],
+ )
+ for g_id in group_ids
+ ]
+
+ @defer.inlineCallbacks
+ def wait_for_previous_lookups(self, server_names, server_to_deferred):
+ """Waits for any previous key lookups for the given servers to finish.
+
+ Args:
+            server_names (list): list of server_names we want to look up
+ server_to_deferred (dict): server_name to deferred which gets
+ resolved once we've finished looking up keys for that server
+ """
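+        # Loop rather than waiting once: new lookups for the same servers may
+        # have been registered while we were waiting.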
+ while True:
+ wait_on = [
+ self.key_downloads[server_name]
+ for server_name in server_names
+ if server_name in self.key_downloads
+ ]
+ if wait_on:
+ yield defer.DeferredList(wait_on)
+ else:
+ break
+
+ for server_name, deferred in server_to_deferred.items():
+ d = ObservableDeferred(deferred)
+ self.key_downloads[server_name] = d
+
+ def rm(r, server_name):
+ self.key_downloads.pop(server_name, None)
+ return r
+
+ d.addBoth(rm, server_name)
+
+ def get_server_verify_keys(self, group_id_to_group, group_id_to_deferred):
+ """Takes a dict of KeyGroups and tries to find at least one key for
+ each group.
+ """
+
+ # These are functions that produce keys given a list of key ids
+ key_fetch_fns = (
+ self.get_keys_from_store, # First try the local store
+ self.get_keys_from_perspectives, # Then try via perspectives
+ self.get_keys_from_server, # Then try directly
+ )
+
+ @defer.inlineCallbacks
+ def do_iterations():
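+            # merged_results accumulates server_name -> {key_id: VerifyKey}
+            # across each fetcher we try.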
+ merged_results = {}
+
+ missing_keys = {}
+ for group in group_id_to_group.values():
+                missing_keys.setdefault(group.server_name, set()).update(group.key_ids)
+
+ for fn in key_fetch_fns:
+ results = yield fn(missing_keys.items())
+ merged_results.update(results)
+
+ # We now need to figure out which groups we have keys for
+ # and which we don't
+ missing_groups = {}
+ for group in group_id_to_group.values():
+ for key_id in group.key_ids:
+ if key_id in merged_results[group.server_name]:
+ group_id_to_deferred[group.group_id].callback((
+ group.group_id,
+ group.server_name,
+ key_id,
+ merged_results[group.server_name][key_id],
+ ))
+ break
+ else:
+ missing_groups.setdefault(
+ group.server_name, []
+ ).append(group)
+
+ if not missing_groups:
+ break
+
+ missing_keys = {
+ server_name: set(
+ key_id for group in groups for key_id in group.key_ids
+ )
+ for server_name, groups in missing_groups.items()
+ }
+
+            for groups in missing_groups.values():
+                for group in groups:
+                    group_id_to_deferred[group.group_id].errback(SynapseError(
+                        401,
+                        "No key for %s with id %s" % (
+                            group.server_name, group.key_ids,
+                        ),
+                        Codes.UNAUTHORIZED,
+                    ))
+
+ def on_err(err):
+ for deferred in group_id_to_deferred.values():
+ if not deferred.called:
+ deferred.errback(err)
+
+ do_iterations().addErrback(on_err)
+
+ return group_id_to_deferred
+
+ @defer.inlineCallbacks
+ def get_keys_from_store(self, server_name_and_key_ids):
+ res = yield defer.gatherResults(
+ [
+ self.store.get_server_verify_keys(
+ server_name, key_ids
+ ).addCallback(lambda ks, server: (server, ks), server_name)
+ for server_name, key_ids in server_name_and_key_ids
+ ],
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError)
+
+ defer.returnValue(dict(res))
+
+ @defer.inlineCallbacks
+ def get_keys_from_perspectives(self, server_name_and_key_ids):
+ @defer.inlineCallbacks
+ def get_key(perspective_name, perspective_keys):
+ try:
+ result = yield self.get_server_verify_key_v2_indirect(
+ server_name_and_key_ids, perspective_name, perspective_keys
+ )
+ defer.returnValue(result)
+ except Exception as e:
+ logger.exception(
+ "Unable to get key from %r: %s %s",
+ perspective_name,
+ type(e).__name__, str(e.message),
+ )
+ defer.returnValue({})
+
+ results = yield defer.gatherResults(
+ [
+ get_key(p_name, p_keys)
+ for p_name, p_keys in self.perspective_servers.items()
+ ],
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError)
+
+ union_of_keys = {}
+ for result in results:
+ for server_name, keys in result.items():
+ union_of_keys.setdefault(server_name, {}).update(keys)
+
+ defer.returnValue(union_of_keys)
+
+ @defer.inlineCallbacks
+ def get_keys_from_server(self, server_name_and_key_ids):
+ @defer.inlineCallbacks
+ def get_key(server_name, key_ids):
+ limiter = yield get_retry_limiter(
+ server_name,
+ self.clock,
+ self.store,
+ )
+ with limiter:
+ keys = None
+ try:
+ keys = yield self.get_server_verify_key_v2_direct(
+ server_name, key_ids
+ )
+ except Exception as e:
+ logger.info(
+                    "Unable to get key %r for %r directly: %s %s",
+ key_ids, server_name,
+ type(e).__name__, str(e.message),
+ )
+
+ if not keys:
+ keys = yield self.get_server_verify_key_v1_direct(
+ server_name, key_ids
+ )
+
+ keys = {server_name: keys}
+
+ defer.returnValue(keys)
+
+ results = yield defer.gatherResults(
+ [
+ get_key(server_name, key_ids)
+ for server_name, key_ids in server_name_and_key_ids
+ ],
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError)
+
+ merged = {}
+ for result in results:
+ merged.update(result)
+
+ defer.returnValue({
+ server_name: keys
+ for server_name, keys in merged.items()
+ if keys
+ })
+
+ @defer.inlineCallbacks
+ def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
+ perspective_name,
+ perspective_keys):
+ limiter = yield get_retry_limiter(
+ perspective_name, self.clock, self.store
+ )
+
+ with limiter:
+ # TODO(mark): Set the minimum_valid_until_ts to that needed by
+ # the events being validated or the current time if validating
+ # an incoming request.
+ query_response = yield self.client.post_json(
+ destination=perspective_name,
+ path=b"/_matrix/key/v2/query",
+ data={
+ u"server_keys": {
+ server_name: {
+ key_id: {
+ u"minimum_valid_until_ts": 0
+ } for key_id in key_ids
+ }
+ for server_name, key_ids in server_names_and_key_ids
+ }
+ },
+ )
+
+ keys = {}
+
+ responses = query_response["server_keys"]
+
+ for response in responses:
+ if (u"signatures" not in response
+ or perspective_name not in response[u"signatures"]):
+ raise ValueError(
+ "Key response not signed by perspective server"
+ " %r" % (perspective_name,)
+ )
+
+ verified = False
+ for key_id in response[u"signatures"][perspective_name]:
+ if key_id in perspective_keys:
+ verify_signed_json(
+ response,
+ perspective_name,
+ perspective_keys[key_id]
+ )
+ verified = True
+
+ if not verified:
+                logger.info(
+ "Response from perspective server %r not signed with a"
+ " known key, signed with: %r, known keys: %r",
+ perspective_name,
+ list(response[u"signatures"][perspective_name]),
+ list(perspective_keys)
+ )
+ raise ValueError(
+ "Response not signed with a known key for perspective"
+ " server %r" % (perspective_name,)
+ )
+
+ processed_response = yield self.process_v2_response(
+ perspective_name, response
+ )
+
+ for server_name, response_keys in processed_response.items():
+ keys.setdefault(server_name, {}).update(response_keys)
+
+ yield defer.gatherResults(
+ [
+ self.store_keys(
+ server_name=server_name,
+ from_server=perspective_name,
+ verify_keys=response_keys,
+ )
+ for server_name, response_keys in keys.items()
+ ],
+ consumeErrors=True
+ ).addErrback(unwrapFirstError)
+
+ defer.returnValue(keys)
+
+ @defer.inlineCallbacks
+ def get_server_verify_key_v2_direct(self, server_name, key_ids):
+ keys = {}
+
+ for requested_key_id in key_ids:
+ if requested_key_id in keys:
+ continue
+
+ (response, tls_certificate) = yield fetch_server_key(
+ server_name, self.hs.tls_server_context_factory,
+ path=(b"/_matrix/key/v2/server/%s" % (
+ urllib.quote(requested_key_id),
+ )).encode("ascii"),
+ )
+
+ if (u"signatures" not in response
+ or server_name not in response[u"signatures"]):
+ raise ValueError("Key response not signed by remote server")
+
+ if "tls_fingerprints" not in response:
+ raise ValueError("Key response missing TLS fingerprints")
+
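+            # Check that the TLS certificate the server presented to us is
+            # listed in the sha256 fingerprints of the signed key response.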
+ certificate_bytes = crypto.dump_certificate(
+ crypto.FILETYPE_ASN1, tls_certificate
+ )
+ sha256_fingerprint = hashlib.sha256(certificate_bytes).digest()
+ sha256_fingerprint_b64 = encode_base64(sha256_fingerprint)
+
+ response_sha256_fingerprints = set()
+ for fingerprint in response[u"tls_fingerprints"]:
+ if u"sha256" in fingerprint:
+ response_sha256_fingerprints.add(fingerprint[u"sha256"])
+
+ if sha256_fingerprint_b64 not in response_sha256_fingerprints:
+ raise ValueError("TLS certificate not allowed by fingerprints")
+
+ response_keys = yield self.process_v2_response(
+ from_server=server_name,
+ requested_ids=[requested_key_id],
+ response_json=response,
+ )
+
+ keys.update(response_keys)
+
+ yield defer.gatherResults(
+ [
+ self.store_keys(
+ server_name=key_server_name,
+ from_server=server_name,
+ verify_keys=verify_keys,
+ )
+ for key_server_name, verify_keys in keys.items()
+ ],
+ consumeErrors=True
+ ).addErrback(unwrapFirstError)
+
+ defer.returnValue(keys)
+
+ @defer.inlineCallbacks
+ def process_v2_response(self, from_server, response_json,
+ requested_ids=[]):
+ time_now_ms = self.clock.time_msec()
+ response_keys = {}
+ verify_keys = {}
+ for key_id, key_data in response_json["verify_keys"].items():
+ if is_signing_algorithm_supported(key_id):
+ key_base64 = key_data["key"]
+ key_bytes = decode_base64(key_base64)
+ verify_key = decode_verify_key_bytes(key_id, key_bytes)
+ verify_key.time_added = time_now_ms
+ verify_keys[key_id] = verify_key
+
+ old_verify_keys = {}
+ for key_id, key_data in response_json["old_verify_keys"].items():
+ if is_signing_algorithm_supported(key_id):
+ key_base64 = key_data["key"]
+ key_bytes = decode_base64(key_base64)
+ verify_key = decode_verify_key_bytes(key_id, key_bytes)
+ verify_key.expired = key_data["expired_ts"]
+ verify_key.time_added = time_now_ms
+ old_verify_keys[key_id] = verify_key
+
+ results = {}
+ server_name = response_json["server_name"]
+ for key_id in response_json["signatures"].get(server_name, {}):
+ if key_id not in response_json["verify_keys"]:
+ raise ValueError(
+ "Key response must include verification keys for all"
+ " signatures"
+ )
+ if key_id in verify_keys:
+ verify_signed_json(
+ response_json,
+ server_name,
+ verify_keys[key_id]
+ )
+
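+        # Sign the response with our own key as well, so that we can later
+        # serve the cached json to other servers when asked about this
+        # server's keys.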
+ signed_key_json = sign_json(
+ response_json,
+ self.config.server_name,
+ self.config.signing_key[0],
+ )
+
+ signed_key_json_bytes = encode_canonical_json(signed_key_json)
+ ts_valid_until_ms = signed_key_json[u"valid_until_ts"]
+
+ updated_key_ids = set(requested_ids)
+ updated_key_ids.update(verify_keys)
+ updated_key_ids.update(old_verify_keys)
+
+ response_keys.update(verify_keys)
+ response_keys.update(old_verify_keys)
+
+ yield defer.gatherResults(
+ [
+ self.store.store_server_keys_json(
+ server_name=server_name,
+ key_id=key_id,
+                    from_server=from_server,
+ ts_now_ms=time_now_ms,
+ ts_expires_ms=ts_valid_until_ms,
+ key_json_bytes=signed_key_json_bytes,
+ )
+ for key_id in updated_key_ids
+ ],
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError)
+
+ results[server_name] = response_keys
+
+ defer.returnValue(results)
+
+ @defer.inlineCallbacks
+ def get_server_verify_key_v1_direct(self, server_name, key_ids):
+ """Finds a verification key for the server with one of the key ids.
+ Args:
+ server_name (str): The name of the server to fetch a key for.
+            key_ids (list of str): The key_ids to check for.
+ """
+
+ # Try to fetch the key from the remote server.
+
+ (response, tls_certificate) = yield fetch_server_key(
+ server_name, self.hs.tls_server_context_factory
+ )
+
+ # Check the response.
+
+ x509_certificate_bytes = crypto.dump_certificate(
+ crypto.FILETYPE_ASN1, tls_certificate
+ )
+
+ if ("signatures" not in response
+ or server_name not in response["signatures"]):
+ raise ValueError("Key response not signed by remote server")
+
+ if "tls_certificate" not in response:
+ raise ValueError("Key response missing TLS certificate")
+
+ tls_certificate_b64 = response["tls_certificate"]
+
+ if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
+ raise ValueError("TLS certificate doesn't match")
+
+ # Cache the result in the datastore.
+
+ time_now_ms = self.clock.time_msec()
+
+ verify_keys = {}
+ for key_id, key_base64 in response["verify_keys"].items():
+ if is_signing_algorithm_supported(key_id):
+ key_bytes = decode_base64(key_base64)
+ verify_key = decode_verify_key_bytes(key_id, key_bytes)
+ verify_key.time_added = time_now_ms
+ verify_keys[key_id] = verify_key
+
+ for key_id in response["signatures"][server_name]:
+ if key_id not in response["verify_keys"]:
+ raise ValueError(
+ "Key response must include verification keys for all"
+ " signatures"
+ )
+ if key_id in verify_keys:
+ verify_signed_json(
+ response,
+ server_name,
+ verify_keys[key_id]
+ )
+
+ yield self.store.store_server_certificate(
+ server_name,
+ server_name,
+ time_now_ms,
+ tls_certificate,
+ )
+
+ yield self.store_keys(
+ server_name=server_name,
+ from_server=server_name,
+ verify_keys=verify_keys,
+ )
+
+ defer.returnValue(verify_keys)
+
+ @defer.inlineCallbacks
+ def store_keys(self, server_name, from_server, verify_keys):
+ """Store a collection of verify keys for a given server
+ Args:
+            server_name (str): The name of the server the keys are for.
+            from_server (str): The server the keys were downloaded from.
+            verify_keys (dict): A mapping of key_id to VerifyKey.
+ Returns:
+ A deferred that completes when the keys are stored.
+ """
+ # TODO(markjh): Store whether the keys have expired.
+ yield defer.gatherResults(
+ [
+ self.store.store_server_verify_key(
+                    server_name, from_server, key.time_added, key
+ )
+ for key_id, key in verify_keys.items()
+ ],
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError)
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
new file mode 100644
index 00000000..3fb4b5e7
--- /dev/null
+++ b/synapse/events/__init__.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util.frozenutils import freeze
+
+
+# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
+# bugs where we accidentally share e.g. signature dicts. However, converting
+# a dict to frozen_dicts is expensive.
+USE_FROZEN_DICTS = True
+
+
+class _EventInternalMetadata(object):
+ def __init__(self, internal_metadata_dict):
+ self.__dict__ = dict(internal_metadata_dict)
+
+ def get_dict(self):
+ return dict(self.__dict__)
+
+ def is_outlier(self):
+ return hasattr(self, "outlier") and self.outlier
+
+
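+# Returns a property that proxies reads, writes and deletes of an attribute
+# through to the given key of the underlying `_event_dict`.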
+def _event_dict_property(key):
+ def getter(self):
+ return self._event_dict[key]
+
+ def setter(self, v):
+ self._event_dict[key] = v
+
+ def delete(self):
+ del self._event_dict[key]
+
+ return property(
+ getter,
+ setter,
+ delete,
+ )
+
+
+class EventBase(object):
+ def __init__(self, event_dict, signatures={}, unsigned={},
+ internal_metadata_dict={}, rejected_reason=None):
+ self.signatures = signatures
+ self.unsigned = unsigned
+ self.rejected_reason = rejected_reason
+
+ self._event_dict = event_dict
+
+ self.internal_metadata = _EventInternalMetadata(
+ internal_metadata_dict
+ )
+
+ auth_events = _event_dict_property("auth_events")
+ depth = _event_dict_property("depth")
+ content = _event_dict_property("content")
+ event_id = _event_dict_property("event_id")
+ hashes = _event_dict_property("hashes")
+ origin = _event_dict_property("origin")
+ origin_server_ts = _event_dict_property("origin_server_ts")
+ prev_events = _event_dict_property("prev_events")
+ prev_state = _event_dict_property("prev_state")
+ redacts = _event_dict_property("redacts")
+ room_id = _event_dict_property("room_id")
+ sender = _event_dict_property("sender")
+ state_key = _event_dict_property("state_key")
+ type = _event_dict_property("type")
+ user_id = _event_dict_property("sender")
+
+ @property
+ def membership(self):
+ return self.content["membership"]
+
+ def is_state(self):
+ return hasattr(self, "state_key") and self.state_key is not None
+
+ def get_dict(self):
+ d = dict(self._event_dict)
+ d.update({
+ "signatures": self.signatures,
+ "unsigned": dict(self.unsigned),
+ })
+
+ return d
+
+ def get(self, key, default):
+ return self._event_dict.get(key, default)
+
+ def get_internal_metadata_dict(self):
+ return self.internal_metadata.get_dict()
+
+ def get_pdu_json(self, time_now=None):
+ pdu_json = self.get_dict()
+
+ if time_now is not None and "age_ts" in pdu_json["unsigned"]:
+ age = time_now - pdu_json["unsigned"]["age_ts"]
+ pdu_json.setdefault("unsigned", {})["age"] = int(age)
+ del pdu_json["unsigned"]["age_ts"]
+
+ # This may be a frozen event
+ pdu_json["unsigned"].pop("redacted_because", None)
+
+ return pdu_json
+
+ def __set__(self, instance, value):
+ raise AttributeError("Unrecognized attribute %s" % (instance,))
+
+
+class FrozenEvent(EventBase):
+ def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
+ event_dict = dict(event_dict)
+
+ # Signatures is a dict of dicts, and this is faster than doing a
+ # copy.deepcopy
+ signatures = {
+ name: {sig_id: sig for sig_id, sig in sigs.items()}
+ for name, sigs in event_dict.pop("signatures", {}).items()
+ }
+
+ unsigned = dict(event_dict.pop("unsigned", {}))
+
+ if USE_FROZEN_DICTS:
+ frozen_dict = freeze(event_dict)
+ else:
+ frozen_dict = event_dict
+
+ super(FrozenEvent, self).__init__(
+ frozen_dict,
+ signatures=signatures,
+ unsigned=unsigned,
+ internal_metadata_dict=internal_metadata_dict,
+ rejected_reason=rejected_reason,
+ )
+
+ @staticmethod
+ def from_event(event):
+ e = FrozenEvent(
+ event.get_pdu_json()
+ )
+
+ e.internal_metadata = event.internal_metadata
+
+ return e
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __repr__(self):
+ return "<FrozenEvent event_id='%s', type='%s', state_key='%s'>" % (
+ self.event_id, self.type, self.get("state_key", None),
+ )
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
new file mode 100644
index 00000000..9d45bdb8
--- /dev/null
+++ b/synapse/events/builder.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import EventBase, FrozenEvent
+
+from synapse.types import EventID
+
+from synapse.util.stringutils import random_string
+
+import copy
+
+
+class EventBuilder(EventBase):
+ def __init__(self, key_values={}, internal_metadata_dict={}):
+ signatures = copy.deepcopy(key_values.pop("signatures", {}))
+ unsigned = copy.deepcopy(key_values.pop("unsigned", {}))
+
+ super(EventBuilder, self).__init__(
+ key_values,
+ signatures=signatures,
+ unsigned=unsigned,
+ internal_metadata_dict=internal_metadata_dict,
+ )
+
+ def build(self):
+ return FrozenEvent.from_event(self)
+
+
+class EventBuilderFactory(object):
+ def __init__(self, clock, hostname):
+ self.clock = clock
+ self.hostname = hostname
+
+ self.event_id_count = 0
+
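+    # Event ids only need to be unique: combining the current time, a
+    # per-process counter and a short random string makes collisions
+    # vanishingly unlikely.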
+ def create_event_id(self):
+ i = str(self.event_id_count)
+ self.event_id_count += 1
+
+ local_part = str(int(self.clock.time())) + i + random_string(5)
+
+ e_id = EventID.create(local_part, self.hostname)
+
+ return e_id.to_string()
+
+    def new(self, key_values=None):
+        # Default to None rather than {} so that we don't mutate a shared
+        # default dict across calls.
+        if key_values is None:
+            key_values = {}
+
+        key_values["event_id"] = self.create_event_id()
+
+ time_now = int(self.clock.time_msec())
+
+ key_values.setdefault("origin", self.hostname)
+ key_values.setdefault("origin_server_ts", time_now)
+
+ key_values.setdefault("unsigned", {})
+ age = key_values["unsigned"].pop("age", 0)
+ key_values["unsigned"].setdefault("age_ts", time_now - age)
+
+ key_values["signatures"] = {}
+
+ return EventBuilder(key_values=key_values,)
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
new file mode 100644
index 00000000..4ecadf08
--- /dev/null
+++ b/synapse/events/snapshot.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class EventContext(object):
+
+ def __init__(self, current_state=None):
+ self.current_state = current_state
+ self.state_group = None
+ self.rejected = False
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
new file mode 100644
index 00000000..9989b765
--- /dev/null
+++ b/synapse/events/utils.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.api.constants import EventTypes
+from . import EventBase
+
+
+def prune_event(event):
+ """ Returns a pruned version of the given event, which removes all keys we
+ don't know about or think could potentially be dodgy.
+
+    This is used when we "redact" an event: all user-specified fields are
+    removed, but necessary information such as type, state_key etc. is kept.
+ """
+ event_type = event.type
+
+ allowed_keys = [
+ "event_id",
+ "sender",
+ "room_id",
+ "hashes",
+ "signatures",
+ "content",
+ "type",
+ "state_key",
+ "depth",
+ "prev_events",
+ "prev_state",
+ "auth_events",
+ "origin",
+ "origin_server_ts",
+ "membership",
+ ]
+
+ event_dict = event.get_dict()
+
+ new_content = {}
+
+ def add_fields(*fields):
+ for field in fields:
+ if field in event.content:
+ new_content[field] = event_dict["content"][field]
+
+ if event_type == EventTypes.Member:
+ add_fields("membership")
+ elif event_type == EventTypes.Create:
+ add_fields("creator")
+ elif event_type == EventTypes.JoinRules:
+ add_fields("join_rule")
+ elif event_type == EventTypes.PowerLevels:
+ add_fields(
+ "users",
+ "users_default",
+ "events",
+ "events_default",
+ "state_default",
+ "ban",
+ "kick",
+ "redact",
+ )
+ elif event_type == EventTypes.Aliases:
+ add_fields("aliases")
+ elif event_type == EventTypes.RoomHistoryVisibility:
+ add_fields("history_visibility")
+
+ allowed_fields = {
+ k: v
+ for k, v in event_dict.items()
+ if k in allowed_keys
+ }
+
+ allowed_fields["content"] = new_content
+
+ allowed_fields["unsigned"] = {}
+
+ if "age_ts" in event.unsigned:
+ allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]
+
+ return type(event)(
+ allowed_fields,
+ internal_metadata_dict=event.internal_metadata.get_dict()
+ )
+
+
+def format_event_raw(d):
+ return d
+
+
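+# The v1 client API exposes some "unsigned" fields at the top level of the
+# event and omits federation-internal keys; the v2 formatters below only do
+# the latter.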
+def format_event_for_client_v1(d):
+ d["user_id"] = d.pop("sender", None)
+
+ move_keys = (
+ "age", "redacted_because", "replaces_state", "prev_content",
+ "invite_room_state",
+ )
+ for key in move_keys:
+ if key in d["unsigned"]:
+ d[key] = d["unsigned"][key]
+
+ drop_keys = (
+ "auth_events", "prev_events", "hashes", "signatures", "depth",
+ "unsigned", "origin", "prev_state"
+ )
+ for key in drop_keys:
+ d.pop(key, None)
+ return d
+
+
+def format_event_for_client_v2(d):
+ drop_keys = (
+ "auth_events", "prev_events", "hashes", "signatures", "depth",
+ "origin", "prev_state",
+ )
+ for key in drop_keys:
+ d.pop(key, None)
+ return d
+
+
+def format_event_for_client_v2_without_event_id(d):
+ d = format_event_for_client_v2(d)
+ d.pop("room_id", None)
+ d.pop("event_id", None)
+ return d
+
+
+def serialize_event(e, time_now_ms, as_client_event=True,
+ event_format=format_event_for_client_v1,
+ token_id=None):
+ # FIXME(erikj): To handle the case of presence events and the like
+ if not isinstance(e, EventBase):
+ return e
+
+ time_now_ms = int(time_now_ms)
+
+ # Should this strip out None's?
+ d = {k: v for k, v in e.get_dict().items()}
+
+ if "age_ts" in d["unsigned"]:
+ d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
+ del d["unsigned"]["age_ts"]
+
+ if "redacted_because" in e.unsigned:
+ d["unsigned"]["redacted_because"] = serialize_event(
+ e.unsigned["redacted_because"], time_now_ms,
+ event_format=event_format
+ )
+
+ if token_id is not None:
+ if token_id == getattr(e.internal_metadata, "token_id", None):
+ txn_id = getattr(e.internal_metadata, "txn_id", None)
+ if txn_id is not None:
+ d["unsigned"]["transaction_id"] = txn_id
+
+ if as_client_event:
+ return event_format(d)
+ else:
+ return d
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
new file mode 100644
index 00000000..0ee6872d
--- /dev/null
+++ b/synapse/events/validator.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.types import EventID, RoomID, UserID
+from synapse.api.errors import SynapseError
+from synapse.api.constants import EventTypes, Membership
+
+
+class EventValidator(object):
+
+ def validate(self, event):
+ EventID.from_string(event.event_id)
+ RoomID.from_string(event.room_id)
+
+ required = [
+ # "auth_events",
+ "content",
+ # "hashes",
+ "origin",
+ # "prev_events",
+ "sender",
+ "type",
+ ]
+
+ for k in required:
+ if not hasattr(event, k):
+ raise SynapseError(400, "Event does not have key %s" % (k,))
+
+ # Check that the following keys have string values
+ strings = [
+ "origin",
+ "sender",
+ "type",
+ ]
+
+ if hasattr(event, "state_key"):
+ strings.append("state_key")
+
+ for s in strings:
+ if not isinstance(getattr(event, s), basestring):
+                raise SynapseError(400, "'%s' is not a string" % (s,))
+
+ if event.type == EventTypes.Member:
+ if "membership" not in event.content:
+                raise SynapseError(400, "Content has no membership key")
+
+ if event.content["membership"] not in Membership.LIST:
+ raise SynapseError(400, "Invalid membership key")
+
+ # Check that the following keys have dictionary values
+ # TODO
+
+ # Check that the following keys have the correct format for DAGs
+ # TODO
+
+ def validate_new(self, event):
+ self.validate(event)
+
+ UserID.from_string(event.sender)
+
+ if event.type == EventTypes.Message:
+ strings = [
+ "body",
+ "msgtype",
+ ]
+
+ self._ensure_strings(event.content, strings)
+
+ elif event.type == EventTypes.Topic:
+ self._ensure_strings(event.content, ["topic"])
+
+ elif event.type == EventTypes.Name:
+ self._ensure_strings(event.content, ["name"])
+
+ def _ensure_strings(self, d, keys):
+ for s in keys:
+ if s not in d:
+ raise SynapseError(400, "'%s' not in content" % (s,))
+ if not isinstance(d[s], basestring):
+                raise SynapseError(400, "'%s' is not a string" % (s,))
diff --git a/synapse/federation/__init__.py b/synapse/federation/__init__.py
new file mode 100644
index 00000000..7517c529
--- /dev/null
+++ b/synapse/federation/__init__.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" This package includes all the federation specific logic.
+"""
+
+from .replication import ReplicationLayer
+from .transport import TransportLayer
+
+
+def initialize_http_replication(homeserver):
+ transport = TransportLayer(
+ homeserver,
+ homeserver.hostname,
+ server=homeserver.get_resource_for_federation(),
+ client=homeserver.get_http_client()
+ )
+
+ return ReplicationLayer(homeserver, transport)
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
new file mode 100644
index 00000000..bdfa2476
--- /dev/null
+++ b/synapse/federation/federation_base.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from synapse.events.utils import prune_event
+
+from synapse.crypto.event_signing import check_event_content_hash
+
+from synapse.api.errors import SynapseError
+
+from synapse.util import unwrapFirstError
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class FederationBase(object):
+ @defer.inlineCallbacks
+ def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
+ include_none=False):
+        """Takes a list of PDUs and checks the signatures and hashes of each
+        one. If a PDU fails its signature check then we check if we have it in
+        the database, and if not then request it from the originating server of
+        that PDU.
+
+ If a PDU fails its content hash check then it is redacted.
+
+ The given list of PDUs are not modified, instead the function returns
+ a new list.
+
+ Args:
+            pdus (list)
+ outlier (bool)
+
+ Returns:
+ Deferred : A list of PDUs that have valid signatures and hashes.
+ """
+ deferreds = self._check_sigs_and_hashes(pdus)
+
+ def callback(pdu):
+ return pdu
+
+ def errback(failure, pdu):
+ failure.trap(SynapseError)
+ return None
+
+ def try_local_db(res, pdu):
+ if not res:
+ # Check local db.
+ return self.store.get_event(
+ pdu.event_id,
+ allow_rejected=True,
+ allow_none=True,
+ )
+ return res
+
+ def try_remote(res, pdu):
+ if not res and pdu.origin != origin:
+ return self.get_pdu(
+ destinations=[pdu.origin],
+ event_id=pdu.event_id,
+ outlier=outlier,
+ timeout=10000,
+ ).addErrback(lambda e: None)
+ return res
+
+ def warn(res, pdu):
+ if not res:
+ logger.warn(
+ "Failed to find copy of %s with valid signature",
+ pdu.event_id,
+ )
+ return res
+
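+        # Chain the fallbacks onto each PDU's verification deferred: signature
+        # failure -> local database -> originating server -> warn and give up.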
+ for pdu, deferred in zip(pdus, deferreds):
+ deferred.addCallbacks(
+ callback, errback, errbackArgs=[pdu]
+ ).addCallback(
+ try_local_db, pdu
+ ).addCallback(
+ try_remote, pdu
+ ).addCallback(
+ warn, pdu
+ )
+
+ valid_pdus = yield defer.gatherResults(
+ deferreds,
+ consumeErrors=True
+ ).addErrback(unwrapFirstError)
+
+ if include_none:
+ defer.returnValue(valid_pdus)
+ else:
+ defer.returnValue([p for p in valid_pdus if p])
+
+ def _check_sigs_and_hash(self, pdu):
+ return self._check_sigs_and_hashes([pdu])[0]
+
+ def _check_sigs_and_hashes(self, pdus):
+        """Checks the signatures and content hashes of the given PDUs.
+
+        Returns:
+            list of Deferreds: each resolves to the given event, or to a
+            redacted copy of it if it failed the content hash check, and
+            errbacks with a SynapseError if the signature check failed.
+        """
+
+ redacted_pdus = [
+ prune_event(pdu)
+ for pdu in pdus
+ ]
+
+ deferreds = self.keyring.verify_json_objects_for_server([
+ (p.origin, p.get_pdu_json())
+ for p in redacted_pdus
+ ])
+
+ def callback(_, pdu, redacted):
+ if not check_event_content_hash(pdu):
+ logger.warn(
+ "Event content has been tampered, redacting %s: %s",
+ pdu.event_id, pdu.get_pdu_json()
+ )
+ return redacted
+ return pdu
+
+ def errback(failure, pdu):
+ failure.trap(SynapseError)
+ logger.warn(
+ "Signature check failed for %s",
+ pdu.event_id,
+ )
+ return failure
+
+ for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus):
+ deferred.addCallbacks(
+ callback, errback,
+ callbackArgs=[pdu, redacted],
+ errbackArgs=[pdu],
+ )
+
+ return deferreds
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
new file mode 100644
index 00000000..c6a8c124
--- /dev/null
+++ b/synapse/federation/federation_client.py
@@ -0,0 +1,732 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from .federation_base import FederationBase
+from synapse.api.constants import Membership
+from .units import Edu
+
+from synapse.api.errors import (
+ CodeMessageException, HttpResponseException, SynapseError,
+)
+from synapse.util import unwrapFirstError
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.logutils import log_function
+from synapse.events import FrozenEvent
+import synapse.metrics
+
+from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
+
+import copy
+import itertools
+import logging
+import random
+
+
+logger = logging.getLogger(__name__)
+
+
+# synapse.federation.federation_client is a silly name
+metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
+
+sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")
+
+sent_edus_counter = metrics.register_counter("sent_edus")
+
+sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
+
+
+class FederationClient(FederationBase):
+
+ def start_get_pdu_cache(self):
+ self._get_pdu_cache = ExpiringCache(
+ cache_name="get_pdu_cache",
+ clock=self._clock,
+ max_len=1000,
+ expiry_ms=120*1000,
+ reset_expiry_on_get=False,
+ )
+
+ self._get_pdu_cache.start()
+
+ @log_function
+ def send_pdu(self, pdu, destinations):
+ """Informs the replication layer about a new PDU generated within the
+ home server that should be transmitted to others.
+
+ TODO: Figure out when we should actually resolve the deferred.
+
+ Args:
+ pdu (Pdu): The new Pdu.
+
+ Returns:
+ Deferred: Completes when we have successfully processed the PDU
+ and replicated it to any interested remote home servers.
+ """
+ order = self._order
+ self._order += 1
+
+ sent_pdus_destination_dist.inc_by(len(destinations))
+
+ logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id)
+
+ # TODO, add errback, etc.
+ self._transaction_queue.enqueue_pdu(pdu, destinations, order)
+
+ logger.debug(
+ "[%s] transaction_layer.enqueue_pdu... done",
+ pdu.event_id
+ )
+
+ @log_function
+ def send_edu(self, destination, edu_type, content):
+ edu = Edu(
+ origin=self.server_name,
+ destination=destination,
+ edu_type=edu_type,
+ content=content,
+ )
+
+ sent_edus_counter.inc()
+
+ # TODO, add errback, etc.
+ self._transaction_queue.enqueue_edu(edu)
+ return defer.succeed(None)
+
+ @log_function
+ def send_failure(self, failure, destination):
+ self._transaction_queue.enqueue_failure(failure, destination)
+ return defer.succeed(None)
+
+ @log_function
+ def make_query(self, destination, query_type, args,
+ retry_on_dns_fail=True):
+ """Sends a federation Query to a remote homeserver of the given type
+ and arguments.
+
+ Args:
+ destination (str): Domain name of the remote homeserver
+ query_type (str): Category of the query type; should match the
+ handler name used in register_query_handler().
+ args (dict): Mapping of strings to strings containing the details
+ of the query request.
+
+ Returns:
+ a Deferred which will eventually yield a JSON object from the
+ response
+ """
+ sent_queries_counter.inc(query_type)
+
+ return self.transport_layer.make_query(
+ destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail
+ )
+
+ @log_function
+ def query_client_keys(self, destination, content):
+ """Query device keys for a device hosted on a remote server.
+
+ Args:
+ destination (str): Domain name of the remote homeserver
+ content (dict): The query content.
+
+ Returns:
+ a Deferred which will eventually yield a JSON object from the
+ response
+ """
+ sent_queries_counter.inc("client_device_keys")
+ return self.transport_layer.query_client_keys(destination, content)
+
+ @log_function
+ def claim_client_keys(self, destination, content):
+ """Claims one-time keys for a device hosted on a remote server.
+
+ Args:
+ destination (str): Domain name of the remote homeserver
+ content (dict): The query content.
+
+ Returns:
+ a Deferred which will eventually yield a JSON object from the
+ response
+ """
+ sent_queries_counter.inc("client_one_time_keys")
+ return self.transport_layer.claim_client_keys(destination, content)
+
+ @defer.inlineCallbacks
+ @log_function
+ def backfill(self, dest, context, limit, extremities):
+ """Requests some more historic PDUs for the given context from the
+ given destination server.
+
+ Args:
+ dest (str): The remote home server to ask.
+ context (str): The context to backfill.
+ limit (int): The maximum number of PDUs to return.
+ extremities (list): List of PDU id and origins of the first pdus
+ we have seen from the context
+
+ Returns:
+ Deferred: Results in the received PDUs.
+ """
+ logger.debug("backfill extrem=%s", extremities)
+
+        # If there are no extremities then we've (probably) reached the start.
+ if not extremities:
+ return
+
+ transaction_data = yield self.transport_layer.backfill(
+ dest, context, extremities, limit)
+
+ logger.debug("backfill transaction_data=%s", repr(transaction_data))
+
+ pdus = [
+ self.event_from_pdu_json(p, outlier=False)
+ for p in transaction_data["pdus"]
+ ]
+
+ # FIXME: We should handle signature failures more gracefully.
+ pdus[:] = yield defer.gatherResults(
+ self._check_sigs_and_hashes(pdus),
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError)
+
+ defer.returnValue(pdus)
+
+ @defer.inlineCallbacks
+ @log_function
+ def get_pdu(self, destinations, event_id, outlier=False, timeout=None):
+ """Requests the PDU with given origin and ID from the remote home
+ servers.
+
+ Will attempt to get the PDU from each destination in the list until
+ one succeeds.
+
+ This will persist the PDU locally upon receipt.
+
+ Args:
+ destinations (list): Which home servers to query
+ pdu_origin (str): The home server that originally sent the pdu.
+ event_id (str)
+ outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
+                it's from an arbitrary point in the context as opposed to part
+ of the current block of PDUs. Defaults to `False`
+ timeout (int): How long to try (in ms) each destination for before
+ moving to the next destination. None indicates no timeout.
+
+ Returns:
+ Deferred: Results in the requested PDU.
+ """
+
+ # TODO: Rate limit the number of times we try and get the same event.
+
+ if self._get_pdu_cache:
+ e = self._get_pdu_cache.get(event_id)
+ if e:
+ defer.returnValue(e)
+
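+        # Try each destination in turn until one returns a PDU that passes
+        # the signature and hash checks.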
+ pdu = None
+ for destination in destinations:
+ try:
+ limiter = yield get_retry_limiter(
+ destination,
+ self._clock,
+ self.store,
+ )
+
+ with limiter:
+ transaction_data = yield self.transport_layer.get_event(
+ destination, event_id, timeout=timeout,
+ )
+
+ logger.debug("transaction_data %r", transaction_data)
+
+ pdu_list = [
+ self.event_from_pdu_json(p, outlier=outlier)
+ for p in transaction_data["pdus"]
+ ]
+
+ if pdu_list and pdu_list[0]:
+ pdu = pdu_list[0]
+
+ # Check signatures are correct.
+ pdu = yield self._check_sigs_and_hashes([pdu])[0]
+
+ break
+
+            except SynapseError as e:
+ logger.info(
+ "Failed to get PDU %s from %s because %s",
+ event_id, destination, e,
+ )
+ continue
+ except CodeMessageException as e:
+ if 400 <= e.code < 500:
+ raise
+
+ logger.info(
+ "Failed to get PDU %s from %s because %s",
+ event_id, destination, e,
+ )
+ continue
+ except NotRetryingDestination as e:
+ logger.info(e.message)
+ continue
+ except Exception as e:
+ logger.info(
+ "Failed to get PDU %s from %s because %s",
+ event_id, destination, e,
+ )
+ continue
+
+ if self._get_pdu_cache is not None and pdu:
+ self._get_pdu_cache[event_id] = pdu
+
+ defer.returnValue(pdu)
+
+ @defer.inlineCallbacks
+ @log_function
+ def get_state_for_room(self, destination, room_id, event_id):
+ """Requests all of the `current` state PDUs for a given room from
+ a remote home server.
+
+ Args:
+ destination (str): The remote homeserver to query for the state.
+ room_id (str): The id of the room we're interested in.
+ event_id (str): The id of the event we want the state at.
+
+ Returns:
+ Deferred: Results in a list of PDUs.
+ """
+
+ result = yield self.transport_layer.get_room_state(
+ destination, room_id, event_id=event_id,
+ )
+
+ pdus = [
+ self.event_from_pdu_json(p, outlier=True) for p in result["pdus"]
+ ]
+
+ auth_chain = [
+ self.event_from_pdu_json(p, outlier=True)
+ for p in result.get("auth_chain", [])
+ ]
+
+ signed_pdus = yield self._check_sigs_and_hash_and_fetch(
+ destination, pdus, outlier=True
+ )
+
+ signed_auth = yield self._check_sigs_and_hash_and_fetch(
+ destination, auth_chain, outlier=True
+ )
+
+ signed_auth.sort(key=lambda e: e.depth)
+
+ defer.returnValue((signed_pdus, signed_auth))
+
+ @defer.inlineCallbacks
+ @log_function
+ def get_event_auth(self, destination, room_id, event_id):
+ res = yield self.transport_layer.get_event_auth(
+ destination, room_id, event_id,
+ )
+
+ auth_chain = [
+ self.event_from_pdu_json(p, outlier=True)
+ for p in res["auth_chain"]
+ ]
+
+ signed_auth = yield self._check_sigs_and_hash_and_fetch(
+ destination, auth_chain, outlier=True
+ )
+
+ signed_auth.sort(key=lambda e: e.depth)
+
+ defer.returnValue(signed_auth)
+
+ @defer.inlineCallbacks
+ def make_membership_event(self, destinations, room_id, user_id, membership,
+ content={},):
+ """
+ Creates an m.room.member event, with context, without participating in the room.
+
+ Does so by asking one of the already participating servers to create an
+ event with proper context.
+
+ Note that this does not append any events to any graphs.
+
+ Args:
+ destinations (str): Candidate homeservers which are probably
+ participating in the room.
+ room_id (str): The room in which the event will happen.
+ user_id (str): The user whose membership is being evented.
+ membership (str): The "membership" property of the event. Must be
+ one of "join" or "leave".
+ content (object): Any additional data to put into the content field
+ of the event.
+        Returns:
+ A tuple of (origin (str), event (object)) where origin is the remote
+ homeserver which generated the event.
+ """
+ valid_memberships = {Membership.JOIN, Membership.LEAVE}
+ if membership not in valid_memberships:
+ raise RuntimeError(
+ "make_membership_event called with membership='%s', must be one of %s" %
+ (membership, ",".join(valid_memberships))
+ )
+ for destination in destinations:
+ if destination == self.server_name:
+ continue
+
+ try:
+ ret = yield self.transport_layer.make_membership_event(
+ destination, room_id, user_id, membership
+ )
+
+ pdu_dict = ret["event"]
+
+ logger.debug("Got response to make_%s: %s", membership, pdu_dict)
+
+ pdu_dict["content"].update(content)
+
+                # The protoevent received over the JSON wire may not have all
+                # the required fields. Let's just gloss over that, because
+                # there are some we never care about.
+ if "prev_state" not in pdu_dict:
+ pdu_dict["prev_state"] = []
+
+ defer.returnValue(
+ (destination, self.event_from_pdu_json(pdu_dict))
+ )
+ break
+ except CodeMessageException:
+ raise
+ except Exception as e:
+ logger.warn(
+ "Failed to make_%s via %s: %s",
+ membership, destination, e.message
+ )
+
+ raise RuntimeError("Failed to send to any server.")
+
+ @defer.inlineCallbacks
+ def send_join(self, destinations, pdu):
+ for destination in destinations:
+ if destination == self.server_name:
+ continue
+
+ try:
+ time_now = self._clock.time_msec()
+ _, content = yield self.transport_layer.send_join(
+ destination=destination,
+ room_id=pdu.room_id,
+ event_id=pdu.event_id,
+ content=pdu.get_pdu_json(time_now),
+ )
+
+ logger.debug("Got content: %s", content)
+
+ state = [
+ self.event_from_pdu_json(p, outlier=True)
+ for p in content.get("state", [])
+ ]
+
+ auth_chain = [
+ self.event_from_pdu_json(p, outlier=True)
+ for p in content.get("auth_chain", [])
+ ]
+
+ pdus = {
+ p.event_id: p
+ for p in itertools.chain(state, auth_chain)
+ }
+
+ valid_pdus = yield self._check_sigs_and_hash_and_fetch(
+ destination, pdus.values(),
+ outlier=True,
+ )
+
+ valid_pdus_map = {
+ p.event_id: p
+ for p in valid_pdus
+ }
+
+ # NB: We *need* to copy to ensure that we don't have multiple
+ # references being passed on, as that causes... issues.
+ signed_state = [
+ copy.copy(valid_pdus_map[p.event_id])
+ for p in state
+ if p.event_id in valid_pdus_map
+ ]
+
+ signed_auth = [
+ valid_pdus_map[p.event_id]
+ for p in auth_chain
+ if p.event_id in valid_pdus_map
+ ]
+
+ # NB: We *need* to copy to ensure that we don't have multiple
+ # references being passed on, as that causes... issues.
+ for s in signed_state:
+ s.internal_metadata = copy.deepcopy(s.internal_metadata)
+
+ auth_chain.sort(key=lambda e: e.depth)
+
+ defer.returnValue({
+ "state": signed_state,
+ "auth_chain": signed_auth,
+ "origin": destination,
+ })
+ except CodeMessageException:
+ raise
+ except Exception as e:
+ logger.exception(
+ "Failed to send_join via %s: %s",
+ destination, e.message
+ )
+
+ raise RuntimeError("Failed to send to any server.")
+
+ @defer.inlineCallbacks
+ def send_invite(self, destination, room_id, event_id, pdu):
+ time_now = self._clock.time_msec()
+ code, content = yield self.transport_layer.send_invite(
+ destination=destination,
+ room_id=room_id,
+ event_id=event_id,
+ content=pdu.get_pdu_json(time_now),
+ )
+
+ pdu_dict = content["event"]
+
+ logger.debug("Got response to send_invite: %s", pdu_dict)
+
+ pdu = self.event_from_pdu_json(pdu_dict)
+
+ # Check signatures are correct.
+ pdu = yield self._check_sigs_and_hash(pdu)
+
+ # FIXME: We should handle signature failures more gracefully.
+
+ defer.returnValue(pdu)
+
+ @defer.inlineCallbacks
+ def send_leave(self, destinations, pdu):
+ for destination in destinations:
+ if destination == self.server_name:
+ continue
+
+ try:
+ time_now = self._clock.time_msec()
+ _, content = yield self.transport_layer.send_leave(
+ destination=destination,
+ room_id=pdu.room_id,
+ event_id=pdu.event_id,
+ content=pdu.get_pdu_json(time_now),
+ )
+
+ logger.debug("Got content: %s", content)
+ defer.returnValue(None)
+ except CodeMessageException:
+ raise
+ except Exception as e:
+ logger.exception(
+ "Failed to send_leave via %s: %s",
+ destination, e.message
+ )
+
+ raise RuntimeError("Failed to send to any server.")
+
+ @defer.inlineCallbacks
+ def query_auth(self, destination, room_id, event_id, local_auth):
+ """
+ Params:
+ destination (str)
+            event_id (str)
+ local_auth (list)
+ """
+ time_now = self._clock.time_msec()
+
+ send_content = {
+ "auth_chain": [e.get_pdu_json(time_now) for e in local_auth],
+ }
+
+ code, content = yield self.transport_layer.send_query_auth(
+ destination=destination,
+ room_id=room_id,
+ event_id=event_id,
+ content=send_content,
+ )
+
+ auth_chain = [
+ self.event_from_pdu_json(e)
+ for e in content["auth_chain"]
+ ]
+
+ signed_auth = yield self._check_sigs_and_hash_and_fetch(
+ destination, auth_chain, outlier=True
+ )
+
+ signed_auth.sort(key=lambda e: e.depth)
+
+ ret = {
+ "auth_chain": signed_auth,
+ "rejects": content.get("rejects", []),
+ "missing": content.get("missing", []),
+ }
+
+ defer.returnValue(ret)
+
+ @defer.inlineCallbacks
+ def get_missing_events(self, destination, room_id, earliest_events_ids,
+ latest_events, limit, min_depth):
+ """Tries to fetch events we are missing. This is called when we receive
+ an event without having received all of its ancestors.
+
+ Args:
+ destination (str)
+ room_id (str)
+ earliest_events_ids (list): List of event ids. Effectively the
+ events we expected to receive, but haven't. `get_missing_events`
+ should only return events that didn't happen before these.
+ latest_events (list): List of events we have received that we don't
+ have all previous events for.
+ limit (int): Maximum number of events to return.
+            min_depth (int): Minimum depth of events to return.
+ """
+ try:
+ content = yield self.transport_layer.get_missing_events(
+ destination=destination,
+ room_id=room_id,
+ earliest_events=earliest_events_ids,
+ latest_events=[e.event_id for e in latest_events],
+ limit=limit,
+ min_depth=min_depth,
+ )
+
+ events = [
+ self.event_from_pdu_json(e)
+ for e in content.get("events", [])
+ ]
+
+ signed_events = yield self._check_sigs_and_hash_and_fetch(
+ destination, events, outlier=False
+ )
+
+ have_gotten_all_from_destination = True
+ except HttpResponseException as e:
+ if not e.code == 400:
+ raise
+
+ # We are probably hitting an old server that doesn't support
+ # get_missing_events
+ signed_events = []
+ have_gotten_all_from_destination = False
+
+ if len(signed_events) >= limit:
+ defer.returnValue(signed_events)
+
+ servers = yield self.store.get_joined_hosts_for_room(room_id)
+
+ servers = set(servers)
+ servers.discard(self.server_name)
+
+ failed_to_fetch = set()
+
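+        # Walk backwards through prev_events, fetching any referenced events
+        # we are missing from other servers in the room, until we reach the
+        # limit or run out of gaps we can fill.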
+ while len(signed_events) < limit:
+ # Are we missing any?
+
+ seen_events = set(earliest_events_ids)
+ seen_events.update(e.event_id for e in signed_events if e)
+
+ missing_events = {}
+ for e in itertools.chain(latest_events, signed_events):
+ if e.depth > min_depth:
+ missing_events.update({
+ e_id: e.depth for e_id, _ in e.prev_events
+ if e_id not in seen_events
+ and e_id not in failed_to_fetch
+ })
+
+ if not missing_events:
+ break
+
+ have_seen = yield self.store.have_events(missing_events)
+
+ for k in have_seen:
+ missing_events.pop(k, None)
+
+ if not missing_events:
+ break
+
+            # Okay, we haven't gotten everything yet. Let's get them.
+ ordered_missing = sorted(missing_events.items(), key=lambda x: x[0])
+
+ if have_gotten_all_from_destination:
+ servers.discard(destination)
+
+ def random_server_list():
+ srvs = list(servers)
+ random.shuffle(srvs)
+ return srvs
+
+ deferreds = [
+ self.get_pdu(
+ destinations=random_server_list(),
+ event_id=e_id,
+ )
+ for e_id, depth in ordered_missing[:limit - len(signed_events)]
+ ]
+
+ res = yield defer.DeferredList(deferreds, consumeErrors=True)
+ for (result, val), (e_id, _) in zip(res, ordered_missing):
+ if result and val:
+ signed_events.append(val)
+ else:
+ failed_to_fetch.add(e_id)
+
+ defer.returnValue(signed_events)
+
+ def event_from_pdu_json(self, pdu_json, outlier=False):
+ event = FrozenEvent(
+ pdu_json
+ )
+
+ event.internal_metadata.outlier = outlier
+
+ return event
+
+ @defer.inlineCallbacks
+ def forward_third_party_invite(self, destinations, room_id, event_dict):
+ for destination in destinations:
+ if destination == self.server_name:
+ continue
+
+ try:
+ yield self.transport_layer.exchange_third_party_invite(
+ destination=destination,
+ room_id=room_id,
+ event_dict=event_dict,
+ )
+ defer.returnValue(None)
+ except CodeMessageException:
+ raise
+ except Exception as e:
+ logger.exception(
+ "Failed to send_third_party_invite via %s: %s",
+ destination, e.message
+ )
+
+ raise RuntimeError("Failed to send to any server.")
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
new file mode 100644
index 00000000..7a59436a
--- /dev/null
+++ b/synapse/federation/federation_server.py
@@ -0,0 +1,557 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from .federation_base import FederationBase
+from .units import Transaction, Edu
+
+from synapse.util.logutils import log_function
+from synapse.events import FrozenEvent
+import synapse.metrics
+
+from synapse.api.errors import FederationError, SynapseError
+
+from synapse.crypto.event_signing import compute_event_signature
+
+import simplejson as json
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+# synapse.federation.federation_server is a silly name
+metrics = synapse.metrics.get_metrics_for("synapse.federation.server")
+
+received_pdus_counter = metrics.register_counter("received_pdus")
+
+received_edus_counter = metrics.register_counter("received_edus")
+
+received_queries_counter = metrics.register_counter("received_queries", labels=["type"])
+
+
+class FederationServer(FederationBase):
+ def set_handler(self, handler):
+ """Sets the handler that the replication layer will use to communicate
+ receipt of new PDUs from other home servers. The required methods are
+ documented on :py:class:`.ReplicationHandler`.
+ """
+ self.handler = handler
+
+ def register_edu_handler(self, edu_type, handler):
+ if edu_type in self.edu_handlers:
+ raise KeyError("Already have an EDU handler for %s" % (edu_type,))
+
+ self.edu_handlers[edu_type] = handler
+
+ def register_query_handler(self, query_type, handler):
+ """Sets the handler callable that will be used to handle an incoming
+ federation Query of the given type.
+
+ Args:
+ query_type (str): Category name of the query, which should match
+ the string used by make_query.
+ handler (callable): Invoked to handle incoming queries of this type
+
+ handler is invoked as:
+ result = handler(args)
+
+ where 'args' is a dict mapping strings to strings of the query
+ arguments. It should return a Deferred that will eventually yield an
+ object to encode as JSON.
+ """
+ if query_type in self.query_handlers:
+ raise KeyError(
+ "Already have a Query handler for %s" % (query_type,)
+ )
+
+ self.query_handlers[query_type] = handler
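+
+    # Illustrative registration of the hooks above (handler names are
+    # hypothetical):
+    #
+    #   replication.register_edu_handler(
+    #       "m.presence", presence_handler.incoming_presence
+    #   )
+    #   replication.register_query_handler(
+    #       "profile", profile_handler.on_profile_query
+    #   )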
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_backfill_request(self, origin, room_id, versions, limit):
+ pdus = yield self.handler.on_backfill_request(
+ origin, room_id, versions, limit
+ )
+
+ defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_incoming_transaction(self, transaction_data):
+ transaction = Transaction(**transaction_data)
+
+ received_pdus_counter.inc_by(len(transaction.pdus))
+
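+        # e.g. (illustrative): a PDU received with
+        # {"unsigned": {"age": 500}} is rewritten below to carry
+        # {"age_ts": now - 500}, the inverse of the mapping applied on the
+        # sending side by the transaction queue.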
+ for p in transaction.pdus:
+ if "unsigned" in p:
+ unsigned = p["unsigned"]
+ if "age" in unsigned:
+ p["age"] = unsigned["age"]
+ if "age" in p:
+ p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
+ del p["age"]
+
+ pdu_list = [
+ self.event_from_pdu_json(p) for p in transaction.pdus
+ ]
+
+ logger.debug("[%s] Got transaction", transaction.transaction_id)
+
+ response = yield self.transaction_actions.have_responded(transaction)
+
+ if response:
+ logger.debug(
+ "[%s] We've already responed to this request",
+ transaction.transaction_id
+ )
+ defer.returnValue(response)
+ return
+
+ logger.debug("[%s] Transaction is new", transaction.transaction_id)
+
+ results = []
+
+ for pdu in pdu_list:
+ d = self._handle_new_pdu(transaction.origin, pdu)
+
+ try:
+ yield d
+ results.append({})
+ except FederationError as e:
+ self.send_failure(e, transaction.origin)
+ results.append({"error": str(e)})
+ except Exception as e:
+ results.append({"error": str(e)})
+ logger.exception("Failed to handle PDU")
+
+ if hasattr(transaction, "edus"):
+ for edu in [Edu(**x) for x in transaction.edus]:
+ self.received_edu(
+ transaction.origin,
+ edu.edu_type,
+ edu.content
+ )
+
+ for failure in getattr(transaction, "pdu_failures", []):
+ logger.info("Got failure %r", failure)
+
+ logger.debug("Returning: %s", str(results))
+
+ response = {
+ "pdus": dict(zip(
+ (p.event_id for p in pdu_list), results
+ )),
+ }
+
+ yield self.transaction_actions.set_response(
+ transaction,
+ 200, response
+ )
+ defer.returnValue((200, response))
+
+ def received_edu(self, origin, edu_type, content):
+ received_edus_counter.inc()
+
+ if edu_type in self.edu_handlers:
+ self.edu_handlers[edu_type](origin, content)
+ else:
+ logger.warn("Received EDU of type %s with no handler", edu_type)
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_context_state_request(self, origin, room_id, event_id):
+ if event_id:
+ pdus = yield self.handler.get_state_for_pdu(
+ origin, room_id, event_id,
+ )
+ auth_chain = yield self.store.get_auth_chain(
+ [pdu.event_id for pdu in pdus]
+ )
+
+ for event in auth_chain:
+ event.signatures.update(
+ compute_event_signature(
+ event,
+ self.hs.hostname,
+ self.hs.config.signing_key[0]
+ )
+ )
+ else:
+ raise NotImplementedError("Specify an event")
+
+ defer.returnValue((200, {
+ "pdus": [pdu.get_pdu_json() for pdu in pdus],
+ "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
+ }))
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_pdu_request(self, origin, event_id):
+ pdu = yield self._get_persisted_pdu(origin, event_id)
+
+ if pdu:
+ defer.returnValue(
+ (200, self._transaction_from_pdus([pdu]).get_dict())
+ )
+ else:
+ defer.returnValue((404, ""))
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_pull_request(self, origin, versions):
+ raise NotImplementedError("Pull transactions not implemented")
+
+ @defer.inlineCallbacks
+ def on_query_request(self, query_type, args):
+ received_queries_counter.inc(query_type)
+
+ if query_type in self.query_handlers:
+ response = yield self.query_handlers[query_type](args)
+ defer.returnValue((200, response))
+ else:
+ defer.returnValue(
+ (404, "No handler for Query type '%s'" % (query_type,))
+ )
+
+ @defer.inlineCallbacks
+ def on_make_join_request(self, room_id, user_id):
+ pdu = yield self.handler.on_make_join_request(room_id, user_id)
+ time_now = self._clock.time_msec()
+ defer.returnValue({"event": pdu.get_pdu_json(time_now)})
+
+ @defer.inlineCallbacks
+ def on_invite_request(self, origin, content):
+ pdu = self.event_from_pdu_json(content)
+ ret_pdu = yield self.handler.on_invite_request(origin, pdu)
+ time_now = self._clock.time_msec()
+ defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))
+
+ @defer.inlineCallbacks
+ def on_send_join_request(self, origin, content):
+ logger.debug("on_send_join_request: content: %s", content)
+ pdu = self.event_from_pdu_json(content)
+ logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
+ res_pdus = yield self.handler.on_send_join_request(origin, pdu)
+ time_now = self._clock.time_msec()
+ defer.returnValue((200, {
+ "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
+ "auth_chain": [
+ p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
+ ],
+ }))
+
+ @defer.inlineCallbacks
+ def on_make_leave_request(self, room_id, user_id):
+ pdu = yield self.handler.on_make_leave_request(room_id, user_id)
+ time_now = self._clock.time_msec()
+ defer.returnValue({"event": pdu.get_pdu_json(time_now)})
+
+ @defer.inlineCallbacks
+ def on_send_leave_request(self, origin, content):
+ logger.debug("on_send_leave_request: content: %s", content)
+ pdu = self.event_from_pdu_json(content)
+ logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
+ yield self.handler.on_send_leave_request(origin, pdu)
+ defer.returnValue((200, {}))
+
+ @defer.inlineCallbacks
+ def on_event_auth(self, origin, room_id, event_id):
+ time_now = self._clock.time_msec()
+ auth_pdus = yield self.handler.on_event_auth(event_id)
+ defer.returnValue((200, {
+ "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
+ }))
+
+ @defer.inlineCallbacks
+ def on_query_auth_request(self, origin, content, event_id):
+ """
+ Content is a dict with keys::
+ auth_chain (list): A list of events that give the auth chain.
+ missing (list): A list of event_ids indicating what the other
+                side (`origin`) thinks we're missing.
+ rejects (dict): A mapping from event_id to a 2-tuple of reason
+ string and a proof (or None) of why the event was rejected.
+ The keys of this dict give the list of events the `origin` has
+ rejected.
+
+ Args:
+ origin (str)
+ content (dict)
+ event_id (str)
+
+ Returns:
+ Deferred: Results in `dict` with the same format as `content`
+ """
+ auth_chain = [
+ self.event_from_pdu_json(e)
+ for e in content["auth_chain"]
+ ]
+
+ signed_auth = yield self._check_sigs_and_hash_and_fetch(
+ origin, auth_chain, outlier=True
+ )
+
+ ret = yield self.handler.on_query_auth(
+ origin,
+ event_id,
+ signed_auth,
+ content.get("rejects", []),
+ content.get("missing", []),
+ )
+
+ time_now = self._clock.time_msec()
+ send_content = {
+ "auth_chain": [
+ e.get_pdu_json(time_now)
+ for e in ret["auth_chain"]
+ ],
+ "rejects": ret.get("rejects", []),
+ "missing": ret.get("missing", []),
+ }
+
+ defer.returnValue(
+ (200, send_content)
+ )
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_query_client_keys(self, origin, content):
+ query = []
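+        # e.g. (illustrative): {"device_keys": {"@alice:example.org": []}}
+        # becomes [("@alice:example.org", None)], i.e. "all devices for
+        # @alice:example.org".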
+ for user_id, device_ids in content.get("device_keys", {}).items():
+ if not device_ids:
+ query.append((user_id, None))
+ else:
+ for device_id in device_ids:
+ query.append((user_id, device_id))
+
+ results = yield self.store.get_e2e_device_keys(query)
+
+ json_result = {}
+ for user_id, device_keys in results.items():
+ for device_id, json_bytes in device_keys.items():
+ json_result.setdefault(user_id, {})[device_id] = json.loads(
+ json_bytes
+ )
+
+ defer.returnValue({"device_keys": json_result})
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_claim_client_keys(self, origin, content):
+ query = []
+ for user_id, device_keys in content.get("one_time_keys", {}).items():
+ for device_id, algorithm in device_keys.items():
+ query.append((user_id, device_id, algorithm))
+
+ results = yield self.store.claim_e2e_one_time_keys(query)
+
+ json_result = {}
+ for user_id, device_keys in results.items():
+ for device_id, keys in device_keys.items():
+ for key_id, json_bytes in keys.items():
+ json_result.setdefault(user_id, {})[device_id] = {
+ key_id: json.loads(json_bytes)
+ }
+
+ defer.returnValue({"one_time_keys": json_result})
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_get_missing_events(self, origin, room_id, earliest_events,
+ latest_events, limit, min_depth):
+ missing_events = yield self.handler.on_get_missing_events(
+ origin, room_id, earliest_events, latest_events, limit, min_depth
+ )
+
+ time_now = self._clock.time_msec()
+
+ defer.returnValue({
+ "events": [ev.get_pdu_json(time_now) for ev in missing_events],
+ })
+
+ @log_function
+ def _get_persisted_pdu(self, origin, event_id, do_auth=True):
+ """ Get a PDU from the database with given origin and id.
+
+ Returns:
+ Deferred: Results in a `Pdu`.
+ """
+ return self.handler.get_persisted_pdu(
+ origin, event_id, do_auth=do_auth
+ )
+
+ def _transaction_from_pdus(self, pdu_list):
+ """Returns a new Transaction containing the given PDUs suitable for
+ transmission.
+ """
+ time_now = self._clock.time_msec()
+ pdus = [p.get_pdu_json(time_now) for p in pdu_list]
+ return Transaction(
+ origin=self.server_name,
+ pdus=pdus,
+ origin_server_ts=int(time_now),
+ destination=None,
+ )
+
+ @defer.inlineCallbacks
+ @log_function
+ def _handle_new_pdu(self, origin, pdu, get_missing=True):
+ # We reprocess pdus when we have seen them only as outliers
+ existing = yield self._get_persisted_pdu(
+ origin, pdu.event_id, do_auth=False
+ )
+
+ # FIXME: Currently we fetch an event again when we already have it
+ # if it has been marked as an outlier.
+
+ already_seen = (
+ existing and (
+ not existing.internal_metadata.is_outlier()
+ or pdu.internal_metadata.is_outlier()
+ )
+ )
+ if already_seen:
+ logger.debug("Already seen pdu %s", pdu.event_id)
+ return
+
+ # Check signature.
+ try:
+ pdu = yield self._check_sigs_and_hash(pdu)
+ except SynapseError as e:
+ raise FederationError(
+ "ERROR",
+ e.code,
+ e.msg,
+ affected=pdu.event_id,
+ )
+
+ state = None
+
+ auth_chain = []
+
+ have_seen = yield self.store.have_events(
+ [ev for ev, _ in pdu.prev_events]
+ )
+
+ fetch_state = False
+
+ # Get missing pdus if necessary.
+ if not pdu.internal_metadata.is_outlier():
+ # We only backfill backwards to the min depth.
+ min_depth = yield self.handler.get_min_depth_for_context(
+ pdu.room_id
+ )
+
+ logger.debug(
+ "_handle_new_pdu min_depth for %s: %d",
+ pdu.room_id, min_depth
+ )
+
+ prevs = {e_id for e_id, _ in pdu.prev_events}
+ seen = set(have_seen.keys())
+
+ if min_depth and pdu.depth < min_depth:
+                # Mark this as an outlier so that we don't notify the user
+                # about this message; this works around the fact that some
+                # events reference extremely old events that we don't want
+                # to send to clients.
+ pdu.internal_metadata.outlier = True
+ elif min_depth and pdu.depth > min_depth:
+ if get_missing and prevs - seen:
+ latest = yield self.store.get_latest_event_ids_in_room(
+ pdu.room_id
+ )
+
+ # We add the prev events that we have seen to the latest
+ # list to ensure the remote server doesn't give them to us
+ latest = set(latest)
+ latest |= seen
+
+ missing_events = yield self.get_missing_events(
+ origin,
+ pdu.room_id,
+ earliest_events_ids=list(latest),
+ latest_events=[pdu],
+ limit=10,
+ min_depth=min_depth,
+ )
+
+ # We want to sort these by depth so we process them and
+ # tell clients about them in order.
+ missing_events.sort(key=lambda x: x.depth)
+
+ for e in missing_events:
+ yield self._handle_new_pdu(
+ origin,
+ e,
+ get_missing=False
+ )
+
+ have_seen = yield self.store.have_events(
+ [ev for ev, _ in pdu.prev_events]
+ )
+
+ prevs = {e_id for e_id, _ in pdu.prev_events}
+ seen = set(have_seen.keys())
+ if prevs - seen:
+ fetch_state = True
+
+ if fetch_state:
+ # We need to get the state at this event, since we haven't
+ # processed all the prev events.
+ logger.debug(
+ "_handle_new_pdu getting state for %s",
+ pdu.room_id
+ )
+ try:
+ state, auth_chain = yield self.get_state_for_room(
+ origin, pdu.room_id, pdu.event_id,
+ )
+ except:
+ logger.warn("Failed to get state for event: %s", pdu.event_id)
+
+ yield self.handler.on_receive_pdu(
+ origin,
+ pdu,
+ backfilled=False,
+ state=state,
+ auth_chain=auth_chain,
+ )
+
+ def __str__(self):
+ return "<ReplicationLayer(%s)>" % self.server_name
+
+ def event_from_pdu_json(self, pdu_json, outlier=False):
+ event = FrozenEvent(
+ pdu_json
+ )
+
+ event.internal_metadata.outlier = outlier
+
+ return event
+
+ @defer.inlineCallbacks
+ def exchange_third_party_invite(self, invite):
+ ret = yield self.handler.exchange_third_party_invite(invite)
+ defer.returnValue(ret)
+
+ @defer.inlineCallbacks
+ def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
+ ret = yield self.handler.on_exchange_third_party_invite_request(
+ origin, room_id, event_dict
+ )
+ defer.returnValue(ret)
diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py
new file mode 100644
index 00000000..1a7cc02f
--- /dev/null
+++ b/synapse/federation/persistence.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" This module contains all the persistence actions done by the federation
+package.
+
+These actions are mostly only used by the :py:mod:`.replication` module.
+"""
+
+from twisted.internet import defer
+
+from synapse.util.logutils import log_function
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class TransactionActions(object):
+ """ Defines persistence actions that relate to handling Transactions.
+ """
+
+ def __init__(self, datastore):
+ self.store = datastore
+
+ @log_function
+ def have_responded(self, transaction):
+ """ Have we already responded to a transaction with the same id and
+ origin?
+
+ Returns:
+ Deferred: Results in `None` if we have not previously responded to
+ this transaction or a 2-tuple of `(int, dict)` representing the
+ response code and response body.
+ """
+ if not transaction.transaction_id:
+ raise RuntimeError("Cannot persist a transaction with no "
+ "transaction_id")
+
+ return self.store.get_received_txn_response(
+ transaction.transaction_id, transaction.origin
+ )
+
+ @log_function
+ def set_response(self, transaction, code, response):
+ """ Persist how we responded to a transaction.
+
+ Returns:
+ Deferred
+ """
+ if not transaction.transaction_id:
+ raise RuntimeError("Cannot persist a transaction with no "
+ "transaction_id")
+
+ return self.store.set_received_txn_response(
+ transaction.transaction_id,
+ transaction.origin,
+ code,
+ response,
+ )
+
+ @defer.inlineCallbacks
+ @log_function
+ def prepare_to_send(self, transaction):
+ """ Persists the `Transaction` we are about to send and works out the
+ correct value for the `prev_ids` key.
+
+ Returns:
+ Deferred
+ """
+ transaction.prev_ids = yield self.store.prep_send_transaction(
+ transaction.transaction_id,
+ transaction.destination,
+ transaction.origin_server_ts,
+ )
+
+ @log_function
+ def delivered(self, transaction, response_code, response_dict):
+ """ Marks the given `Transaction` as having been successfully
+ delivered to the remote homeserver, and what the response was.
+
+ Returns:
+ Deferred
+ """
+ return self.store.delivered_txn(
+ transaction.transaction_id,
+ transaction.destination,
+ response_code,
+ response_dict,
+ )
diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py
new file mode 100644
index 00000000..54a0c7ad
--- /dev/null
+++ b/synapse/federation/replication.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This layer is responsible for replicating with remote home servers using
+a given transport.
+"""
+
+from .federation_client import FederationClient
+from .federation_server import FederationServer
+
+from .transaction_queue import TransactionQueue
+
+from .persistence import TransactionActions
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationLayer(FederationClient, FederationServer):
+ """This layer is responsible for replicating with remote home servers over
+ the given transport. I.e., does the sending and receiving of PDUs to
+ remote home servers.
+
+ The layer communicates with the rest of the server via a registered
+ ReplicationHandler.
+
+ In more detail, the layer:
+ * Receives incoming data and processes it into transactions and pdus.
+ * Fetches any PDUs it thinks it might have missed.
+ * Keeps the current state for contexts up to date by applying the
+ suitable conflict resolution.
+ * Sends outgoing pdus wrapped in transactions.
+ * Fills out the references to previous pdus/transactions appropriately
+ for outgoing data.
+ """
+
+ def __init__(self, hs, transport_layer):
+ self.server_name = hs.hostname
+
+ self.keyring = hs.get_keyring()
+
+ self.transport_layer = transport_layer
+ self.transport_layer.register_received_handler(self)
+ self.transport_layer.register_request_handler(self)
+
+ self.federation_client = self
+
+ self.store = hs.get_datastore()
+
+ self.handler = None
+ self.edu_handlers = {}
+ self.query_handlers = {}
+
+ self._clock = hs.get_clock()
+
+ self.transaction_actions = TransactionActions(self.store)
+ self._transaction_queue = TransactionQueue(hs, transport_layer)
+
+ self._order = 0
+
+ self.hs = hs
+
+ def __str__(self):
+ return "<ReplicationLayer(%s)>" % self.server_name
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
new file mode 100644
index 00000000..aac6f1c1
--- /dev/null
+++ b/synapse/federation/transaction_queue.py
@@ -0,0 +1,384 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from .persistence import TransactionActions
+from .units import Transaction
+
+from synapse.api.errors import HttpResponseException
+from synapse.util.logutils import log_function
+from synapse.util.logcontext import PreserveLoggingContext
+from synapse.util.retryutils import (
+ get_retry_limiter, NotRetryingDestination,
+)
+import synapse.metrics
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+metrics = synapse.metrics.get_metrics_for(__name__)
+
+
+class TransactionQueue(object):
+ """This class makes sure we only have one transaction in flight at
+ a time for a given destination.
+
+ It batches pending PDUs into single transactions.
+ """
+
+ def __init__(self, hs, transport_layer):
+ self.server_name = hs.hostname
+
+ self.store = hs.get_datastore()
+ self.transaction_actions = TransactionActions(self.store)
+
+ self.transport_layer = transport_layer
+
+ self._clock = hs.get_clock()
+
+ # Is a mapping from destinations -> deferreds. Used to keep track
+ # of which destinations have transactions in flight and when they are
+ # done
+ self.pending_transactions = {}
+
+ metrics.register_callback(
+ "pending_destinations",
+ lambda: len(self.pending_transactions),
+ )
+
+ # Is a mapping from destination -> list of
+ # tuple(pending pdus, deferred, order)
+ self.pending_pdus_by_dest = pdus = {}
+ # destination -> list of tuple(edu, deferred)
+ self.pending_edus_by_dest = edus = {}
+
+ metrics.register_callback(
+ "pending_pdus",
+ lambda: sum(map(len, pdus.values())),
+ )
+ metrics.register_callback(
+ "pending_edus",
+ lambda: sum(map(len, edus.values())),
+ )
+
+ # destination -> list of tuple(failure, deferred)
+ self.pending_failures_by_dest = {}
+
+ # HACK to get unique tx id
+ self._next_txn_id = int(self._clock.time_msec())
+
+ def can_send_to(self, destination):
+ """Can we send messages to the given server?
+
+ We can't send messages to ourselves. If we are running on localhost
+        then we can only federate with other servers running on localhost.
+ Otherwise we only federate with servers on a public domain.
+
+ Args:
+ destination(str): The server we are possibly trying to send to.
+ Returns:
+ bool: True if we can send to the server.
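+
+        For example (illustrative), with a `server_name` of
+        "localhost:8448"::
+
+            can_send_to("localhost:8449")  # True
+            can_send_to("example.org")     # False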
+ """
+
+ if destination == self.server_name:
+ return False
+ if self.server_name.startswith("localhost"):
+ return destination.startswith("localhost")
+ else:
+ return not destination.startswith("localhost")
+
+ @defer.inlineCallbacks
+ def enqueue_pdu(self, pdu, destinations, order):
+ # We loop through all destinations to see whether we already have
+ # a transaction in progress. If we do, stick it in the pending_pdus
+ # table and we'll get back to it later.
+
+ destinations = set(destinations)
+ destinations = set(
+ dest for dest in destinations if self.can_send_to(dest)
+ )
+
+ logger.debug("Sending to: %s", str(destinations))
+
+ if not destinations:
+ return
+
+ deferreds = []
+
+ for destination in destinations:
+ deferred = defer.Deferred()
+ self.pending_pdus_by_dest.setdefault(destination, []).append(
+ (pdu, deferred, order)
+ )
+
+ def chain(failure):
+ if not deferred.called:
+ deferred.errback(failure)
+
+ def log_failure(f):
+ logger.warn("Failed to send pdu to %s: %s", destination, f.value)
+
+ deferred.addErrback(log_failure)
+
+ with PreserveLoggingContext():
+ self._attempt_new_transaction(destination).addErrback(chain)
+
+ deferreds.append(deferred)
+
+ yield defer.DeferredList(deferreds, consumeErrors=True)
+
+ # NO inlineCallbacks
+ def enqueue_edu(self, edu):
+ destination = edu.destination
+
+ if not self.can_send_to(destination):
+ return
+
+ deferred = defer.Deferred()
+ self.pending_edus_by_dest.setdefault(destination, []).append(
+ (edu, deferred)
+ )
+
+ def chain(failure):
+ if not deferred.called:
+ deferred.errback(failure)
+
+ def log_failure(f):
+ logger.warn("Failed to send edu to %s: %s", destination, f.value)
+
+ deferred.addErrback(log_failure)
+
+ with PreserveLoggingContext():
+ self._attempt_new_transaction(destination).addErrback(chain)
+
+ return deferred
+
+ @defer.inlineCallbacks
+ def enqueue_failure(self, failure, destination):
+ if destination == self.server_name or destination == "localhost":
+ return
+
+ deferred = defer.Deferred()
+
+ if not self.can_send_to(destination):
+ return
+
+ self.pending_failures_by_dest.setdefault(
+ destination, []
+ ).append(
+ (failure, deferred)
+ )
+
+ def chain(f):
+ if not deferred.called:
+ deferred.errback(f)
+
+ def log_failure(f):
+ logger.warn("Failed to send failure to %s: %s", destination, f.value)
+
+ deferred.addErrback(log_failure)
+
+ with PreserveLoggingContext():
+ self._attempt_new_transaction(destination).addErrback(chain)
+
+ yield deferred
+
+ @defer.inlineCallbacks
+ @log_function
+ def _attempt_new_transaction(self, destination):
+ # list of (pending_pdu, deferred, order)
+ if destination in self.pending_transactions:
+ # XXX: pending_transactions can get stuck on by a never-ending
+ # request at which point pending_pdus_by_dest just keeps growing.
+            # we need application-layer timeouts of some flavour for these
+            # requests
+ logger.debug(
+ "TX [%s] Transaction already in progress",
+ destination
+ )
+ return
+
+ pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
+ pending_edus = self.pending_edus_by_dest.pop(destination, [])
+ pending_failures = self.pending_failures_by_dest.pop(destination, [])
+
+ if pending_pdus:
+ logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
+ destination, len(pending_pdus))
+
+ if not pending_pdus and not pending_edus and not pending_failures:
+ logger.debug("TX [%s] Nothing to send", destination)
+ return
+
+ try:
+ self.pending_transactions[destination] = 1
+
+ logger.debug("TX [%s] _attempt_new_transaction", destination)
+
+ # Sort based on the order field
+ pending_pdus.sort(key=lambda t: t[2])
+
+ pdus = [x[0] for x in pending_pdus]
+ edus = [x[0] for x in pending_edus]
+ failures = [x[0].get_dict() for x in pending_failures]
+ deferreds = [
+ x[1]
+ for x in pending_pdus + pending_edus + pending_failures
+ ]
+
+ txn_id = str(self._next_txn_id)
+
+ limiter = yield get_retry_limiter(
+ destination,
+ self._clock,
+ self.store,
+ )
+
+ logger.debug(
+ "TX [%s] {%s} Attempting new transaction"
+ " (pdus: %d, edus: %d, failures: %d)",
+ destination, txn_id,
+ len(pending_pdus),
+ len(pending_edus),
+ len(pending_failures)
+ )
+
+ logger.debug("TX [%s] Persisting transaction...", destination)
+
+ transaction = Transaction.create_new(
+ origin_server_ts=int(self._clock.time_msec()),
+ transaction_id=txn_id,
+ origin=self.server_name,
+ destination=destination,
+ pdus=pdus,
+ edus=edus,
+ pdu_failures=failures,
+ )
+
+ self._next_txn_id += 1
+
+ yield self.transaction_actions.prepare_to_send(transaction)
+
+ logger.debug("TX [%s] Persisted transaction", destination)
+ logger.info(
+ "TX [%s] {%s} Sending transaction [%s],"
+ " (PDUs: %d, EDUs: %d, failures: %d)",
+ destination, txn_id,
+ transaction.transaction_id,
+ len(pending_pdus),
+ len(pending_edus),
+ len(pending_failures),
+ )
+
+ with limiter:
+ # Actually send the transaction
+
+ # FIXME (erikj): This is a bit of a hack to make the Pdu age
+ # keys work
+ def json_data_cb():
+ data = transaction.get_dict()
+ now = int(self._clock.time_msec())
+ if "pdus" in data:
+ for p in data["pdus"]:
+ if "age_ts" in p:
+ unsigned = p.setdefault("unsigned", {})
+ unsigned["age"] = now - int(p["age_ts"])
+ del p["age_ts"]
+ return data
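+
+            # Illustrative (hypothetical values): a PDU queued with
+            # {"age_ts": 1000} goes out carrying
+            # {"unsigned": {"age": now - 1000}}, the inverse of the rewrite
+            # done on receipt in FederationServer.on_incoming_transaction.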
+
+ try:
+ response = yield self.transport_layer.send_transaction(
+ transaction, json_data_cb
+ )
+ code = 200
+
+ if response:
+ for e_id, r in response.get("pdus", {}).items():
+ if "error" in r:
+ logger.warn(
+ "Transaction returned error for %s: %s",
+ e_id, r,
+ )
+ except HttpResponseException as e:
+ code = e.code
+ response = e.response
+
+ logger.info(
+ "TX [%s] {%s} got %d response",
+ destination, txn_id, code
+ )
+
+ logger.debug("TX [%s] Sent transaction", destination)
+ logger.debug("TX [%s] Marking as delivered...", destination)
+
+ yield self.transaction_actions.delivered(
+ transaction, code, response
+ )
+
+ logger.debug("TX [%s] Marked as delivered", destination)
+
+ logger.debug("TX [%s] Yielding to callbacks...", destination)
+
+ for deferred in deferreds:
+ if code == 200:
+ deferred.callback(None)
+ else:
+ deferred.errback(RuntimeError("Got status %d" % code))
+
+ # Ensures we don't continue until all callbacks on that
+ # deferred have fired
+ try:
+ yield deferred
+ except:
+ pass
+
+ logger.debug("TX [%s] Yielded to callbacks", destination)
+ except NotRetryingDestination:
+ logger.info(
+ "TX [%s] not ready for retry yet - "
+ "dropping transaction for now",
+ destination,
+ )
+ except RuntimeError as e:
+            # We capture this here as there is nothing that actually
+            # listens for this function's deferred.
+ logger.warn(
+ "TX [%s] Problem in _attempt_transaction: %s",
+ destination,
+ e,
+ )
+ except Exception as e:
+            # We capture this here as there is nothing that actually
+            # listens for this function's deferred.
+ logger.warn(
+ "TX [%s] Problem in _attempt_transaction: %s",
+ destination,
+ e,
+ )
+
+ for deferred in deferreds:
+ if not deferred.called:
+ deferred.errback(e)
+
+ finally:
+ # We want to be *very* sure we delete this after we stop processing
+ self.pending_transactions.pop(destination, None)
+
+ # Check to see if there is anything else to send.
+ self._attempt_new_transaction(destination)
diff --git a/synapse/federation/transport/__init__.py b/synapse/federation/transport/__init__.py
new file mode 100644
index 00000000..2a671b9a
--- /dev/null
+++ b/synapse/federation/transport/__init__.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The transport layer is responsible for both sending transactions to remote
+home servers and receiving a variety of requests from other home servers.
+
+By default this is done over HTTPS (and all home servers are required to
+support HTTPS), however individual pairings of servers may decide to
+communicate over a different (albeit still reliable) protocol.
+"""
+
+from .server import TransportLayerServer
+from .client import TransportLayerClient
+
+from synapse.util.ratelimitutils import FederationRateLimiter
+
+
+class TransportLayer(TransportLayerServer, TransportLayerClient):
+ """This is a basic implementation of the transport layer that translates
+ transactions and other requests to/from HTTP.
+
+ Attributes:
+ server_name (str): Local home server host
+
+ server (synapse.http.server.HttpServer): the http server to
+ register listeners on
+
+ client (synapse.http.client.HttpClient): the http client used to
+ send requests
+
+ request_handler (TransportRequestHandler): The handler to fire when we
+ receive requests for data.
+
+ received_handler (TransportReceivedHandler): The handler to fire when
+ we receive data.
+ """
+
+ def __init__(self, homeserver, server_name, server, client):
+ """
+        Args:
+            homeserver (synapse.server.HomeServer): the home server instance
+            server_name (str): Local home server host
+ server (synapse.protocol.http.HttpServer): the http server to
+ register listeners on
+ client (synapse.protocol.http.HttpClient): the http client used to
+ send requests
+ """
+ self.keyring = homeserver.get_keyring()
+ self.clock = homeserver.get_clock()
+ self.server_name = server_name
+ self.server = server
+ self.client = client
+ self.request_handler = None
+ self.received_handler = None
+
+ self.ratelimiter = FederationRateLimiter(
+ self.clock,
+ window_size=homeserver.config.federation_rc_window_size,
+ sleep_limit=homeserver.config.federation_rc_sleep_limit,
+ sleep_msec=homeserver.config.federation_rc_sleep_delay,
+ reject_limit=homeserver.config.federation_rc_reject_limit,
+ concurrent_requests=homeserver.config.federation_rc_concurrent,
+ )
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
new file mode 100644
index 00000000..3d59e1c6
--- /dev/null
+++ b/synapse/federation/transport/client.py
@@ -0,0 +1,345 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+from synapse.api.constants import Membership
+
+from synapse.api.urls import FEDERATION_PREFIX as PREFIX
+from synapse.util.logutils import log_function
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class TransportLayerClient(object):
+ """Sends federation HTTP requests to other servers"""
+
+ @log_function
+ def get_room_state(self, destination, room_id, event_id):
+ """ Requests all state for a given room from the given server at the
+ given event.
+
+ Args:
+ destination (str): The host name of the remote home server we want
+ to get the state from.
+            room_id (str): The room we want the state of.
+            event_id (str): The event we want the state at.
+
+ Returns:
+ Deferred: Results in a dict received from the remote homeserver.
+ """
+ logger.debug("get_room_state dest=%s, room=%s",
+ destination, room_id)
+
+ path = PREFIX + "/state/%s/" % room_id
+ return self.client.get_json(
+ destination, path=path, args={"event_id": event_id},
+ )
+
+ @log_function
+ def get_event(self, destination, event_id, timeout=None):
+ """ Requests the pdu with give id and origin from the given server.
+
+ Args:
+ destination (str): The host name of the remote home server we want
+                to get the event from.
+ event_id (str): The id of the event being requested.
+ timeout (int): How long to try (in ms) the destination for before
+ giving up. None indicates no timeout.
+
+ Returns:
+ Deferred: Results in a dict received from the remote homeserver.
+ """
+ logger.debug("get_pdu dest=%s, event_id=%s",
+ destination, event_id)
+
+ path = PREFIX + "/event/%s/" % (event_id, )
+ return self.client.get_json(destination, path=path, timeout=timeout)
+
+ @log_function
+ def backfill(self, destination, room_id, event_tuples, limit):
+ """ Requests `limit` previous PDUs in a given context before list of
+ PDUs.
+
+ Args:
+            destination (str)
+            room_id (str)
+            event_tuples (list)
+            limit (int)
+
+ Returns:
+ Deferred: Results in a dict received from the remote homeserver.
+ """
+ logger.debug(
+ "backfill dest=%s, room_id=%s, event_tuples=%s, limit=%s",
+ destination, room_id, repr(event_tuples), str(limit)
+ )
+
+ if not event_tuples:
+ # TODO: raise?
+ return
+
+ path = PREFIX + "/backfill/%s/" % (room_id,)
+
+ args = {
+ "v": event_tuples,
+ "limit": [str(limit)],
+ }
+
+ return self.client.get_json(
+ destination,
+ path=path,
+ args=args,
+ )
+
+ @defer.inlineCallbacks
+ @log_function
+ def send_transaction(self, transaction, json_data_callback=None):
+ """ Sends the given Transaction to its destination
+
+ Args:
+ transaction (Transaction)
+
+ Returns:
+ Deferred: Results of the deferred is a tuple in the form of
+ (response_code, response_body) where the response_body is a
+ python dict decoded from json
+ """
+ logger.debug(
+ "send_data dest=%s, txid=%s",
+ transaction.destination, transaction.transaction_id
+ )
+
+ if transaction.destination == self.server_name:
+ raise RuntimeError("Transport layer cannot send to itself!")
+
+ # FIXME: This is only used by the tests. The actual json sent is
+ # generated by the json_data_callback.
+ json_data = transaction.get_dict()
+
+ response = yield self.client.put_json(
+ transaction.destination,
+ path=PREFIX + "/send/%s/" % transaction.transaction_id,
+ data=json_data,
+ json_data_callback=json_data_callback,
+ )
+
+ logger.debug(
+ "send_data dest=%s, txid=%s, got response: 200",
+ transaction.destination, transaction.transaction_id,
+ )
+
+ defer.returnValue(response)
+
+ @defer.inlineCallbacks
+ @log_function
+ def make_query(self, destination, query_type, args, retry_on_dns_fail):
+ path = PREFIX + "/query/%s" % query_type
+
+ content = yield self.client.get_json(
+ destination=destination,
+ path=path,
+ args=args,
+ retry_on_dns_fail=retry_on_dns_fail,
+ )
+
+ defer.returnValue(content)
+
+ @defer.inlineCallbacks
+ @log_function
+ def make_membership_event(self, destination, room_id, user_id, membership):
+ valid_memberships = {Membership.JOIN, Membership.LEAVE}
+ if membership not in valid_memberships:
+ raise RuntimeError(
+ "make_membership_event called with membership='%s', must be one of %s" %
+ (membership, ",".join(valid_memberships))
+ )
+ path = PREFIX + "/make_%s/%s/%s" % (membership, room_id, user_id)
+
+ content = yield self.client.get_json(
+ destination=destination,
+ path=path,
+ retry_on_dns_fail=True,
+ )
+
+ defer.returnValue(content)
+
+ @defer.inlineCallbacks
+ @log_function
+ def send_join(self, destination, room_id, event_id, content):
+ path = PREFIX + "/send_join/%s/%s" % (room_id, event_id)
+
+ response = yield self.client.put_json(
+ destination=destination,
+ path=path,
+ data=content,
+ )
+
+ defer.returnValue(response)
+
+ @defer.inlineCallbacks
+ @log_function
+ def send_leave(self, destination, room_id, event_id, content):
+ path = PREFIX + "/send_leave/%s/%s" % (room_id, event_id)
+
+ response = yield self.client.put_json(
+ destination=destination,
+ path=path,
+ data=content,
+ )
+
+ defer.returnValue(response)
+
+ @defer.inlineCallbacks
+ @log_function
+ def send_invite(self, destination, room_id, event_id, content):
+ path = PREFIX + "/invite/%s/%s" % (room_id, event_id)
+
+ response = yield self.client.put_json(
+ destination=destination,
+ path=path,
+ data=content,
+ )
+
+ defer.returnValue(response)
+
+ @defer.inlineCallbacks
+ @log_function
+ def exchange_third_party_invite(self, destination, room_id, event_dict):
+ path = PREFIX + "/exchange_third_party_invite/%s" % (room_id,)
+
+ response = yield self.client.put_json(
+ destination=destination,
+ path=path,
+ data=event_dict,
+ )
+
+ defer.returnValue(response)
+
+ @defer.inlineCallbacks
+ @log_function
+ def get_event_auth(self, destination, room_id, event_id):
+ path = PREFIX + "/event_auth/%s/%s" % (room_id, event_id)
+
+ content = yield self.client.get_json(
+ destination=destination,
+ path=path,
+ )
+
+ defer.returnValue(content)
+
+ @defer.inlineCallbacks
+ @log_function
+ def send_query_auth(self, destination, room_id, event_id, content):
+ path = PREFIX + "/query_auth/%s/%s" % (room_id, event_id)
+
+ content = yield self.client.post_json(
+ destination=destination,
+ path=path,
+ data=content,
+ )
+
+ defer.returnValue(content)
+
+ @defer.inlineCallbacks
+ @log_function
+ def query_client_keys(self, destination, query_content):
+ """Query the device keys for a list of user ids hosted on a remote
+ server.
+
+ Request:
+ {
+ "device_keys": {
+ "<user_id>": ["<device_id>"]
+ } }
+
+ Response:
+ {
+ "device_keys": {
+ "<user_id>": {
+ "<device_id>": {...}
+ } } }
+
+ Args:
+ destination(str): The server to query.
+ query_content(dict): The user ids to query.
+ Returns:
+            A dict containing the device keys.
+ """
+ path = PREFIX + "/user/keys/query"
+
+ content = yield self.client.post_json(
+ destination=destination,
+ path=path,
+ data=query_content,
+ )
+ defer.returnValue(content)
+
+ @defer.inlineCallbacks
+ @log_function
+ def claim_client_keys(self, destination, query_content):
+ """Claim one-time keys for a list of devices hosted on a remote server.
+
+ Request:
+ {
+ "one_time_keys": {
+ "<user_id>": {
+ "<device_id>": "<algorithm>"
+ } } }
+
+ Response:
+ {
+ "device_keys": {
+ "<user_id>": {
+ "<device_id>": {
+ "<algorithm>:<key_id>": "<key_base64>"
+ } } } }
+
+ Args:
+ destination(str): The server to query.
+ query_content(dict): The user ids to query.
+ Returns:
+            A dict containing the one-time keys.
+ """
+
+ path = PREFIX + "/user/keys/claim"
+
+ content = yield self.client.post_json(
+ destination=destination,
+ path=path,
+ data=query_content,
+ )
+ defer.returnValue(content)
+
+ @defer.inlineCallbacks
+ @log_function
+ def get_missing_events(self, destination, room_id, earliest_events,
+ latest_events, limit, min_depth):
+ path = PREFIX + "/get_missing_events/%s" % (room_id,)
+
+ content = yield self.client.post_json(
+ destination=destination,
+ path=path,
+ data={
+ "limit": int(limit),
+ "min_depth": int(min_depth),
+ "earliest_events": earliest_events,
+ "latest_events": latest_events,
+ }
+ )
+
+ defer.returnValue(content)
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
new file mode 100644
index 00000000..127b4da4
--- /dev/null
+++ b/synapse/federation/transport/server.py
@@ -0,0 +1,453 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.urls import FEDERATION_PREFIX as PREFIX
+from synapse.api.errors import Codes, SynapseError
+from synapse.util.logutils import log_function
+
+import functools
+import logging
+import simplejson as json
+import re
+
+
+logger = logging.getLogger(__name__)
+
+
+class TransportLayerServer(object):
+ """Handles incoming federation HTTP requests"""
+
+ # A method just so we can pass 'self' as the authenticator to the Servlets
+ @defer.inlineCallbacks
+ def authenticate_request(self, request):
+ json_request = {
+ "method": request.method,
+ "uri": request.uri,
+ "destination": self.server_name,
+ "signatures": {},
+ }
+
+ content = None
+ origin = None
+
+ if request.method in ["PUT", "POST"]:
+ # TODO: Handle other method types? other content types?
+ try:
+ content_bytes = request.content.read()
+ content = json.loads(content_bytes)
+ json_request["content"] = content
+ except:
+ raise SynapseError(400, "Unable to parse JSON", Codes.BAD_JSON)
+
+ def parse_auth_header(header_str):
+ try:
+                params = header_str.split(" ")[1].split(",")
+ param_dict = dict(kv.split("=") for kv in params)
+
+ def strip_quotes(value):
+ if value.startswith("\""):
+ return value[1:-1]
+ else:
+ return value
+
+ origin = strip_quotes(param_dict["origin"])
+ key = strip_quotes(param_dict["key"])
+ sig = strip_quotes(param_dict["sig"])
+ return (origin, key, sig)
+ except:
+ raise SynapseError(
+ 400, "Malformed Authorization header", Codes.UNAUTHORIZED
+ )
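+
+        # An illustrative example of the header format parsed above
+        # (values are placeholders):
+        #   X-Matrix origin=example.org,key="ed25519:key1",sig="<base64>"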
+
+ auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+
+ if not auth_headers:
+ raise SynapseError(
+ 401, "Missing Authorization headers", Codes.UNAUTHORIZED,
+ )
+
+ for auth in auth_headers:
+ if auth.startswith("X-Matrix"):
+ (origin, key, sig) = parse_auth_header(auth)
+ json_request["origin"] = origin
+ json_request["signatures"].setdefault(origin, {})[key] = sig
+
+ if not json_request["signatures"]:
+ raise SynapseError(
+ 401, "Missing Authorization headers", Codes.UNAUTHORIZED,
+ )
+
+ yield self.keyring.verify_json_for_server(origin, json_request)
+
+ logger.info("Request from %s", origin)
+ request.authenticated_entity = origin
+
+ defer.returnValue((origin, content))
+
+ @log_function
+ def register_received_handler(self, handler):
+ """ Register a handler that will be fired when we receive data.
+
+ Args:
+ handler (TransportReceivedHandler)
+ """
+ FederationSendServlet(
+ handler,
+ authenticator=self,
+ ratelimiter=self.ratelimiter,
+ server_name=self.server_name,
+ ).register(self.server)
+
+ @log_function
+ def register_request_handler(self, handler):
+ """ Register a handler that will be fired when we get asked for data.
+
+ Args:
+ handler (TransportRequestHandler)
+ """
+ for servletclass in SERVLET_CLASSES:
+ servletclass(
+ handler,
+ authenticator=self,
+ ratelimiter=self.ratelimiter,
+ ).register(self.server)
+
+
+class BaseFederationServlet(object):
+ def __init__(self, handler, authenticator, ratelimiter):
+ self.handler = handler
+ self.authenticator = authenticator
+ self.ratelimiter = ratelimiter
+
+ def _wrap(self, code):
+ authenticator = self.authenticator
+ ratelimiter = self.ratelimiter
+
+ @defer.inlineCallbacks
+ @functools.wraps(code)
+ def new_code(request, *args, **kwargs):
+ try:
+ (origin, content) = yield authenticator.authenticate_request(request)
+ with ratelimiter.ratelimit(origin) as d:
+ yield d
+ response = yield code(
+ origin, content, request.args, *args, **kwargs
+ )
+ except:
+ logger.exception("authenticate_request failed")
+ raise
+ defer.returnValue(response)
+
+ # Extra logic that functools.wraps() doesn't finish
+ new_code.__self__ = code.__self__
+
+ return new_code
+
+ def register(self, server):
+ pattern = re.compile("^" + PREFIX + self.PATH + "$")
+
+ for method in ("GET", "PUT", "POST"):
+ code = getattr(self, "on_%s" % (method), None)
+ if code is None:
+ continue
+
+ server.register_path(method, pattern, self._wrap(code))
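+
+    # A subclass only needs a PATH pattern plus the on_<METHOD> callbacks it
+    # supports. An illustrative (hypothetical) example:
+    #
+    #   class PingServlet(BaseFederationServlet):
+    #       PATH = "/ping/([^/]*)"
+    #
+    #       def on_GET(self, origin, content, query, ping_id):
+    #           return defer.succeed((200, {"pong": ping_id}))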
+
+
+class FederationSendServlet(BaseFederationServlet):
+ PATH = "/send/([^/]*)/"
+
+ def __init__(self, handler, server_name, **kwargs):
+ super(FederationSendServlet, self).__init__(handler, **kwargs)
+ self.server_name = server_name
+
+ # This is when someone is trying to send us a bunch of data.
+ @defer.inlineCallbacks
+ def on_PUT(self, origin, content, query, transaction_id):
+ """ Called on PUT /send/<transaction_id>/
+
+ Args:
+ request (twisted.web.http.Request): The HTTP request.
+ transaction_id (str): The transaction_id associated with this
+ request. This is *not* None.
+
+ Returns:
+ Deferred: Results in a tuple of `(code, response)`, where
+ `response` is a python dict to be converted into JSON that is
+ used as the response body.
+ """
+ # Parse the request
+ try:
+ transaction_data = content
+
+ logger.debug(
+ "Decoded %s: %s",
+ transaction_id, str(transaction_data)
+ )
+
+ logger.info(
+ "Received txn %s from %s. (PDUs: %d, EDUs: %d, failures: %d)",
+ transaction_id, origin,
+ len(transaction_data.get("pdus", [])),
+ len(transaction_data.get("edus", [])),
+ len(transaction_data.get("failures", [])),
+ )
+
+ # We should ideally be getting this from the security layer.
+ # origin = body["origin"]
+
+ # Add some extra data to the transaction dict that isn't included
+ # in the request body.
+ transaction_data.update(
+ transaction_id=transaction_id,
+ destination=self.server_name
+ )
+
+ except Exception as e:
+ logger.exception(e)
+ defer.returnValue((400, {"error": "Invalid transaction"}))
+ return
+
+ try:
+ code, response = yield self.handler.on_incoming_transaction(
+ transaction_data
+ )
+ except:
+ logger.exception("on_incoming_transaction failed")
+ raise
+
+ defer.returnValue((code, response))
+
+
+class FederationPullServlet(BaseFederationServlet):
+ PATH = "/pull/"
+
+ # This is for when someone asks us for everything since version X
+ def on_GET(self, origin, content, query):
+ return self.handler.on_pull_request(query["origin"][0], query["v"])
+
+
+class FederationEventServlet(BaseFederationServlet):
+ PATH = "/event/([^/]*)/"
+
+ # This is when someone asks for a data item for a given server data_id pair.
+ def on_GET(self, origin, content, query, event_id):
+ return self.handler.on_pdu_request(origin, event_id)
+
+
+class FederationStateServlet(BaseFederationServlet):
+ PATH = "/state/([^/]*)/"
+
+ # This is when someone asks for all data for a given context.
+ def on_GET(self, origin, content, query, context):
+ return self.handler.on_context_state_request(
+ origin,
+ context,
+ query.get("event_id", [None])[0],
+ )
+
+
+class FederationBackfillServlet(BaseFederationServlet):
+ PATH = "/backfill/([^/]*)/"
+
+ def on_GET(self, origin, content, query, context):
+ versions = query["v"]
+ limits = query["limit"]
+
+ if not limits:
+ return defer.succeed((400, {"error": "Did not include limit param"}))
+
+ limit = int(limits[-1])
+
+ return self.handler.on_backfill_request(origin, context, versions, limit)
+
+
+class FederationQueryServlet(BaseFederationServlet):
+ PATH = "/query/([^/]*)"
+
+ # This is when we receive a server-server Query
+ def on_GET(self, origin, content, query, query_type):
+ return self.handler.on_query_request(
+ query_type,
+ {k: v[0].decode("utf-8") for k, v in query.items()}
+ )
+
+
+class FederationMakeJoinServlet(BaseFederationServlet):
+ PATH = "/make_join/([^/]*)/([^/]*)"
+
+ @defer.inlineCallbacks
+ def on_GET(self, origin, content, query, context, user_id):
+ content = yield self.handler.on_make_join_request(context, user_id)
+ defer.returnValue((200, content))
+
+
+class FederationMakeLeaveServlet(BaseFederationServlet):
+ PATH = "/make_leave/([^/]*)/([^/]*)"
+
+ @defer.inlineCallbacks
+ def on_GET(self, origin, content, query, context, user_id):
+ content = yield self.handler.on_make_leave_request(context, user_id)
+ defer.returnValue((200, content))
+
+
+class FederationSendLeaveServlet(BaseFederationServlet):
+ PATH = "/send_leave/([^/]*)/([^/]*)"
+
+ @defer.inlineCallbacks
+ def on_PUT(self, origin, content, query, room_id, txid):
+ content = yield self.handler.on_send_leave_request(origin, content)
+ defer.returnValue((200, content))
+
+
+class FederationEventAuthServlet(BaseFederationServlet):
+ PATH = "/event_auth/([^/]*)/([^/]*)"
+
+ def on_GET(self, origin, content, query, context, event_id):
+ return self.handler.on_event_auth(origin, context, event_id)
+
+
+class FederationSendJoinServlet(BaseFederationServlet):
+ PATH = "/send_join/([^/]*)/([^/]*)"
+
+ @defer.inlineCallbacks
+ def on_PUT(self, origin, content, query, context, event_id):
+ # TODO(paul): assert that context/event_id parsed from path actually
+ # match those given in content
+ content = yield self.handler.on_send_join_request(origin, content)
+ defer.returnValue((200, content))
+
+
+class FederationInviteServlet(BaseFederationServlet):
+ PATH = "/invite/([^/]*)/([^/]*)"
+
+ @defer.inlineCallbacks
+ def on_PUT(self, origin, content, query, context, event_id):
+ # TODO(paul): assert that context/event_id parsed from path actually
+ # match those given in content
+ content = yield self.handler.on_invite_request(origin, content)
+ defer.returnValue((200, content))
+
+
+class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
+ PATH = "/exchange_third_party_invite/([^/]*)"
+
+ @defer.inlineCallbacks
+ def on_PUT(self, origin, content, query, room_id):
+ content = yield self.handler.on_exchange_third_party_invite_request(
+ origin, room_id, content
+ )
+ defer.returnValue((200, content))
+
+
+class FederationClientKeysQueryServlet(BaseFederationServlet):
+ PATH = "/user/keys/query"
+
+ @defer.inlineCallbacks
+ def on_POST(self, origin, content, query):
+ response = yield self.handler.on_query_client_keys(origin, content)
+ defer.returnValue((200, response))
+
+
+class FederationClientKeysClaimServlet(BaseFederationServlet):
+ PATH = "/user/keys/claim"
+
+ @defer.inlineCallbacks
+ def on_POST(self, origin, content, query):
+ response = yield self.handler.on_claim_client_keys(origin, content)
+ defer.returnValue((200, response))
+
+
+class FederationQueryAuthServlet(BaseFederationServlet):
+ PATH = "/query_auth/([^/]*)/([^/]*)"
+
+ @defer.inlineCallbacks
+ def on_POST(self, origin, content, query, context, event_id):
+ new_content = yield self.handler.on_query_auth_request(
+ origin, content, event_id
+ )
+
+ defer.returnValue((200, new_content))
+
+
+class FederationGetMissingEventsServlet(BaseFederationServlet):
+    # TODO(paul): Why is the trailing "/" optional on this path alone?
+ PATH = "/get_missing_events/([^/]*)/?"
+
+ @defer.inlineCallbacks
+ def on_POST(self, origin, content, query, room_id):
+ limit = int(content.get("limit", 10))
+ min_depth = int(content.get("min_depth", 0))
+ earliest_events = content.get("earliest_events", [])
+ latest_events = content.get("latest_events", [])
+
+ content = yield self.handler.on_get_missing_events(
+ origin,
+ room_id=room_id,
+ earliest_events=earliest_events,
+ latest_events=latest_events,
+ min_depth=min_depth,
+ limit=limit,
+ )
+
+ defer.returnValue((200, content))
+
+
+class On3pidBindServlet(BaseFederationServlet):
+ PATH = "/3pid/onbind"
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ content_bytes = request.content.read()
+ content = json.loads(content_bytes)
+ if "invites" in content:
+ last_exception = None
+ for invite in content["invites"]:
+ try:
+ yield self.handler.exchange_third_party_invite(invite)
+ except Exception as e:
+ last_exception = e
+ if last_exception:
+ raise last_exception
+ defer.returnValue((200, {}))
+
+ # Avoid doing remote HS authorization checks which are done by default by
+ # BaseFederationServlet.
+ def _wrap(self, code):
+ return code
+
+
+SERVLET_CLASSES = (
+ FederationPullServlet,
+ FederationEventServlet,
+ FederationStateServlet,
+ FederationBackfillServlet,
+ FederationQueryServlet,
+ FederationMakeJoinServlet,
+ FederationMakeLeaveServlet,
+ FederationSendJoinServlet,
+ FederationSendLeaveServlet,
+ FederationInviteServlet,
+ FederationQueryAuthServlet,
+ FederationGetMissingEventsServlet,
+ FederationEventAuthServlet,
+ FederationClientKeysQueryServlet,
+ FederationClientKeysClaimServlet,
+ FederationThirdPartyInviteExchangeServlet,
+ On3pidBindServlet,
+)
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
new file mode 100644
index 00000000..816f55bf
--- /dev/null
+++ b/synapse/federation/units.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Defines the JSON structure of the protocol units used by the server to
+server protocol.
+"""
+
+from synapse.util.jsonobject import JsonEncodedObject
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class Edu(JsonEncodedObject):
+ """ An Edu represents a piece of data sent from one homeserver to another.
+
+ In comparison to Pdus, Edus are not persisted for a long time on disk, are
+ not meaningful beyond a given pair of homeservers, and don't have an
+ internal ID or previous references graph.
+ """
+
+ valid_keys = [
+ "origin",
+ "destination",
+ "edu_type",
+ "content",
+ ]
+
+ required_keys = [
+ "edu_type",
+ ]
+
+ internal_keys = [
+ "origin",
+ "destination",
+ ]
+
+
+class Transaction(JsonEncodedObject):
+ """ A transaction is a list of Pdus and Edus to be sent to a remote home
+ server with some extra metadata.
+
+ Example transaction::
+
+ {
+ "origin": "foo",
+ "prev_ids": ["abc", "def"],
+ "pdus": [
+ ...
+ ],
+ }
+
+ """
+
+ valid_keys = [
+ "transaction_id",
+ "origin",
+ "destination",
+ "origin_server_ts",
+ "previous_ids",
+ "pdus",
+ "edus",
+ "transaction_id",
+ "destination",
+ "pdu_failures",
+ ]
+
+ internal_keys = [
+ "transaction_id",
+ "destination",
+ ]
+
+ required_keys = [
+ "transaction_id",
+ "origin",
+ "destination",
+ "origin_server_ts",
+ "pdus",
+ ]
+
+ def __init__(self, transaction_id=None, pdus=[], **kwargs):
+ """ If we include a list of pdus then we decode then as PDU's
+ automatically.
+ """
+
+ # If there's no EDUs then remove the arg
+ if "edus" in kwargs and not kwargs["edus"]:
+ del kwargs["edus"]
+
+ super(Transaction, self).__init__(
+ transaction_id=transaction_id,
+ pdus=pdus,
+ **kwargs
+ )
+
+ @staticmethod
+ def create_new(pdus, **kwargs):
+ """ Used to create a new transaction. Will auto fill out
+ transaction_id and origin_server_ts keys.
+ """
+ if "origin_server_ts" not in kwargs:
+ raise KeyError(
+ "Require 'origin_server_ts' to construct a Transaction"
+ )
+ if "transaction_id" not in kwargs:
+ raise KeyError(
+ "Require 'transaction_id' to construct a Transaction"
+ )
+
+ for p in pdus:
+ p.transaction_id = kwargs["transaction_id"]
+
+ kwargs["pdus"] = [p.get_pdu_json() for p in pdus]
+
+ return Transaction(**kwargs)
diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py
new file mode 100644
index 00000000..6a2339f2
--- /dev/null
+++ b/synapse/handlers/__init__.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.appservice.scheduler import AppServiceScheduler
+from synapse.appservice.api import ApplicationServiceApi
+from .register import RegistrationHandler
+from .room import (
+ RoomCreationHandler, RoomMemberHandler, RoomListHandler, RoomContextHandler,
+)
+from .message import MessageHandler
+from .events import EventStreamHandler, EventHandler
+from .federation import FederationHandler
+from .profile import ProfileHandler
+from .presence import PresenceHandler
+from .directory import DirectoryHandler
+from .typing import TypingNotificationHandler
+from .admin import AdminHandler
+from .appservice import ApplicationServicesHandler
+from .sync import SyncHandler
+from .auth import AuthHandler
+from .identity import IdentityHandler
+from .receipts import ReceiptsHandler
+from .search import SearchHandler
+
+
+class Handlers(object):
+
+ """ A collection of all the event handlers.
+
+ There's no need to lazily create these; we'll just make them all eagerly
+ at construction time.
+ """
+
+ def __init__(self, hs):
+ self.registration_handler = RegistrationHandler(hs)
+ self.message_handler = MessageHandler(hs)
+ self.room_creation_handler = RoomCreationHandler(hs)
+ self.room_member_handler = RoomMemberHandler(hs)
+ self.event_stream_handler = EventStreamHandler(hs)
+ self.event_handler = EventHandler(hs)
+ self.federation_handler = FederationHandler(hs)
+ self.profile_handler = ProfileHandler(hs)
+ self.presence_handler = PresenceHandler(hs)
+ self.room_list_handler = RoomListHandler(hs)
+ self.directory_handler = DirectoryHandler(hs)
+ self.typing_notification_handler = TypingNotificationHandler(hs)
+ self.admin_handler = AdminHandler(hs)
+ self.receipts_handler = ReceiptsHandler(hs)
+ asapi = ApplicationServiceApi(hs)
+ self.appservice_handler = ApplicationServicesHandler(
+ hs, asapi, AppServiceScheduler(
+ clock=hs.get_clock(),
+ store=hs.get_datastore(),
+ as_api=asapi
+ )
+ )
+ self.sync_handler = SyncHandler(hs)
+ self.auth_handler = AuthHandler(hs)
+ self.identity_handler = IdentityHandler(hs)
+ self.search_handler = SearchHandler(hs)
+ self.room_context_handler = RoomContextHandler(hs)
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
new file mode 100644
index 00000000..6519f183
--- /dev/null
+++ b/synapse/handlers/_base.py
@@ -0,0 +1,347 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import LimitExceededError, SynapseError, AuthError
+from synapse.crypto.event_signing import add_hashes_and_signatures
+from synapse.api.constants import Membership, EventTypes
+from synapse.types import UserID, RoomAlias
+
+from synapse.util.logcontext import PreserveLoggingContext
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class BaseHandler(object):
+ """
+ Common base class for the event handlers.
+
+ :type store: synapse.storage.events.StateStore
+ :type state_handler: synapse.state.StateHandler
+ """
+
+ def __init__(self, hs):
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+ self.notifier = hs.get_notifier()
+ self.state_handler = hs.get_state_handler()
+ self.distributor = hs.get_distributor()
+ self.ratelimiter = hs.get_ratelimiter()
+ self.clock = hs.get_clock()
+ self.hs = hs
+
+ self.signing_key = hs.config.signing_key[0]
+ self.server_name = hs.hostname
+
+ self.event_builder_factory = hs.get_event_builder_factory()
+
+ @defer.inlineCallbacks
+ def _filter_events_for_client(self, user_id, events, is_guest=False,
+ require_all_visible_for_guests=True):
+ # Assumes that the user has at some point joined the room, unless is_guest.
+
+ def allowed(event, membership, visibility):
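+ # Visibility ladder, as implemented below: world_readable events
+ # are always visible; guests see nothing else; joined members see
+ # everything; otherwise fall back on the room's history_visibility
+ # ("shared"/"joined"/"invited") against the user's membership.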
+ if visibility == "world_readable":
+ return True
+
+ if is_guest:
+ return False
+
+ if membership == Membership.JOIN:
+ return True
+
+ if event.type == EventTypes.RoomHistoryVisibility:
+ return not is_guest
+
+ if visibility == "shared":
+ return True
+ elif visibility == "joined":
+ return membership == Membership.JOIN
+ elif visibility == "invited":
+ return membership == Membership.INVITE
+
+ return True
+
+ event_id_to_state = yield self.store.get_state_for_events(
+ frozenset(e.event_id for e in events),
+ types=(
+ (EventTypes.RoomHistoryVisibility, ""),
+ (EventTypes.Member, user_id),
+ )
+ )
+
+ events_to_return = []
+ for event in events:
+ state = event_id_to_state[event.event_id]
+
+ membership_event = state.get((EventTypes.Member, user_id), None)
+ if membership_event:
+ membership = membership_event.membership
+ else:
+ membership = None
+
+ visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
+ if visibility_event:
+ visibility = visibility_event.content.get("history_visibility", "shared")
+ else:
+ visibility = "shared"
+
+ should_include = allowed(event, membership, visibility)
+ if should_include:
+ events_to_return.append(event)
+
+ if (require_all_visible_for_guests
+ and is_guest
+ and len(events_to_return) < len(events)):
+ # This indicates that some events in the requested range were not
+ # visible to guest users. To be safe, we reject the entire request,
+ # so that we don't have to worry about interpreting visibility
+ # boundaries.
+ raise AuthError(403, "User %s does not have permission" % (
+ user_id
+ ))
+
+ defer.returnValue(events_to_return)
+
+ def ratelimit(self, user_id):
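+ # Ask the ratelimiter whether this user may send a message now; if
+ # not, surface the wait as a LimitExceededError carrying
+ # retry_after_ms.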
+ time_now = self.clock.time()
+ allowed, time_allowed = self.ratelimiter.send_message(
+ user_id, time_now,
+ msg_rate_hz=self.hs.config.rc_messages_per_second,
+ burst_count=self.hs.config.rc_message_burst_count,
+ )
+ if not allowed:
+ raise LimitExceededError(
+ retry_after_ms=int(1000*(time_allowed - time_now)),
+ )
+
+ @defer.inlineCallbacks
+ def _create_new_client_event(self, builder):
+ latest_ret = yield self.store.get_latest_events_in_room(
+ builder.room_id,
+ )
+
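+ # The new event's depth is one greater than the deepest forward
+ # extremity it references (or 1 if the room has no events yet).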
+ if latest_ret:
+ depth = max([d for _, _, d in latest_ret]) + 1
+ else:
+ depth = 1
+
+ prev_events = [(e, h) for e, h, _ in latest_ret]
+
+ builder.prev_events = prev_events
+ builder.depth = depth
+
+ state_handler = self.state_handler
+
+ context = yield state_handler.compute_event_context(builder)
+
+ if builder.is_state():
+ builder.prev_state = yield self.store.add_event_hashes(
+ context.prev_state_events
+ )
+
+ yield self.auth.add_auth_events(builder, context)
+
+ add_hashes_and_signatures(
+ builder, self.server_name, self.signing_key
+ )
+
+ event = builder.build()
+
+ logger.debug(
+ "Created event %s with current state: %s",
+ event.event_id, context.current_state,
+ )
+
+ defer.returnValue(
+ (event, context,)
+ )
+
+ @defer.inlineCallbacks
+ def handle_new_client_event(self, event, context, extra_destinations=[],
+ extra_users=[], suppress_auth=False):
+ # We now need to go and hit out to wherever we need to hit out to.
+
+ if not suppress_auth:
+ self.auth.check(event, auth_events=context.current_state)
+
+ yield self.maybe_kick_guest_users(event, context.current_state.values())
+
+ if event.type == EventTypes.CanonicalAlias:
+ # Check the alias is actually valid (at this time at least)
+ room_alias_str = event.content.get("alias", None)
+ if room_alias_str:
+ room_alias = RoomAlias.from_string(room_alias_str)
+ directory_handler = self.hs.get_handlers().directory_handler
+ mapping = yield directory_handler.get_association(room_alias)
+
+ if mapping["room_id"] != event.room_id:
+ raise SynapseError(
+ 400,
+ "Room alias %s does not point to the room" % (
+ room_alias_str,
+ )
+ )
+
+ federation_handler = self.hs.get_handlers().federation_handler
+
+ if event.type == EventTypes.Member:
+ if event.content["membership"] == Membership.INVITE:
+ event.unsigned["invite_room_state"] = [
+ {
+ "type": e.type,
+ "state_key": e.state_key,
+ "content": e.content,
+ "sender": e.sender,
+ }
+ for k, e in context.current_state.items()
+ if e.type in (
+ EventTypes.JoinRules,
+ EventTypes.CanonicalAlias,
+ EventTypes.RoomAvatar,
+ EventTypes.Name,
+ )
+ ]
+
+ invitee = UserID.from_string(event.state_key)
+ if not self.hs.is_mine(invitee):
+ # TODO: Can we add signature from remote server in a nicer
+ # way? If we have been invited by a remote server, we need
+ # to get them to sign the event.
+
+ returned_invite = yield federation_handler.send_invite(
+ invitee.domain,
+ event,
+ )
+
+ event.unsigned.pop("room_state", None)
+
+ # TODO: Make sure the signatures actually are correct.
+ event.signatures.update(
+ returned_invite.signatures
+ )
+
+ if event.type == EventTypes.Redaction:
+ if self.auth.check_redaction(event, auth_events=context.current_state):
+ original_event = yield self.store.get_event(
+ event.redacts,
+ check_redacted=False,
+ get_prev_content=False,
+ allow_rejected=False,
+ allow_none=False
+ )
+ if event.user_id != original_event.user_id:
+ raise AuthError(
+ 403,
+ "You don't have permission to redact events"
+ )
+
+ (event_stream_id, max_stream_id) = yield self.store.persist_event(
+ event, context=context
+ )
+
+ destinations = set(extra_destinations)
+ for k, s in context.current_state.items():
+ try:
+ if k[0] == EventTypes.Member:
+ if s.content["membership"] == Membership.JOIN:
+ destinations.add(
+ UserID.from_string(s.state_key).domain
+ )
+ except SynapseError:
+ logger.warn(
+ "Failed to get destination from event %s", s.event_id
+ )
+
+ with PreserveLoggingContext():
+ # Don't block waiting on waking up all the listeners.
+ notify_d = self.notifier.on_new_room_event(
+ event, event_stream_id, max_stream_id,
+ extra_users=extra_users
+ )
+
+ def log_failure(f):
+ logger.warn(
+ "Failed to notify about %s: %s",
+ event.event_id, f.value
+ )
+
+ notify_d.addErrback(log_failure)
+
+ # If invite, remove room_state from unsigned before sending.
+ event.unsigned.pop("invite_room_state", None)
+
+ federation_handler.handle_new_event(
+ event, destinations=destinations,
+ )
+
+ @defer.inlineCallbacks
+ def maybe_kick_guest_users(self, event, current_state):
+ # Technically this function invalidates current_state by changing it.
+ # Hopefully this isn't that important to the caller.
+ if event.type == EventTypes.GuestAccess:
+ guest_access = event.content.get("guest_access", "forbidden")
+ if guest_access != "can_join":
+ yield self.kick_guest_users(current_state)
+
+ @defer.inlineCallbacks
+ def kick_guest_users(self, current_state):
+ for member_event in current_state:
+ try:
+ if member_event.type != EventTypes.Member:
+ continue
+
+ if not self.hs.is_mine(UserID.from_string(member_event.state_key)):
+ continue
+
+ if member_event.content["membership"] not in {
+ Membership.JOIN,
+ Membership.INVITE
+ }:
+ continue
+
+ if (
+ "kind" not in member_event.content
+ or member_event.content["kind"] != "guest"
+ ):
+ continue
+
+ # We make the user choose to leave, rather than have the
+ # event-sender kick them. This is partially because we don't
+ # need to worry about power levels, and partially because guest
+ # users are a concept which doesn't hugely work over federation,
+ # and having homeservers have their own users leave keeps more
+ # of that decision-making and control local to the guest-having
+ # homeserver.
+ message_handler = self.hs.get_handlers().message_handler
+ yield message_handler.create_and_send_event(
+ {
+ "type": EventTypes.Member,
+ "state_key": member_event.state_key,
+ "content": {
+ "membership": Membership.LEAVE,
+ "kind": "guest"
+ },
+ "room_id": member_event.room_id,
+ "sender": member_event.state_key
+ },
+ ratelimit=False,
+ )
+ except Exception as e:
+ logger.warn("Error kicking guest user: %s" % (e,))
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
new file mode 100644
index 00000000..d852a185
--- /dev/null
+++ b/synapse/handlers/admin.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from ._base import BaseHandler
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class AdminHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(AdminHandler, self).__init__(hs)
+
+ @defer.inlineCallbacks
+ def get_whois(self, user):
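+ # Builds a whois report shaped roughly like (illustrative):
+ # {"user_id": ..., "devices": [{"device_id": ...,
+ # "sessions": [{"connections": [{"ip": ..., "user_agent": ...,
+ # "last_seen": ...}]}]}]}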
+ res = yield self.store.get_user_ip_and_agents(user)
+
+ d = {}
+ for r in res:
+ # Note that device_id is always None
+ device = d.setdefault(r["device_id"], {})
+ session = device.setdefault(r["access_token"], [])
+ session.append({
+ "ip": r["ip"],
+ "user_agent": r["user_agent"],
+ "last_seen": r["last_seen"],
+ })
+
+ ret = {
+ "user_id": user.to_string(),
+ "devices": [
+ {
+ "device_id": k,
+ "sessions": [
+ {
+ # "access_token": x, TODO (erikj)
+ "connections": y,
+ }
+ for x, y in v.items()
+ ]
+ }
+ for k, v in d.items()
+ ],
+ }
+
+ defer.returnValue(ret)
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
new file mode 100644
index 00000000..1240e516
--- /dev/null
+++ b/synapse/handlers/appservice.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes
+from synapse.appservice import ApplicationService
+from synapse.types import UserID
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def log_failure(failure):
+ logger.error(
+ "Application Services Failure",
+ exc_info=(
+ failure.type,
+ failure.value,
+ failure.getTracebackObject()
+ )
+ )
+
+
+# NB: Purposefully not inheriting BaseHandler since that contains way too much
+# setup code which this handler does not need or use. This makes testing a lot
+# easier.
+class ApplicationServicesHandler(object):
+
+ def __init__(self, hs, appservice_api, appservice_scheduler):
+ self.store = hs.get_datastore()
+ self.hs = hs
+ self.appservice_api = appservice_api
+ self.scheduler = appservice_scheduler
+ self.started_scheduler = False
+
+ @defer.inlineCallbacks
+ def notify_interested_services(self, event):
+ """Notifies (pushes) all application services interested in this event.
+
+ Pushing is done asynchronously, so this method won't block for any
+ prolonged length of time.
+
+ Args:
+ event(Event): The event to push out to interested services.
+ """
+ # Gather interested services
+ services = yield self._get_services_for_event(event)
+ if len(services) == 0:
+ return # no services need notifying
+
+ # Do we know this user exists? If not, poke the user query API for
+ # all services which match that user regex. This needs to block as these
+ # user queries need to be made BEFORE pushing the event.
+ yield self._check_user_exists(event.sender)
+ if event.type == EventTypes.Member:
+ yield self._check_user_exists(event.state_key)
+
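+ # Start the background scheduler lazily, on the first event that
+ # needs pushing, rather than at construction time.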
+ if not self.started_scheduler:
+ self.scheduler.start().addErrback(log_failure)
+ self.started_scheduler = True
+
+ # Fork off pushes to these services
+ for service in services:
+ self.scheduler.submit_event_for_as(service, event)
+
+ @defer.inlineCallbacks
+ def query_user_exists(self, user_id):
+ """Check if any application service knows this user_id exists.
+
+ Args:
+ user_id(str): The user to query if they exist on any AS.
+ Returns:
+ True if this user exists on at least one application service.
+ """
+ user_query_services = yield self._get_services_for_user(
+ user_id=user_id
+ )
+ for user_service in user_query_services:
+ is_known_user = yield self.appservice_api.query_user(
+ user_service, user_id
+ )
+ if is_known_user:
+ defer.returnValue(True)
+ defer.returnValue(False)
+
+ @defer.inlineCallbacks
+ def query_room_alias_exists(self, room_alias):
+ """Check if an application service knows this room alias exists.
+
+ Args:
+ room_alias(RoomAlias): The room alias to query.
+ Returns:
+ namedtuple: with keys "room_id" and "servers" or None if no
+ association can be found.
+ """
+ room_alias_str = room_alias.to_string()
+ alias_query_services = yield self._get_services_for_event(
+ event=None,
+ restrict_to=ApplicationService.NS_ALIASES,
+ alias_list=[room_alias_str]
+ )
+ for alias_service in alias_query_services:
+ is_known_alias = yield self.appservice_api.query_alias(
+ alias_service, room_alias_str
+ )
+ if is_known_alias:
+ # the alias exists now so don't query more ASes.
+ result = yield self.store.get_association_from_room_alias(
+ room_alias
+ )
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def _get_services_for_event(self, event, restrict_to="", alias_list=None):
+ """Retrieve a list of application services interested in this event.
+
+ Args:
+ event(Event): The event to check. Can be None if alias_list is not.
+ restrict_to(str): The namespace to restrict regex tests to.
+ alias_list: A list of aliases to get services for. If None, this
+ list is obtained from the database.
+ Returns:
+ list<ApplicationService>: A list of services interested in this
+ event based on the service regex.
+ """
+ member_list = None
+ if hasattr(event, "room_id"):
+ # We need to know the aliases associated with this event.room_id,
+ # if any.
+ if not alias_list:
+ alias_list = yield self.store.get_aliases_for_room(
+ event.room_id
+ )
+ # We need to know the members associated with this event.room_id,
+ # if any.
+ member_list = yield self.store.get_users_in_room(event.room_id)
+
+ services = yield self.store.get_app_services()
+ interested_list = [
+ s for s in services if (
+ s.is_interested(event, restrict_to, alias_list, member_list)
+ )
+ ]
+ defer.returnValue(interested_list)
+
+ @defer.inlineCallbacks
+ def _get_services_for_user(self, user_id):
+ services = yield self.store.get_app_services()
+ interested_list = [
+ s for s in services if (
+ s.is_interested_in_user(user_id)
+ )
+ ]
+ defer.returnValue(interested_list)
+
+ @defer.inlineCallbacks
+ def _is_unknown_user(self, user_id):
+ user = UserID.from_string(user_id)
+ if not self.hs.is_mine(user):
+ # we don't know if they are unknown or not since it isn't one of our
+ # users. We can't poke ASes.
+ defer.returnValue(False)
+ return
+
+ user_info = yield self.store.get_user_by_id(user_id)
+ if user_info:
+ defer.returnValue(False)
+ return
+
+ # user not found; could be the AS though, so check.
+ services = yield self.store.get_app_services()
+ service_list = [s for s in services if s.sender == user_id]
+ defer.returnValue(len(service_list) == 0)
+
+ @defer.inlineCallbacks
+ def _check_user_exists(self, user_id):
+ unknown_user = yield self._is_unknown_user(user_id)
+ if unknown_user:
+ exists = yield self.query_user_exists(user_id)
+ defer.returnValue(exists)
+ defer.returnValue(True)
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
new file mode 100644
index 00000000..be157e2b
--- /dev/null
+++ b/synapse/handlers/auth.py
@@ -0,0 +1,480 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from ._base import BaseHandler
+from synapse.api.constants import LoginType
+from synapse.types import UserID
+from synapse.api.errors import AuthError, LoginError, Codes
+from synapse.util.async import run_on_reactor
+
+from twisted.web.client import PartialDownloadError
+
+import logging
+import bcrypt
+import pymacaroons
+import simplejson
+
+import synapse.util.stringutils as stringutils
+
+
+logger = logging.getLogger(__name__)
+
+
+class AuthHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(AuthHandler, self).__init__(hs)
+ self.checkers = {
+ LoginType.PASSWORD: self._check_password_auth,
+ LoginType.RECAPTCHA: self._check_recaptcha,
+ LoginType.EMAIL_IDENTITY: self._check_email_identity,
+ LoginType.DUMMY: self._check_dummy_auth,
+ }
+ self.bcrypt_rounds = hs.config.bcrypt_rounds
+ self.sessions = {}
+ self.INVALID_TOKEN_HTTP_STATUS = 401
+
+ @defer.inlineCallbacks
+ def check_auth(self, flows, clientdict, clientip):
+ """
+ Takes a dictionary sent by the client in the login / registration
+ protocol and handles the login flow.
+
+ As a side effect, this function fills in the 'creds' key on the user's
+ session with a map, which maps each auth-type (str) to the relevant
+ identity authenticated by that auth-type (mostly str, but for captcha, bool).
+
+ Args:
+ flows (list): A list of login flows. Each flow is an ordered list of
+ strings representing auth-types. At least one full
+ flow must be completed in order for auth to be successful.
+ clientdict: The dictionary from the client root level, not the
+ 'auth' key: this method prompts for auth if none is sent.
+ clientip (str): The IP address of the client.
+ Returns:
+ A tuple of (authed, dict, dict) where authed is true if the client
+ has successfully completed an auth flow. If it is true, the first
+ dict contains the authenticated credentials of each stage.
+
+ If authed is false, the first dictionary is the server response to
+ the login request and should be passed back to the client.
+
+ In either case, the second dict contains the parameters for this
+ request (which may have been given only in a previous call).
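+
+ For example (illustrative), a password-or-captcha policy might
+ pass::
+
+ flows = [
+ [LoginType.PASSWORD],
+ [LoginType.RECAPTCHA, LoginType.DUMMY],
+ ]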
+ """
+
+ authdict = None
+ sid = None
+ if clientdict and 'auth' in clientdict:
+ authdict = clientdict['auth']
+ del clientdict['auth']
+ if 'session' in authdict:
+ sid = authdict['session']
+ session = self._get_session_info(sid)
+
+ if len(clientdict) > 0:
+ # This was designed to allow the client to omit the parameters
+ # and just supply the session in subsequent calls, so it could
+ # split auth between devices by just sharing the session (e.g. so
+ # you could continue registration from your phone having clicked
+ # the email auth link on there). It's probably too open to abuse
+ # because it lets unauthenticated clients store arbitrary objects
+ # on a home server.
+ # Revisit: Assuming the REST APIs do sensible validation, the
+ # data isn't arbitrary.
+ session['clientdict'] = clientdict
+ self._save_session(session)
+ elif 'clientdict' in session:
+ clientdict = session['clientdict']
+
+ if not authdict:
+ defer.returnValue(
+ (False, self._auth_dict_for_flows(flows, session), clientdict)
+ )
+
+ if 'creds' not in session:
+ session['creds'] = {}
+ creds = session['creds']
+
+ # check auth type currently being presented
+ if 'type' in authdict:
+ if authdict['type'] not in self.checkers:
+ raise LoginError(400, "", Codes.UNRECOGNIZED)
+ result = yield self.checkers[authdict['type']](authdict, clientip)
+ if result:
+ creds[authdict['type']] = result
+ self._save_session(session)
+
+ for f in flows:
+ if len(set(f) - set(creds.keys())) == 0:
+ logger.info("Auth completed with creds: %r", creds)
+ self._remove_session(session)
+ defer.returnValue((True, creds, clientdict))
+
+ ret = self._auth_dict_for_flows(flows, session)
+ ret['completed'] = creds.keys()
+ defer.returnValue((False, ret, clientdict))
+
+ @defer.inlineCallbacks
+ def add_oob_auth(self, stagetype, authdict, clientip):
+ """
+ Adds the result of out-of-band authentication into an existing auth
+ session. Currently used for adding the result of fallback auth.
+ """
+ if stagetype not in self.checkers:
+ raise LoginError(400, "", Codes.MISSING_PARAM)
+ if 'session' not in authdict:
+ raise LoginError(400, "", Codes.MISSING_PARAM)
+
+ sess = self._get_session_info(
+ authdict['session']
+ )
+ if 'creds' not in sess:
+ sess['creds'] = {}
+ creds = sess['creds']
+
+ result = yield self.checkers[stagetype](authdict, clientip)
+ if result:
+ creds[stagetype] = result
+ self._save_session(sess)
+ defer.returnValue(True)
+ defer.returnValue(False)
+
+ @defer.inlineCallbacks
+ def _check_password_auth(self, authdict, _):
+ if "user" not in authdict or "password" not in authdict:
+ raise LoginError(400, "", Codes.MISSING_PARAM)
+
+ user_id = authdict["user"]
+ password = authdict["password"]
+ if not user_id.startswith('@'):
+ user_id = UserID.create(user_id, self.hs.hostname).to_string()
+
+ user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
+ self._check_password(user_id, password, password_hash)
+ defer.returnValue(user_id)
+
+ @defer.inlineCallbacks
+ def _check_recaptcha(self, authdict, clientip):
+ try:
+ user_response = authdict["response"]
+ except KeyError:
+ # Client tried to provide captcha but didn't give the parameter:
+ # bad request.
+ raise LoginError(
+ 400, "Captcha response is required",
+ errcode=Codes.CAPTCHA_NEEDED
+ )
+
+ logger.info(
+ "Submitting recaptcha response %s with remoteip %s",
+ user_response, clientip
+ )
+
+ # TODO: get this from the homeserver rather than creating a new one for
+ # each request
+ try:
+ client = self.hs.get_simple_http_client()
+ resp_body = yield client.post_urlencoded_get_json(
+ self.hs.config.recaptcha_siteverify_api,
+ args={
+ 'secret': self.hs.config.recaptcha_private_key,
+ 'response': user_response,
+ 'remoteip': clientip,
+ }
+ )
+ except PartialDownloadError as pde:
+ # Twisted is silly
+ data = pde.response
+ resp_body = simplejson.loads(data)
+
+ if 'success' in resp_body and resp_body['success']:
+ defer.returnValue(True)
+ raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
+
+ @defer.inlineCallbacks
+ def _check_email_identity(self, authdict, _):
+ yield run_on_reactor()
+
+ if 'threepid_creds' not in authdict:
+ raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
+
+ threepid_creds = authdict['threepid_creds']
+ identity_handler = self.hs.get_handlers().identity_handler
+
+ logger.info("Getting validated threepid. threepidcreds: %r" % (threepid_creds,))
+ threepid = yield identity_handler.threepid_from_creds(threepid_creds)
+
+ if not threepid:
+ raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
+
+ threepid['threepid_creds'] = authdict['threepid_creds']
+
+ defer.returnValue(threepid)
+
+ @defer.inlineCallbacks
+ def _check_dummy_auth(self, authdict, _):
+ yield run_on_reactor()
+ defer.returnValue(True)
+
+ def _get_params_recaptcha(self):
+ return {"public_key": self.hs.config.recaptcha_public_key}
+
+ def _auth_dict_for_flows(self, flows, session):
+ public_flows = []
+ for f in flows:
+ public_flows.append(f)
+
+ get_params = {
+ LoginType.RECAPTCHA: self._get_params_recaptcha,
+ }
+
+ params = {}
+
+ for f in public_flows:
+ for stage in f:
+ if stage in get_params and stage not in params:
+ params[stage] = get_params[stage]()
+
+ return {
+ "session": session['id'],
+ "flows": [{"stages": f} for f in public_flows],
+ "params": params
+ }
+
+ def _get_session_info(self, session_id):
+ if session_id not in self.sessions:
+ session_id = None
+
+ if not session_id:
+ # create a new session
+ while session_id is None or session_id in self.sessions:
+ session_id = stringutils.random_string(24)
+ self.sessions[session_id] = {
+ "id": session_id,
+ }
+
+ return self.sessions[session_id]
+
+ @defer.inlineCallbacks
+ def login_with_password(self, user_id, password):
+ """
+ Authenticates the user with their username and password.
+
+ Used only by the v1 login API.
+
+ Args:
+ user_id (str): User ID
+ password (str): Password
+ Returns:
+ A tuple of:
+ The user's ID.
+ The access token for the user's session.
+ The refresh token for the user's session.
+ Raises:
+ StoreError if there was a problem storing the token.
+ LoginError if there was an authentication problem.
+ """
+ user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
+ self._check_password(user_id, password, password_hash)
+
+ logger.info("Logging in user %s", user_id)
+ access_token = yield self.issue_access_token(user_id)
+ refresh_token = yield self.issue_refresh_token(user_id)
+ defer.returnValue((user_id, access_token, refresh_token))
+
+ @defer.inlineCallbacks
+ def get_login_tuple_for_user_id(self, user_id):
+ """
+ Gets login tuple for the user with the given user ID.
+ The user is assumed to have been authenticated by some other
+ mechanism (e.g. CAS).
+
+ Args:
+ user_id (str): User ID
+ Returns:
+ A tuple of:
+ The user's ID.
+ The access token for the user's session.
+ The refresh token for the user's session.
+ Raises:
+ StoreError if there was a problem storing the token.
+ LoginError if there was an authentication problem.
+ """
+ user_id, ignored = yield self._find_user_id_and_pwd_hash(user_id)
+
+ logger.info("Logging in user %s", user_id)
+ access_token = yield self.issue_access_token(user_id)
+ refresh_token = yield self.issue_refresh_token(user_id)
+ defer.returnValue((user_id, access_token, refresh_token))
+
+ @defer.inlineCallbacks
+ def does_user_exist(self, user_id):
+ try:
+ yield self._find_user_id_and_pwd_hash(user_id)
+ defer.returnValue(True)
+ except LoginError:
+ defer.returnValue(False)
+
+ @defer.inlineCallbacks
+ def _find_user_id_and_pwd_hash(self, user_id):
+ """Checks to see if a user with the given id exists. Will check case
+ insensitively, but will raise if there are multiple inexact matches.
+
+ Returns:
+ tuple: A 2-tuple of `(canonical_user_id, password_hash)`
+ """
+ user_infos = yield self.store.get_users_by_id_case_insensitive(user_id)
+ if not user_infos:
+ logger.warn("Attempted to login as %s but they do not exist", user_id)
+ raise LoginError(403, "", errcode=Codes.FORBIDDEN)
+
+ if len(user_infos) > 1:
+ if user_id not in user_infos:
+ logger.warn(
+ "Attempted to login as %s but it matches more than one user "
+ "inexactly: %r",
+ user_id, user_infos.keys()
+ )
+ raise LoginError(403, "", errcode=Codes.FORBIDDEN)
+
+ defer.returnValue((user_id, user_infos[user_id]))
+ else:
+ defer.returnValue(user_infos.popitem())
+
+ def _check_password(self, user_id, password, stored_hash):
+ """Checks that user_id has passed password, raises LoginError if not."""
+ if not self.validate_hash(password, stored_hash):
+ logger.warn("Failed password login for user %s", user_id)
+ raise LoginError(403, "", errcode=Codes.FORBIDDEN)
+
+ @defer.inlineCallbacks
+ def issue_access_token(self, user_id):
+ access_token = self.generate_access_token(user_id)
+ yield self.store.add_access_token_to_user(user_id, access_token)
+ defer.returnValue(access_token)
+
+ @defer.inlineCallbacks
+ def issue_refresh_token(self, user_id):
+ refresh_token = self.generate_refresh_token(user_id)
+ yield self.store.add_refresh_token_to_user(user_id, refresh_token)
+ defer.returnValue(refresh_token)
+
+ def generate_access_token(self, user_id, extra_caveats=None):
+ extra_caveats = extra_caveats or []
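+ # The serialized macaroon below is what issue_access_token stores
+ # and hands back to the client as its access token.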
+ macaroon = self._generate_base_macaroon(user_id)
+ macaroon.add_first_party_caveat("type = access")
+ now = self.hs.get_clock().time_msec()
+ expiry = now + (60 * 60 * 1000)
+ macaroon.add_first_party_caveat("time < %d" % (expiry,))
+ for caveat in extra_caveats:
+ macaroon.add_first_party_caveat(caveat)
+ return macaroon.serialize()
+
+ def generate_refresh_token(self, user_id):
+ m = self._generate_base_macaroon(user_id)
+ m.add_first_party_caveat("type = refresh")
+ # Important to add a nonce, because otherwise every refresh token for a
+ # user will be the same.
+ m.add_first_party_caveat("nonce = %s" % (
+ stringutils.random_string_with_symbols(16),
+ ))
+ return m.serialize()
+
+ def generate_short_term_login_token(self, user_id):
+ macaroon = self._generate_base_macaroon(user_id)
+ macaroon.add_first_party_caveat("type = login")
+ now = self.hs.get_clock().time_msec()
+ expiry = now + (2 * 60 * 1000)
+ macaroon.add_first_party_caveat("time < %d" % (expiry,))
+ return macaroon.serialize()
+
+ def validate_short_term_login_token_and_get_user_id(self, login_token):
+ try:
+ macaroon = pymacaroons.Macaroon.deserialize(login_token)
+ auth_api = self.hs.get_auth()
+ auth_api.validate_macaroon(macaroon, "login", [auth_api.verify_expiry])
+ return self._get_user_from_macaroon(macaroon)
+ except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
+ raise AuthError(401, "Invalid token", errcode=Codes.UNKNOWN_TOKEN)
+
+ def _generate_base_macaroon(self, user_id):
+ macaroon = pymacaroons.Macaroon(
+ location=self.hs.config.server_name,
+ identifier="key",
+ key=self.hs.config.macaroon_secret_key)
+ macaroon.add_first_party_caveat("gen = 1")
+ macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
+ return macaroon
+
+ def _get_user_from_macaroon(self, macaroon):
+ user_prefix = "user_id = "
+ for caveat in macaroon.caveats:
+ if caveat.caveat_id.startswith(user_prefix):
+ return caveat.caveat_id[len(user_prefix):]
+ raise AuthError(
+ self.INVALID_TOKEN_HTTP_STATUS, "No user_id found in token",
+ errcode=Codes.UNKNOWN_TOKEN
+ )
+
+ @defer.inlineCallbacks
+ def set_password(self, user_id, newpassword):
+ password_hash = self.hash(newpassword)
+
+ yield self.store.user_set_password_hash(user_id, password_hash)
+ yield self.store.user_delete_access_tokens(user_id)
+ yield self.hs.get_pusherpool().remove_pushers_by_user(user_id)
+ yield self.store.flush_user(user_id)
+
+ @defer.inlineCallbacks
+ def add_threepid(self, user_id, medium, address, validated_at):
+ yield self.store.user_add_threepid(
+ user_id, medium, address, validated_at,
+ self.hs.get_clock().time_msec()
+ )
+
+ def _save_session(self, session):
+ # TODO: Persistent storage
+ logger.debug("Saving session %s", session)
+ self.sessions[session["id"]] = session
+
+ def _remove_session(self, session):
+ logger.debug("Removing session %s", session)
+ del self.sessions[session["id"]]
+
+ def hash(self, password):
+ """Computes a secure hash of password.
+
+ Args:
+ password (str): Password to hash.
+
+ Returns:
+ Hashed password (str).
+ """
+ return bcrypt.hashpw(password, bcrypt.gensalt(self.bcrypt_rounds))
+
+ def validate_hash(self, password, stored_hash):
+ """Validates that self.hash(password) == stored_hash.
+
+ Args:
+ password (str): Password to hash.
+ stored_hash (str): Expected hash value.
+
+ Returns:
+ Whether self.hash(password) == stored_hash (bool).
+ """
+ return bcrypt.checkpw(password, stored_hash)
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
new file mode 100644
index 00000000..e41a6888
--- /dev/null
+++ b/synapse/handlers/directory.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+from ._base import BaseHandler
+
+from synapse.api.errors import SynapseError, Codes, CodeMessageException
+from synapse.api.constants import EventTypes
+from synapse.types import RoomAlias
+
+import logging
+import string
+
+logger = logging.getLogger(__name__)
+
+
+class DirectoryHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(DirectoryHandler, self).__init__(hs)
+
+ self.federation = hs.get_replication_layer()
+ self.federation.register_query_handler(
+ "directory", self.on_directory_query
+ )
+
+ @defer.inlineCallbacks
+ def _create_association(self, room_alias, room_id, servers=None):
+ # general association creation for both human users and app services
+
+ for wchar in string.whitespace:
+ if wchar in room_alias.localpart:
+ raise SynapseError(400, "Invalid characters in room alias")
+
+ if not self.hs.is_mine(room_alias):
+ raise SynapseError(400, "Room alias must be local")
+ # TODO(erikj): Change this.
+
+ # TODO(erikj): Add transactions.
+ # TODO(erikj): Check if there is a current association.
+ if not servers:
+ servers = yield self.store.get_joined_hosts_for_room(room_id)
+
+ if not servers:
+ raise SynapseError(400, "Failed to get server list")
+
+ yield self.store.create_room_alias_association(
+ room_alias,
+ room_id,
+ servers
+ )
+
+ @defer.inlineCallbacks
+ def create_association(self, user_id, room_alias, room_id, servers=None):
+ # association creation for human users
+ # TODO(erikj): Do user auth.
+
+ can_create = yield self.can_modify_alias(
+ room_alias,
+ user_id=user_id
+ )
+ if not can_create:
+ raise SynapseError(
+ 400, "This alias is reserved by an application service.",
+ errcode=Codes.EXCLUSIVE
+ )
+ yield self._create_association(room_alias, room_id, servers)
+
+ @defer.inlineCallbacks
+ def create_appservice_association(self, service, room_alias, room_id,
+ servers=None):
+ if not service.is_interested_in_alias(room_alias.to_string()):
+ raise SynapseError(
+ 400, "This application service has not reserved"
+ " this kind of alias.", errcode=Codes.EXCLUSIVE
+ )
+
+ # association creation for app services
+ yield self._create_association(room_alias, room_id, servers)
+
+ @defer.inlineCallbacks
+ def delete_association(self, user_id, room_alias):
+ # association deletion for human users
+
+ # TODO Check if server admin
+
+ can_delete = yield self.can_modify_alias(
+ room_alias,
+ user_id=user_id
+ )
+ if not can_delete:
+ raise SynapseError(
+ 400, "This alias is reserved by an application service.",
+ errcode=Codes.EXCLUSIVE
+ )
+
+ yield self._delete_association(room_alias)
+
+ @defer.inlineCallbacks
+ def delete_appservice_association(self, service, room_alias):
+ if not service.is_interested_in_alias(room_alias.to_string()):
+ raise SynapseError(
+ 400,
+ "This application service has not reserved this kind of alias",
+ errcode=Codes.EXCLUSIVE
+ )
+ yield self._delete_association(room_alias)
+
+ @defer.inlineCallbacks
+ def _delete_association(self, room_alias):
+ if not self.hs.is_mine(room_alias):
+ raise SynapseError(400, "Room alias must be local")
+
+ yield self.store.delete_room_alias(room_alias)
+
+ # TODO - Looks like _update_room_alias_event has never been implemented
+ # if room_id:
+ # yield self._update_room_alias_events(user_id, room_id)
+
+ @defer.inlineCallbacks
+ def get_association(self, room_alias):
+ room_id = None
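+ # Local aliases are resolved from our own store (or an interested
+ # application service); remote aliases via a federation "directory"
+ # query to the alias's home server.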
+ if self.hs.is_mine(room_alias):
+ result = yield self.get_association_from_room_alias(
+ room_alias
+ )
+
+ if result:
+ room_id = result.room_id
+ servers = result.servers
+ else:
+ try:
+ result = yield self.federation.make_query(
+ destination=room_alias.domain,
+ query_type="directory",
+ args={
+ "room_alias": room_alias.to_string(),
+ },
+ retry_on_dns_fail=False,
+ )
+ except CodeMessageException as e:
+ logging.warn("Error retrieving alias")
+ if e.code == 404:
+ result = None
+ else:
+ raise
+
+ if result and "room_id" in result and "servers" in result:
+ room_id = result["room_id"]
+ servers = result["servers"]
+
+ if not room_id:
+ raise SynapseError(
+ 404,
+ "Room alias %s not found" % (room_alias.to_string(),),
+ Codes.NOT_FOUND
+ )
+
+ extra_servers = yield self.store.get_joined_hosts_for_room(room_id)
+ servers = set(extra_servers) | set(servers)
+
+ # If this server is in the list of servers, return it first.
+ if self.server_name in servers:
+ servers = (
+ [self.server_name]
+ + [s for s in servers if s != self.server_name]
+ )
+ else:
+ servers = list(servers)
+
+ defer.returnValue({
+ "room_id": room_id,
+ "servers": servers,
+ })
+ return
+
+ @defer.inlineCallbacks
+ def on_directory_query(self, args):
+ room_alias = RoomAlias.from_string(args["room_alias"])
+ if not self.hs.is_mine(room_alias):
+ raise SynapseError(
+ 400, "Room Alias is not hosted on this Home Server"
+ )
+
+ result = yield self.get_association_from_room_alias(
+ room_alias
+ )
+
+ if result is not None:
+ defer.returnValue({
+ "room_id": result.room_id,
+ "servers": result.servers,
+ })
+ else:
+ raise SynapseError(
+ 404,
+ "Room alias %r not found" % (room_alias.to_string(),),
+ Codes.NOT_FOUND
+ )
+
+ @defer.inlineCallbacks
+ def send_room_alias_update_event(self, user_id, room_id):
+ aliases = yield self.store.get_aliases_for_room(room_id)
+
+ msg_handler = self.hs.get_handlers().message_handler
+ yield msg_handler.create_and_send_event({
+ "type": EventTypes.Aliases,
+ "state_key": self.hs.hostname,
+ "room_id": room_id,
+ "sender": user_id,
+ "content": {"aliases": aliases},
+ }, ratelimit=False)
+
+ @defer.inlineCallbacks
+ def get_association_from_room_alias(self, room_alias):
+ result = yield self.store.get_association_from_room_alias(
+ room_alias
+ )
+ if not result:
+ # Query AS to see if it exists
+ as_handler = self.hs.get_handlers().appservice_handler
+ result = yield as_handler.query_room_alias_exists(room_alias)
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def can_modify_alias(self, alias, user_id=None):
+ # Any application service "interested" in an alias they are regexing on
+ # can modify the alias.
+ # Users can only modify the alias if ALL the interested services have
+ # non-exclusive locks on the alias (or there are no interested services)
+ services = yield self.store.get_app_services()
+ interested_services = [
+ s for s in services if s.is_interested_in_alias(alias.to_string())
+ ]
+
+ for service in interested_services:
+ if user_id == service.sender:
+ # this user IS the app service so they can do whatever they like
+ defer.returnValue(True)
+ return
+ elif service.is_exclusive_alias(alias.to_string()):
+ # another service has an exclusive lock on this alias.
+ defer.returnValue(False)
+ return
+ # either no interested services, or no service with an exclusive lock
+ defer.returnValue(True)
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
new file mode 100644
index 00000000..0e4c0d4d
--- /dev/null
+++ b/synapse/handlers/events.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.util.logutils import log_function
+from synapse.types import UserID
+from synapse.events.utils import serialize_event
+
+from ._base import BaseHandler
+
+import logging
+import random
+
+
+logger = logging.getLogger(__name__)
+
+
+class EventStreamHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(EventStreamHandler, self).__init__(hs)
+
+ # Count of active streams per user
+ self._streams_per_user = {}
+ # Grace timers per user to delay the "stopped" signal
+ self._stop_timer_per_user = {}
+
+ self.distributor = hs.get_distributor()
+ self.distributor.declare("started_user_eventstream")
+ self.distributor.declare("stopped_user_eventstream")
+
+ self.clock = hs.get_clock()
+
+ self.notifier = hs.get_notifier()
+
+ @defer.inlineCallbacks
+ def started_stream(self, user):
+ """Tells the presence handler that we have started an eventstream for
+ the user.
+
+ Args:
+ user (User): The user who started a stream.
+ Returns:
+ A deferred that completes once their presence has been updated.
+ """
+ if user not in self._streams_per_user:
+ self._streams_per_user[user] = 0
+ if user in self._stop_timer_per_user:
+ try:
+ self.clock.cancel_call_later(
+ self._stop_timer_per_user.pop(user)
+ )
+ except Exception:
+ logger.exception("Failed to cancel event timer")
+ else:
+ yield self.distributor.fire("started_user_eventstream", user)
+
+ self._streams_per_user[user] += 1
+
+ def stopped_stream(self, user):
+ """If there are no streams for a user this starts a timer that will
+ notify the presence handler that we haven't got an event stream for
+ the user unless the user starts a new stream in 30 seconds.
+
+ Args:
+ user (User): The user who stopped a stream.
+ """
+ self._streams_per_user[user] -= 1
+ if not self._streams_per_user[user]:
+ del self._streams_per_user[user]
+
+ # 30 seconds of grace to allow the client to reconnect again
+ # before we think they're gone
+ def _later():
+ logger.debug("_later stopped_user_eventstream %s", user)
+
+ self._stop_timer_per_user.pop(user, None)
+
+ return self.distributor.fire("stopped_user_eventstream", user)
+
+ logger.debug("Scheduling _later: for %s", user)
+ self._stop_timer_per_user[user] = (
+ self.clock.call_later(30, _later)
+ )
+
+ @defer.inlineCallbacks
+ @log_function
+ def get_stream(self, auth_user_id, pagin_config, timeout=0,
+ as_client_event=True, affect_presence=True,
+ only_room_events=False, room_id=None, is_guest=False):
+ """Fetches the events stream for a given user.
+
+ If `only_room_events` is `True` only room events will be returned.
+ """
+ auth_user = UserID.from_string(auth_user_id)
+
+ try:
+ if affect_presence:
+ yield self.started_stream(auth_user)
+
+ if timeout:
+ # If they've set a timeout set a minimum limit.
+ timeout = max(timeout, 500)
+
+ # Add some randomness to this value to try and mitigate against
+ # thundering herds on restart.
+ timeout = random.randint(int(timeout*0.9), int(timeout*1.1))
+
+ if is_guest:
+ yield self.distributor.fire(
+ "user_joined_room", user=auth_user, room_id=room_id
+ )
+
+ events, tokens = yield self.notifier.get_events_for(
+ auth_user, pagin_config, timeout,
+ only_room_events=only_room_events,
+ is_guest=is_guest, guest_room_id=room_id
+ )
+
+ time_now = self.clock.time_msec()
+
+ chunks = [
+ serialize_event(e, time_now, as_client_event) for e in events
+ ]
+
+ chunk = {
+ "chunk": chunks,
+ "start": tokens[0].to_string(),
+ "end": tokens[1].to_string(),
+ }
+
+ defer.returnValue(chunk)
+
+ finally:
+ if affect_presence:
+ self.stopped_stream(auth_user)
+
+
+class EventHandler(BaseHandler):
+
+ @defer.inlineCallbacks
+ def get_event(self, user, event_id):
+ """Retrieve a single specified event.
+
+ Args:
+ user (synapse.types.UserID): The user requesting the event
+ event_id (str): The event ID to obtain.
+ Returns:
+ dict: An event, or None if there is no event matching this ID.
+ Raises:
+ SynapseError if there was a problem retrieving this event, or
+ AuthError if the user does not have the rights to inspect this
+ event.
+ """
+ event = yield self.store.get_event(event_id)
+
+ if not event:
+ defer.returnValue(None)
+ return
+
+ if hasattr(event, "room_id"):
+ yield self.auth.check_joined_room(event.room_id, user.to_string())
+
+ defer.returnValue(event)
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
new file mode 100644
index 00000000..c1bce07e
--- /dev/null
+++ b/synapse/handlers/federation.py
@@ -0,0 +1,1715 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains handlers for federation events."""
+
+from ._base import BaseHandler
+
+from synapse.api.errors import (
+ AuthError, FederationError, StoreError, CodeMessageException, SynapseError,
+)
+from synapse.api.constants import EventTypes, Membership, RejectedReason
+from synapse.events.validator import EventValidator
+from synapse.util import unwrapFirstError
+from synapse.util.logcontext import PreserveLoggingContext
+from synapse.util.logutils import log_function
+from synapse.util.async import run_on_reactor
+from synapse.util.frozenutils import unfreeze
+from synapse.crypto.event_signing import (
+ compute_event_signature, add_hashes_and_signatures,
+)
+from synapse.types import UserID
+
+from synapse.events.utils import prune_event
+
+from synapse.util.retryutils import NotRetryingDestination
+
+from twisted.internet import defer
+
+import itertools
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class FederationHandler(BaseHandler):
+ """Handles events that originated from federation.
+ Responsible for:
+ a) handling received Pdus before handing them on as Events to the rest
+ of the home server (including auth and state conflict resolution)
+ b) converting events that were produced by local clients that may need
+ to be sent to remote home servers.
+ c) doing the necessary dances to invite remote users and join remote
+ rooms.
+ """
+
+ def __init__(self, hs):
+ super(FederationHandler, self).__init__(hs)
+
+ self.hs = hs
+
+ self.distributor.observe(
+ "user_joined_room",
+ self._on_user_joined
+ )
+
+ self.waiting_for_join_list = {}
+
+ self.store = hs.get_datastore()
+ self.replication_layer = hs.get_replication_layer()
+ self.state_handler = hs.get_state_handler()
+ self.server_name = hs.hostname
+ self.keyring = hs.get_keyring()
+
+ self.replication_layer.set_handler(self)
+
+ # When joining a room we need to queue any events for that room up
+ self.room_queues = {}
+
+ def handle_new_event(self, event, destinations):
+ """ Takes in an event from the client to server side, that has already
+ been authed and handled by the state module, and sends it to any
+ remote home servers that may be interested.
+
+ Args:
+ event: The event to send
+ destinations: A list of destinations to send it to
+
+ Returns:
+ Deferred: Resolved when it has successfully been queued for
+ processing.
+ """
+
+ return self.replication_layer.send_pdu(event, destinations)
+
+ @log_function
+ @defer.inlineCallbacks
+ def on_receive_pdu(self, origin, pdu, backfilled, state=None,
+ auth_chain=None):
+ """ Called by the ReplicationLayer when we have a new pdu. We need to
+ do auth checks and put it through the StateHandler.
+ """
+ event = pdu
+
+ logger.debug("Got event: %s", event.event_id)
+
+ # If we are currently in the process of joining this room, then we
+ # queue up events for later processing.
+ if event.room_id in self.room_queues:
+ self.room_queues[event.room_id].append((pdu, origin))
+ return
+
+ logger.debug("Processing event: %s", event.event_id)
+
+ logger.debug("Event: %s", event)
+
+ # FIXME (erikj): Awful hack to make the case where we are not currently
+ # in the room work
+ current_state = None
+ is_in_room = yield self.auth.check_host_in_room(
+ event.room_id,
+ self.server_name
+ )
+ if not is_in_room and not event.internal_metadata.is_outlier():
+ logger.debug("Got event for room we're not in.")
+
+ try:
+ event_stream_id, max_stream_id = yield self._persist_auth_tree(
+ auth_chain, state, event
+ )
+ except AuthError as e:
+ raise FederationError(
+ "ERROR",
+ e.code,
+ e.msg,
+ affected=event.event_id,
+ )
+
+ else:
+ event_ids = set()
+ if state:
+ event_ids |= {e.event_id for e in state}
+ if auth_chain:
+ event_ids |= {e.event_id for e in auth_chain}
+
+ seen_ids = set(
+ (yield self.store.have_events(event_ids)).keys()
+ )
+
+ if state and auth_chain is not None:
+ # If we have any state or auth_chain given to us by the replication
+ # layer, then we should handle them (if we haven't before.)
+
+ event_infos = []
+
+ for e in itertools.chain(auth_chain, state):
+ if e.event_id in seen_ids:
+ continue
+ e.internal_metadata.outlier = True
+ auth_ids = [e_id for e_id, _ in e.auth_events]
+ auth = {
+ (e.type, e.state_key): e for e in auth_chain
+ if e.event_id in auth_ids or e.type == EventTypes.Create
+ }
+ event_infos.append({
+ "event": e,
+ "auth_events": auth,
+ })
+ seen_ids.add(e.event_id)
+
+ yield self._handle_new_events(
+ origin,
+ event_infos,
+ outliers=True
+ )
+
+ try:
+ _, event_stream_id, max_stream_id = yield self._handle_new_event(
+ origin,
+ event,
+ state=state,
+ backfilled=backfilled,
+ current_state=current_state,
+ )
+ except AuthError as e:
+ raise FederationError(
+ "ERROR",
+ e.code,
+ e.msg,
+ affected=event.event_id,
+ )
+
+ # if we're receiving valid events from an origin,
+ # it's probably a good idea to mark it as not in retry-state
+ # for sending (although this is a bit of a leap)
+ retry_timings = yield self.store.get_destination_retry_timings(origin)
+ if retry_timings and retry_timings["retry_last_ts"]:
+ self.store.set_destination_retry_timings(origin, 0, 0)
+
+ room = yield self.store.get_room(event.room_id)
+
+ if not room:
+ try:
+ yield self.store.store_room(
+ room_id=event.room_id,
+ room_creator_user_id="",
+ is_public=False,
+ )
+ except StoreError:
+ logger.exception("Failed to store room.")
+
+ if not backfilled:
+ extra_users = []
+ if event.type == EventTypes.Member:
+ target_user_id = event.state_key
+ target_user = UserID.from_string(target_user_id)
+ extra_users.append(target_user)
+
+ with PreserveLoggingContext():
+ d = self.notifier.on_new_room_event(
+ event, event_stream_id, max_stream_id,
+ extra_users=extra_users
+ )
+
+ def log_failure(f):
+ logger.warn(
+ "Failed to notify about %s: %s",
+ event.event_id, f.value
+ )
+
+ d.addErrback(log_failure)
+
+ if event.type == EventTypes.Member:
+ if event.membership == Membership.JOIN:
+ user = UserID.from_string(event.state_key)
+ yield self.distributor.fire(
+ "user_joined_room", user=user, room_id=event.room_id
+ )
+
+ @defer.inlineCallbacks
+ def _filter_events_for_server(self, server_name, room_id, events):
+ event_to_state = yield self.store.get_state_for_events(
+ frozenset(e.event_id for e in events),
+ types=(
+ (EventTypes.RoomHistoryVisibility, ""),
+ (EventTypes.Member, None),
+ )
+ )
+
+ def redact_disallowed(event, state):
+ if not state:
+ return event
+
+ history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
+ if history:
+ visibility = history.content.get("history_visibility", "shared")
+ if visibility in ["invited", "joined"]:
+ # We now loop through all state events looking for
+ # membership states for the requesting server to determine
+ # if the server is either in the room or has been invited
+ # into the room.
+ for ev in state.values():
+ if ev.type != EventTypes.Member:
+ continue
+ try:
+ domain = UserID.from_string(ev.state_key).domain
+ except:
+ continue
+
+ if domain != server_name:
+ continue
+
+ memtype = ev.membership
+ if memtype == Membership.JOIN:
+ return event
+ elif memtype == Membership.INVITE:
+ if visibility == "invited":
+ return event
+ else:
+ return prune_event(event)
+
+ return event
+
+ defer.returnValue([
+ redact_disallowed(e, event_to_state[e.event_id])
+ for e in events
+ ])
+
+ @log_function
+ @defer.inlineCallbacks
+ def backfill(self, dest, room_id, limit, extremities=None):
+ """ Trigger a backfill request to `dest` for the given `room_id`
+ """
+ if not extremities:
+ extremities = yield self.store.get_oldest_events_in_room(room_id)
+
+ events = yield self.replication_layer.backfill(
+ dest,
+ room_id,
+ limit=limit,
+ extremities=extremities,
+ )
+
+ event_map = {e.event_id: e for e in events}
+
+ event_ids = set(e.event_id for e in events)
+
+ edges = [
+ ev.event_id
+ for ev in events
+ if set(e_id for e_id, _ in ev.prev_events) - event_ids
+ ]
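+ # An "edge" is a fetched event with at least one prev_event outside
+ # the fetched set, i.e. the backwards frontier of this chunk.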
+
+ logger.info(
+ "backfill: Got %d events with %d edges",
+ len(events), len(edges),
+ )
+
+ # For each edge get the current state.
+
+ auth_events = {}
+ state_events = {}
+ events_to_state = {}
+ for e_id in edges:
+ state, auth = yield self.replication_layer.get_state_for_room(
+ destination=dest,
+ room_id=room_id,
+ event_id=e_id
+ )
+ auth_events.update({a.event_id: a for a in auth})
+ auth_events.update({s.event_id: s for s in state})
+ state_events.update({s.event_id: s for s in state})
+ events_to_state[e_id] = state
+
+ seen_events = yield self.store.have_events(
+ set(auth_events.keys()) | set(state_events.keys())
+ )
+
+ all_events = events + state_events.values() + auth_events.values()
+ required_auth = set(
+ a_id for event in all_events for a_id, _ in event.auth_events
+ )
+
+ missing_auth = required_auth - set(auth_events)
+ results = yield defer.gatherResults(
+ [
+ self.replication_layer.get_pdu(
+ [dest],
+ event_id,
+ outlier=True,
+ timeout=10000,
+ )
+ for event_id in missing_auth
+ ],
+ consumeErrors=True
+ ).addErrback(unwrapFirstError)
+ auth_events.update({a.event_id: a for a in results})
+
+ ev_infos = []
+ for a in auth_events.values():
+ if a.event_id in seen_events:
+ continue
+ ev_infos.append({
+ "event": a,
+ "auth_events": {
+ (auth_events[a_id].type, auth_events[a_id].state_key):
+ auth_events[a_id]
+ for a_id, _ in a.auth_events
+ }
+ })
+
+ for e_id in events_to_state:
+ ev_infos.append({
+ "event": event_map[e_id],
+ "state": events_to_state[e_id],
+ "auth_events": {
+ (auth_events[a_id].type, auth_events[a_id].state_key):
+ auth_events[a_id]
+ for a_id, _ in event_map[e_id].auth_events
+ }
+ })
+
+ events.sort(key=lambda e: e.depth)
+
+ for event in events:
+ if event.event_id in events_to_state:
+ continue
+
+ ev_infos.append({
+ "event": event,
+ })
+
+ yield self._handle_new_events(
+ dest, ev_infos,
+ backfilled=True,
+ )
+
+ defer.returnValue(events)
+
+ @defer.inlineCallbacks
+ def maybe_backfill(self, room_id, current_depth):
+ """Checks the database to see if we should backfill before paginating,
+ and if so do.
+ """
+ extremities = yield self.store.get_oldest_events_with_depth_in_room(
+ room_id
+ )
+
+ if not extremities:
+ logger.debug("Not backfilling as no extremeties found.")
+ return
+
+ # Check if we reached a point where we should start backfilling.
+ sorted_extremities_tuple = sorted(
+ extremities.items(),
+ key=lambda e: -int(e[1])
+ )
+ max_depth = sorted_extremities_tuple[0][1]
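+ # e.g. extremities {"$a": 10, "$b": 3} (illustrative event IDs) sorts
+ # to [("$a", 10), ("$b", 3)], so max_depth is the deepest extremity.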
+
+ if current_depth > max_depth:
+ logger.debug(
+ "Not backfilling as we don't need to. %d < %d",
+ max_depth, current_depth,
+ )
+ return
+
+ # Now we need to decide which hosts to hit first.
+
+ # First we try hosts that are already in the room
+ # TODO: HEURISTIC ALERT.
+
+ curr_state = yield self.state_handler.get_current_state(room_id)
+
+ def get_domains_from_state(state):
+ joined_users = [
+ (state_key, int(event.depth))
+ for (e_type, state_key), event in state.items()
+ if e_type == EventTypes.Member
+ and event.membership == Membership.JOIN
+ ]
+
+ joined_domains = {}
+ for u, d in joined_users:
+ try:
+ dom = UserID.from_string(u).domain
+ old_d = joined_domains.get(dom)
+ if old_d:
+ joined_domains[dom] = min(d, old_d)
+ else:
+ joined_domains[dom] = d
+ except:
+ pass
+
+ return sorted(joined_domains.items(), key=lambda d: d[1])
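+ # e.g. joins by @a:one.example (depth 3) and @b:two.example (depth 7)
+ # (hypothetical users) yield [("one.example", 3), ("two.example", 7)]:
+ # domains ordered by their earliest join.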
+
+ curr_domains = get_domains_from_state(curr_state)
+
+ likely_domains = [
+ domain for domain, depth in curr_domains
+ if domain != self.server_name
+ ]
+
+ @defer.inlineCallbacks
+ def try_backfill(domains):
+ # TODO: Should we try multiple of these at a time?
+ for dom in domains:
+ try:
+ events = yield self.backfill(
+ dom, room_id,
+ limit=100,
+ extremities=[e for e in extremities.keys()]
+ )
+ except SynapseError as e:
+ logger.info(
+ "Failed to backfill from %s because %s",
+ dom, e,
+ )
+ continue
+ except CodeMessageException as e:
+ if 400 <= e.code < 500:
+ raise
+
+ logger.info(
+ "Failed to backfill from %s because %s",
+ dom, e,
+ )
+ continue
+ except NotRetryingDestination as e:
+ logger.info(e.message)
+ continue
+ except Exception as e:
+ logger.exception(
+ "Failed to backfill from %s because %s",
+ dom, e,
+ )
+ continue
+
+ if events:
+ defer.returnValue(True)
+ defer.returnValue(False)
+
+ success = yield try_backfill(likely_domains)
+ if success:
+ defer.returnValue(True)
+
+ # Huh, well *those* domains didn't work out. Let's try some domains
+ # that were in the room at the time of each extremity instead.
+
+ tried_domains = set(likely_domains)
+ tried_domains.add(self.server_name)
+
+ event_ids = list(extremities.keys())
+
+ states = yield defer.gatherResults([
+ self.state_handler.resolve_state_groups(room_id, [e])
+ for e in event_ids
+ ])
+ states = dict(zip(event_ids, [s[1] for s in states]))
+
+ for e_id, _ in sorted_extremities_tuple:
+ likely_domains = get_domains_from_state(states[e_id])
+
+ success = yield try_backfill([
+ dom for dom in likely_domains
+ if dom not in tried_domains
+ ])
+ if success:
+ defer.returnValue(True)
+
+ tried_domains.update(likely_domains)
+
+ defer.returnValue(False)
+
+ @defer.inlineCallbacks
+ def send_invite(self, target_host, event):
+ """ Sends the invite to the remote server for signing.
+
+ Invites must be signed by the invitee's server before distribution.
+ """
+ pdu = yield self.replication_layer.send_invite(
+ destination=target_host,
+ room_id=event.room_id,
+ event_id=event.event_id,
+ pdu=event
+ )
+
+ defer.returnValue(pdu)
+
+ @defer.inlineCallbacks
+ def on_event_auth(self, event_id):
+ auth = yield self.store.get_auth_chain([event_id])
+
+ for event in auth:
+ event.signatures.update(
+ compute_event_signature(
+ event,
+ self.hs.hostname,
+ self.hs.config.signing_key[0]
+ )
+ )
+
+ defer.returnValue([e for e in auth])
+
+ @log_function
+ @defer.inlineCallbacks
+ def do_invite_join(self, target_hosts, room_id, joinee, content):
+ """ Attempts to join the `joinee` to the room `room_id` via the
+ server `target_host`.
+
+ This first triggers a /make_join/ request that returns a partial
+ event that we can fill out and sign. This is then sent to the
+ remote server via /send_join/ which responds with the state at that
+ event and the auth_chains.
+
+ We suspend processing of any received events from this room until we
+ have finished processing the join.
+ """
+ logger.debug("Joining %s to %s", joinee, room_id)
+
+ yield self.store.clean_room_for_join(room_id)
+
+ origin, event = yield self._make_and_verify_event(
+ target_hosts,
+ room_id,
+ joinee,
+ "join",
+ content,
+ )
+
+ self.room_queues[room_id] = []
+ handled_events = set()
+
+ try:
+ new_event = self._sign_event(event)
+ # Try first the host that successfully answered our /make_join/
+ # request.
+ try:
+ target_hosts.remove(origin)
+ target_hosts.insert(0, origin)
+ except ValueError:
+ pass
+ ret = yield self.replication_layer.send_join(target_hosts, new_event)
+
+ origin = ret["origin"]
+ state = ret["state"]
+ auth_chain = ret["auth_chain"]
+ auth_chain.sort(key=lambda e: e.depth)
+
+ handled_events.update([s.event_id for s in state])
+ handled_events.update([a.event_id for a in auth_chain])
+ handled_events.add(new_event.event_id)
+
+ logger.debug("do_invite_join auth_chain: %s", auth_chain)
+ logger.debug("do_invite_join state: %s", state)
+
+ logger.debug("do_invite_join event: %s", new_event)
+
+ try:
+ yield self.store.store_room(
+ room_id=room_id,
+ room_creator_user_id="",
+ is_public=False
+ )
+ except:
+ # FIXME
+ pass
+
+ event_stream_id, max_stream_id = yield self._persist_auth_tree(
+ auth_chain, state, event
+ )
+
+ with PreserveLoggingContext():
+ d = self.notifier.on_new_room_event(
+ new_event, event_stream_id, max_stream_id,
+ extra_users=[joinee]
+ )
+
+ def log_failure(f):
+ logger.warn(
+ "Failed to notify about %s: %s",
+ new_event.event_id, f.value
+ )
+
+ d.addErrback(log_failure)
+
+ logger.debug("Finished joining %s to %s", joinee, room_id)
+ finally:
+ room_queue = self.room_queues[room_id]
+ del self.room_queues[room_id]
+
+ for p, origin in room_queue:
+ if p.event_id in handled_events:
+ continue
+
+ try:
+ self.on_receive_pdu(origin, p, backfilled=False)
+ except:
+ logger.exception("Couldn't handle pdu")
+
+ defer.returnValue(True)
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_make_join_request(self, room_id, user_id):
+ """ We've received a /make_join/ request, so we create a partial
+ join event for the room and return that. We do *not* persist or
+ process it until the other server has signed it and sent it back.
+ """
+ event_content = {"membership": Membership.JOIN}
+
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Member,
+ "content": event_content,
+ "room_id": room_id,
+ "sender": user_id,
+ "state_key": user_id,
+ })
+
+ event, context = yield self._create_new_client_event(
+ builder=builder,
+ )
+
+ self.auth.check(event, auth_events=context.current_state)
+
+ defer.returnValue(event)
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_send_join_request(self, origin, pdu):
+ """ We have received a join event for a room. Fully process it and
+ respond with the current state and auth chains.
+ """
+ event = pdu
+
+ logger.debug(
+ "on_send_join_request: Got event: %s, signatures: %s",
+ event.event_id,
+ event.signatures,
+ )
+
+ event.internal_metadata.outlier = False
+
+ context, event_stream_id, max_stream_id = yield self._handle_new_event(
+ origin, event
+ )
+
+ logger.debug(
+ "on_send_join_request: After _handle_new_event: %s, sigs: %s",
+ event.event_id,
+ event.signatures,
+ )
+
+ extra_users = []
+ if event.type == EventTypes.Member:
+ target_user_id = event.state_key
+ target_user = UserID.from_string(target_user_id)
+ extra_users.append(target_user)
+
+ with PreserveLoggingContext():
+ d = self.notifier.on_new_room_event(
+ event, event_stream_id, max_stream_id, extra_users=extra_users
+ )
+
+ def log_failure(f):
+ logger.warn(
+ "Failed to notify about %s: %s",
+ event.event_id, f.value
+ )
+
+ d.addErrback(log_failure)
+
+ if event.type == EventTypes.Member:
+ if event.content["membership"] == Membership.JOIN:
+ user = UserID.from_string(event.state_key)
+ yield self.distributor.fire(
+ "user_joined_room", user=user, room_id=event.room_id
+ )
+
+ new_pdu = event
+
+ destinations = set()
+
+ for k, s in context.current_state.items():
+ try:
+ if k[0] == EventTypes.Member:
+ if s.content["membership"] == Membership.JOIN:
+ destinations.add(
+ UserID.from_string(s.state_key).domain
+ )
+ except:
+ logger.warn(
+ "Failed to get destination from event %s", s.event_id
+ )
+
+ destinations.discard(origin)
+
+ logger.debug(
+ "on_send_join_request: Sending event: %s, signatures: %s",
+ event.event_id,
+ event.signatures,
+ )
+
+ self.replication_layer.send_pdu(new_pdu, destinations)
+
+ state_ids = [e.event_id for e in context.current_state.values()]
+ auth_chain = yield self.store.get_auth_chain(set(
+ [event.event_id] + state_ids
+ ))
+
+ defer.returnValue({
+ "state": context.current_state.values(),
+ "auth_chain": auth_chain,
+ })
+
+ @defer.inlineCallbacks
+ def on_invite_request(self, origin, pdu):
+ """ We've got an invite event. Process and persist it. Sign it.
+
+ Respond with the now signed event.
+ """
+ event = pdu
+
+ event.internal_metadata.outlier = True
+
+ event.signatures.update(
+ compute_event_signature(
+ event,
+ self.hs.hostname,
+ self.hs.config.signing_key[0]
+ )
+ )
+
+ context = yield self.state_handler.compute_event_context(event)
+
+ event_stream_id, max_stream_id = yield self.store.persist_event(
+ event,
+ context=context,
+ backfilled=False,
+ )
+
+ target_user = UserID.from_string(event.state_key)
+ with PreserveLoggingContext():
+ d = self.notifier.on_new_room_event(
+ event, event_stream_id, max_stream_id,
+ extra_users=[target_user],
+ )
+
+ def log_failure(f):
+ logger.warn(
+ "Failed to notify about %s: %s",
+ event.event_id, f.value
+ )
+
+ d.addErrback(log_failure)
+
+ defer.returnValue(event)
+
+ @defer.inlineCallbacks
+ def do_remotely_reject_invite(self, target_hosts, room_id, user_id):
+ origin, event = yield self._make_and_verify_event(
+ target_hosts,
+ room_id,
+ user_id,
+ "leave"
+ )
+ signed_event = self._sign_event(event)
+
+ # Try first the host that successfully answered our /make_leave/
+ # request.
+ try:
+ target_hosts.remove(origin)
+ target_hosts.insert(0, origin)
+ except ValueError:
+ pass
+
+ yield self.replication_layer.send_leave(
+ target_hosts,
+ signed_event
+ )
+ defer.returnValue(None)
+
+ @defer.inlineCallbacks
+ def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
+ content=None):
+ if content is None:
+ content = {}
+ origin, pdu = yield self.replication_layer.make_membership_event(
+ target_hosts,
+ room_id,
+ user_id,
+ membership,
+ content,
+ )
+
+ logger.debug("Got response to make_%s: %s", membership, pdu)
+
+ event = pdu
+
+ # We should assert some things.
+ # FIXME: Do this in a nicer way
+ assert(event.type == EventTypes.Member)
+ assert(event.user_id == user_id)
+ assert(event.state_key == user_id)
+ assert(event.room_id == room_id)
+ defer.returnValue((origin, event))
+
+ def _sign_event(self, event):
+ event.internal_metadata.outlier = False
+
+ builder = self.event_builder_factory.new(
+ unfreeze(event.get_pdu_json())
+ )
+
+ builder.event_id = self.event_builder_factory.create_event_id()
+ builder.origin = self.hs.hostname
+
+ if not hasattr(event, "signatures"):
+ builder.signatures = {}
+
+ add_hashes_and_signatures(
+ builder,
+ self.hs.hostname,
+ self.hs.config.signing_key[0],
+ )
+
+ return builder.build()
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_make_leave_request(self, room_id, user_id):
+ """ We've received a /make_leave/ request, so we create a partial
+ join event for the room and return that. We do *not* persist or
+ process it until the other server has signed it and sent it back.
+ """
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Member,
+ "content": {"membership": Membership.LEAVE},
+ "room_id": room_id,
+ "sender": user_id,
+ "state_key": user_id,
+ })
+
+ event, context = yield self._create_new_client_event(
+ builder=builder,
+ )
+
+ self.auth.check(event, auth_events=context.current_state)
+
+ defer.returnValue(event)
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_send_leave_request(self, origin, pdu):
+ """ We have received a leave event for a room. Fully process it."""
+ event = pdu
+
+ logger.debug(
+ "on_send_leave_request: Got event: %s, signatures: %s",
+ event.event_id,
+ event.signatures,
+ )
+
+ event.internal_metadata.outlier = False
+
+ context, event_stream_id, max_stream_id = yield self._handle_new_event(
+ origin, event
+ )
+
+ logger.debug(
+ "on_send_leave_request: After _handle_new_event: %s, sigs: %s",
+ event.event_id,
+ event.signatures,
+ )
+
+ extra_users = []
+ if event.type == EventTypes.Member:
+ target_user_id = event.state_key
+ target_user = UserID.from_string(target_user_id)
+ extra_users.append(target_user)
+
+ with PreserveLoggingContext():
+ d = self.notifier.on_new_room_event(
+ event, event_stream_id, max_stream_id, extra_users=extra_users
+ )
+
+ def log_failure(f):
+ logger.warn(
+ "Failed to notify about %s: %s",
+ event.event_id, f.value
+ )
+
+ d.addErrback(log_failure)
+
+ new_pdu = event
+
+ destinations = set()
+
+ for k, s in context.current_state.items():
+ try:
+ if k[0] == EventTypes.Member:
+ if s.content["membership"] == Membership.LEAVE:
+ destinations.add(
+ UserID.from_string(s.state_key).domain
+ )
+ except:
+ logger.warn(
+ "Failed to get destination from event %s", s.event_id
+ )
+
+ destinations.discard(origin)
+
+ logger.debug(
+ "on_send_leave_request: Sending event: %s, signatures: %s",
+ event.event_id,
+ event.signatures,
+ )
+
+ self.replication_layer.send_pdu(new_pdu, destinations)
+
+ defer.returnValue(None)
+
+ @defer.inlineCallbacks
+ def get_state_for_pdu(self, origin, room_id, event_id, do_auth=True):
+ yield run_on_reactor()
+
+ if do_auth:
+ in_room = yield self.auth.check_host_in_room(room_id, origin)
+ if not in_room:
+ raise AuthError(403, "Host not in room.")
+
+ state_groups = yield self.store.get_state_groups(
+ room_id, [event_id]
+ )
+
+ if state_groups:
+ _, state = state_groups.items().pop()
+ results = {
+ (e.type, e.state_key): e for e in state
+ }
+
+ event = yield self.store.get_event(event_id)
+ if event and event.is_state():
+ # Get previous state
+ if "replaces_state" in event.unsigned:
+ prev_id = event.unsigned["replaces_state"]
+ if prev_id != event.event_id:
+ prev_event = yield self.store.get_event(prev_id)
+ results[(event.type, event.state_key)] = prev_event
+ else:
+ del results[(event.type, event.state_key)]
+
+ res = results.values()
+ for event in res:
+ event.signatures.update(
+ compute_event_signature(
+ event,
+ self.hs.hostname,
+ self.hs.config.signing_key[0]
+ )
+ )
+
+ defer.returnValue(res)
+ else:
+ defer.returnValue([])
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_backfill_request(self, origin, room_id, pdu_list, limit):
+ in_room = yield self.auth.check_host_in_room(room_id, origin)
+ if not in_room:
+ raise AuthError(403, "Host not in room.")
+
+ events = yield self.store.get_backfill_events(
+ room_id,
+ pdu_list,
+ limit
+ )
+
+ events = yield self._filter_events_for_server(origin, room_id, events)
+
+ defer.returnValue(events)
+
+ @defer.inlineCallbacks
+ @log_function
+ def get_persisted_pdu(self, origin, event_id, do_auth=True):
+ """ Get a PDU from the database with given origin and id.
+
+ Returns:
+ Deferred: Results in a `Pdu`.
+ """
+ event = yield self.store.get_event(
+ event_id,
+ allow_none=True,
+ allow_rejected=True,
+ )
+
+ if event:
+ # FIXME: This is a temporary work around where we occasionally
+ # return events slightly differently than when they were
+ # originally signed
+ event.signatures.update(
+ compute_event_signature(
+ event,
+ self.hs.hostname,
+ self.hs.config.signing_key[0]
+ )
+ )
+
+ if do_auth:
+ in_room = yield self.auth.check_host_in_room(
+ event.room_id,
+ origin
+ )
+ if not in_room:
+ raise AuthError(403, "Host not in room.")
+
+ defer.returnValue(event)
+ else:
+ defer.returnValue(None)
+
+ @log_function
+ def get_min_depth_for_context(self, context):
+ return self.store.get_min_depth(context)
+
+ @log_function
+ def _on_user_joined(self, user, room_id):
+ waiters = self.waiting_for_join_list.get(
+ (user.to_string(), room_id),
+ []
+ )
+ while waiters:
+ waiters.pop().callback(None)
+
+ @defer.inlineCallbacks
+ @log_function
+ def _handle_new_event(self, origin, event, state=None, backfilled=False,
+ current_state=None, auth_events=None):
+
+ outlier = event.internal_metadata.is_outlier()
+
+ context = yield self._prep_event(
+ origin, event,
+ state=state,
+ auth_events=auth_events,
+ )
+
+ event_stream_id, max_stream_id = yield self.store.persist_event(
+ event,
+ context=context,
+ backfilled=backfilled,
+ is_new_state=(not outlier and not backfilled),
+ current_state=current_state,
+ )
+
+ defer.returnValue((context, event_stream_id, max_stream_id))
+
+ @defer.inlineCallbacks
+ def _handle_new_events(self, origin, event_infos, backfilled=False,
+ outliers=False):
+ contexts = yield defer.gatherResults(
+ [
+ self._prep_event(
+ origin,
+ ev_info["event"],
+ state=ev_info.get("state"),
+ auth_events=ev_info.get("auth_events"),
+ )
+ for ev_info in event_infos
+ ]
+ )
+
+ yield self.store.persist_events(
+ [
+ (ev_info["event"], context)
+ for ev_info, context in itertools.izip(event_infos, contexts)
+ ],
+ backfilled=backfilled,
+ is_new_state=(not outliers and not backfilled),
+ )
+
+ @defer.inlineCallbacks
+ def _persist_auth_tree(self, auth_events, state, event):
+ """Checks the auth chain is valid (and passes auth checks) for the
+ state and event. Then persists the auth chain and state atomically.
+ Persists the event seperately.
+
+ Returns:
+ 2-tuple of (event_stream_id, max_stream_id) from the persist_event
+ call for `event`
+ """
+ events_to_context = {}
+ for e in itertools.chain(auth_events, state):
+ ctx = yield self.state_handler.compute_event_context(
+ e, outlier=True,
+ )
+ events_to_context[e.event_id] = ctx
+ e.internal_metadata.outlier = True
+
+ event_map = {
+ e.event_id: e
+ for e in auth_events
+ }
+
+ create_event = None
+ for e in auth_events:
+ if (e.type, e.state_key) == (EventTypes.Create, ""):
+ create_event = e
+ break
+
+ for e in itertools.chain(auth_events, state, [event]):
+ auth_for_e = {
+ (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
+ for e_id, _ in e.auth_events
+ }
+ if create_event:
+ auth_for_e[(EventTypes.Create, "")] = create_event
+
+ try:
+ self.auth.check(e, auth_events=auth_for_e)
+ except AuthError as err:
+ logger.warn(
+ "Rejecting %s because %s",
+ e.event_id, err.msg
+ )
+
+ if e == event:
+ raise
+ events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
+
+ yield self.store.persist_events(
+ [
+ (e, events_to_context[e.event_id])
+ for e in itertools.chain(auth_events, state)
+ ],
+ is_new_state=False,
+ )
+
+ new_event_context = yield self.state_handler.compute_event_context(
+ event, old_state=state, outlier=False,
+ )
+
+ event_stream_id, max_stream_id = yield self.store.persist_event(
+ event, new_event_context,
+ backfilled=False,
+ is_new_state=True,
+ current_state=state,
+ )
+
+ defer.returnValue((event_stream_id, max_stream_id))
+
+ @defer.inlineCallbacks
+ def _prep_event(self, origin, event, state=None, auth_events=None):
+ outlier = event.internal_metadata.is_outlier()
+
+ context = yield self.state_handler.compute_event_context(
+ event, old_state=state, outlier=outlier,
+ )
+
+ if not auth_events:
+ auth_events = context.current_state
+
+ # This is a hack to fix some old rooms where the initial join event
+ # didn't reference the create event in its auth events.
+ if event.type == EventTypes.Member and not event.auth_events:
+ if len(event.prev_events) == 1 and event.depth < 5:
+ c = yield self.store.get_event(
+ event.prev_events[0][0],
+ allow_none=True,
+ )
+ if c and c.type == EventTypes.Create:
+ auth_events[(c.type, c.state_key)] = c
+
+ try:
+ yield self.do_auth(
+ origin, event, context, auth_events=auth_events
+ )
+ except AuthError as e:
+ logger.warn(
+ "Rejecting %s because %s",
+ event.event_id, e.msg
+ )
+
+ context.rejected = RejectedReason.AUTH_ERROR
+
+ if event.type == EventTypes.GuestAccess:
+ full_context = yield self.store.get_current_state(room_id=event.room_id)
+ yield self.maybe_kick_guest_users(event, full_context)
+
+ defer.returnValue(context)
+
+ @defer.inlineCallbacks
+ def on_query_auth(self, origin, event_id, remote_auth_chain, rejects,
+ missing):
+ # Just go through and process each event in `remote_auth_chain`. We
+ # don't want to fall into the trap of `missing` being wrong.
+ for e in remote_auth_chain:
+ try:
+ yield self._handle_new_event(origin, e)
+ except AuthError:
+ pass
+
+ # Now get the current auth_chain for the event.
+ local_auth_chain = yield self.store.get_auth_chain([event_id])
+
+ # TODO: Check if we would now reject event_id. If so we need to tell
+ # everyone.
+
+ ret = yield self.construct_auth_difference(
+ local_auth_chain, remote_auth_chain
+ )
+
+ for event in ret["auth_chain"]:
+ event.signatures.update(
+ compute_event_signature(
+ event,
+ self.hs.hostname,
+ self.hs.config.signing_key[0]
+ )
+ )
+
+ logger.debug("on_query_auth returning: %s", ret)
+
+ defer.returnValue(ret)
+
+ @defer.inlineCallbacks
+ def on_get_missing_events(self, origin, room_id, earliest_events,
+ latest_events, limit, min_depth):
+ in_room = yield self.auth.check_host_in_room(
+ room_id,
+ origin
+ )
+ if not in_room:
+ raise AuthError(403, "Host not in room.")
+
+ limit = min(limit, 20)
+ min_depth = max(min_depth, 0)
+
+ missing_events = yield self.store.get_missing_events(
+ room_id=room_id,
+ earliest_events=earliest_events,
+ latest_events=latest_events,
+ limit=limit,
+ min_depth=min_depth,
+ )
+
+ defer.returnValue(missing_events)
+
+ @defer.inlineCallbacks
+ @log_function
+ def do_auth(self, origin, event, context, auth_events):
+ # Check if we have all the auth events.
+ current_state = set(e.event_id for e in auth_events.values())
+ event_auth_events = set(e_id for e_id, _ in event.auth_events)
+
+ if event_auth_events - current_state:
+ have_events = yield self.store.have_events(
+ event_auth_events - current_state
+ )
+ else:
+ have_events = {}
+
+ have_events.update({
+ e.event_id: ""
+ for e in auth_events.values()
+ })
+
+ seen_events = set(have_events.keys())
+
+ missing_auth = event_auth_events - seen_events - current_state
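+ # e.g. if the event claims auth_events {A, B, C} (illustrative IDs)
+ # but we have only seen {A, B}, missing_auth is {C} and we try to
+ # fetch it from the origin below.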
+
+ if missing_auth:
+ logger.info("Missing auth: %s", missing_auth)
+ # If we don't have all the auth events, we need to get them.
+ try:
+ remote_auth_chain = yield self.replication_layer.get_event_auth(
+ origin, event.room_id, event.event_id
+ )
+
+ seen_remotes = yield self.store.have_events(
+ [e.event_id for e in remote_auth_chain]
+ )
+
+ for e in remote_auth_chain:
+ if e.event_id in seen_remotes.keys():
+ continue
+
+ if e.event_id == event.event_id:
+ continue
+
+ try:
+ auth_ids = [e_id for e_id, _ in e.auth_events]
+ auth = {
+ (ae.type, ae.state_key): ae for ae in remote_auth_chain
+ if ae.event_id in auth_ids or ae.type == EventTypes.Create
+ }
+ e.internal_metadata.outlier = True
+
+ logger.debug(
+ "do_auth %s missing_auth: %s",
+ event.event_id, e.event_id
+ )
+ yield self._handle_new_event(
+ origin, e, auth_events=auth
+ )
+
+ if e.event_id in event_auth_events:
+ auth_events[(e.type, e.state_key)] = e
+ except AuthError:
+ pass
+
+ have_events = yield self.store.have_events(
+ [e_id for e_id, _ in event.auth_events]
+ )
+ seen_events = set(have_events.keys())
+ except:
+ # FIXME:
+ logger.exception("Failed to get auth chain")
+
+ # FIXME: Assumes we have and stored all the state for all the
+ # prev_events
+ current_state = set(e.event_id for e in auth_events.values())
+ different_auth = event_auth_events - current_state
+
+ if different_auth and not event.internal_metadata.is_outlier():
+ # Do auth conflict res.
+ logger.info("Different auth: %s", different_auth)
+
+ different_events = yield defer.gatherResults(
+ [
+ self.store.get_event(
+ d,
+ allow_none=True,
+ allow_rejected=False,
+ )
+ for d in different_auth
+ if d in have_events and not have_events[d]
+ ],
+ consumeErrors=True
+ ).addErrback(unwrapFirstError)
+
+ if different_events:
+ local_view = dict(auth_events)
+ remote_view = dict(auth_events)
+ remote_view.update({
+ (d.type, d.state_key): d for d in different_events
+ })
+
+ new_state, prev_state = self.state_handler.resolve_events(
+ [local_view.values(), remote_view.values()],
+ event
+ )
+
+ auth_events.update(new_state)
+
+ current_state = set(e.event_id for e in auth_events.values())
+ different_auth = event_auth_events - current_state
+
+ context.current_state.update(auth_events)
+ context.state_group = None
+
+ if different_auth and not event.internal_metadata.is_outlier():
+ logger.info("Different auth after resolution: %s", different_auth)
+
+ # Only do auth resolution if we have something new to say.
+ # We can't prove an auth failure.
+ do_resolution = False
+
+ provable = [
+ RejectedReason.NOT_ANCESTOR,
+ ]
+
+ for e_id in different_auth:
+ if e_id in have_events:
+ if have_events[e_id] in provable:
+ do_resolution = True
+ break
+
+ if do_resolution:
+ # 1. Get what we think is the auth chain.
+ auth_ids = self.auth.compute_auth_events(
+ event, context.current_state
+ )
+ local_auth_chain = yield self.store.get_auth_chain(auth_ids)
+
+ try:
+ # 2. Get remote difference.
+ result = yield self.replication_layer.query_auth(
+ origin,
+ event.room_id,
+ event.event_id,
+ local_auth_chain,
+ )
+
+ seen_remotes = yield self.store.have_events(
+ [e.event_id for e in result["auth_chain"]]
+ )
+
+ # 3. Process any remote auth chain events we haven't seen.
+ for ev in result["auth_chain"]:
+ if ev.event_id in seen_remotes.keys():
+ continue
+
+ if ev.event_id == event.event_id:
+ continue
+
+ try:
+ auth_ids = [e_id for e_id, _ in ev.auth_events]
+ auth = {
+ (ae.type, ae.state_key): ae
+ for ae in result["auth_chain"]
+ if ae.event_id in auth_ids
+ or ae.type == EventTypes.Create
+ }
+ ev.internal_metadata.outlier = True
+
+ logger.debug(
+ "do_auth %s different_auth: %s",
+ event.event_id, ev.event_id
+ )
+
+ yield self._handle_new_event(
+ origin, ev, auth_events=auth
+ )
+
+ if ev.event_id in event_auth_events:
+ auth_events[(ev.type, ev.state_key)] = ev
+ except AuthError:
+ pass
+
+ except:
+ # FIXME:
+ logger.exception("Failed to query auth chain")
+
+ # 4. Look at rejects and their proofs.
+ # TODO.
+
+ context.current_state.update(auth_events)
+ context.state_group = None
+
+ self.auth.check(event, auth_events=auth_events)
+
+ @defer.inlineCallbacks
+ def construct_auth_difference(self, local_auth, remote_auth):
+ """ Given a local and remote auth chain, find the differences. This
+ assumes that we have already processed all events in remote_auth.
+
+ Args:
+ local_auth (list)
+ remote_auth (list)
+
+ Returns:
+ dict
+ """
+
+ logger.debug("construct_auth_difference Start!")
+
+ # TODO: Make sure we are OK with local_auth or remote_auth having more
+ # auth events in them than strictly necessary.
+
+ def sort_fun(ev):
+ return ev.depth, ev.event_id
+
+ logger.debug("construct_auth_difference after sort_fun!")
+
+ # We find the differences by starting at the "bottom" of each list
+ # and iterating up on both lists. The lists are ordered by depth and
+ # then event_id, we iterate up both lists until we find the event ids
+ # don't match. Then we look at depth/event_id to see which side is
+ # missing that event, and iterate only up that list. Repeat.
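+ # Worked example (hypothetical events, depth in brackets):
+ #   local:  A[1] B[2] C[3]      remote: A[1] B[2] D[3]
+ # A and B match, so both iterators advance past them. At depth 3 the
+ # event ids differ, so C lands in missing_locals and D in
+ # missing_remotes.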
+
+ remote_list = list(remote_auth)
+ remote_list.sort(key=sort_fun)
+
+ local_list = list(local_auth)
+ local_list.sort(key=sort_fun)
+
+ local_iter = iter(local_list)
+ remote_iter = iter(remote_list)
+
+ logger.debug("construct_auth_difference before get_next!")
+
+ def get_next(it, opt=None):
+ try:
+ return it.next()
+ except StopIteration:
+ return opt
+
+ current_local = get_next(local_iter)
+ current_remote = get_next(remote_iter)
+
+ logger.debug("construct_auth_difference before while")
+
+ missing_remotes = []
+ missing_locals = []
+ while current_local or current_remote:
+ if current_remote is None:
+ missing_locals.append(current_local)
+ current_local = get_next(local_iter)
+ continue
+
+ if current_local is None:
+ missing_remotes.append(current_remote)
+ current_remote = get_next(remote_iter)
+ continue
+
+ if current_local.event_id == current_remote.event_id:
+ current_local = get_next(local_iter)
+ current_remote = get_next(remote_iter)
+ continue
+
+ if current_local.depth < current_remote.depth:
+ missing_locals.append(current_local)
+ current_local = get_next(local_iter)
+ continue
+
+ if current_local.depth > current_remote.depth:
+ missing_remotes.append(current_remote)
+ current_remote = get_next(remote_iter)
+ continue
+
+ # They have the same depth, so we fall back to the event_id order
+ if current_local.event_id < current_remote.event_id:
+ missing_locals.append(current_local)
+ current_local = get_next(local_iter)
+ continue
+
+ if current_local.event_id > current_remote.event_id:
+ missing_remotes.append(current_remote)
+ current_remote = get_next(remote_iter)
+ continue
+
+ logger.debug("construct_auth_difference after while")
+
+ # missing locals should be sent to the server
+ # We should find why we are missing remotes, as they will have been
+ # rejected.
+
+ # Remove events from missing_remotes if they are referencing a missing
+ # remote. We only care about the "root" rejected ones.
+ missing_remote_ids = [e.event_id for e in missing_remotes]
+ base_remote_rejected = list(missing_remotes)
+ for e in missing_remotes:
+ for e_id, _ in e.auth_events:
+ if e_id in missing_remote_ids:
+ try:
+ base_remote_rejected.remove(e)
+ except ValueError:
+ pass
+
+ reason_map = {}
+
+ for e in base_remote_rejected:
+ reason = yield self.store.get_rejection_reason(e.event_id)
+ if reason is None:
+ # TODO: e is not in the current state, so we should
+ # construct some proof of that.
+ continue
+
+ reason_map[e.event_id] = reason
+
+ if reason == RejectedReason.AUTH_ERROR:
+ pass
+ elif reason == RejectedReason.REPLACED:
+ # TODO: Get proof
+ pass
+ elif reason == RejectedReason.NOT_ANCESTOR:
+ # TODO: Get proof.
+ pass
+
+ logger.debug("construct_auth_difference returning")
+
+ defer.returnValue({
+ "auth_chain": local_auth,
+ "rejects": {
+ e.event_id: {
+ "reason": reason_map[e.event_id],
+ "proof": None,
+ }
+ for e in base_remote_rejected
+ },
+ "missing": [e.event_id for e in missing_locals],
+ })
+
+ @defer.inlineCallbacks
+ @log_function
+ def exchange_third_party_invite(self, invite):
+ sender = invite["sender"]
+ room_id = invite["room_id"]
+
+ event_dict = {
+ "type": EventTypes.Member,
+ "content": {
+ "membership": Membership.INVITE,
+ "third_party_invite": invite,
+ },
+ "room_id": room_id,
+ "sender": sender,
+ "state_key": invite["mxid"],
+ }
+
+ if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)):
+ builder = self.event_builder_factory.new(event_dict)
+ EventValidator().validate_new(builder)
+ event, context = yield self._create_new_client_event(builder=builder)
+ self.auth.check(event, context.current_state)
+ yield self._validate_keyserver(event, auth_events=context.current_state)
+ member_handler = self.hs.get_handlers().room_member_handler
+ yield member_handler.change_membership(event, context)
+ else:
+ destinations = set([x.split(":", 1)[-1] for x in (sender, room_id)])
+ yield self.replication_layer.forward_third_party_invite(
+ destinations,
+ room_id,
+ event_dict,
+ )
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
+ builder = self.event_builder_factory.new(event_dict)
+
+ event, context = yield self._create_new_client_event(
+ builder=builder,
+ )
+
+ self.auth.check(event, auth_events=context.current_state)
+ yield self._validate_keyserver(event, auth_events=context.current_state)
+
+ returned_invite = yield self.send_invite(origin, event)
+ # TODO: Make sure the signatures actually are correct.
+ event.signatures.update(returned_invite.signatures)
+ member_handler = self.hs.get_handlers().room_member_handler
+ yield member_handler.change_membership(event, context)
+
+ @defer.inlineCallbacks
+ def _validate_keyserver(self, event, auth_events):
+ token = event.content["third_party_invite"]["signed"]["token"]
+
+ invite_event = auth_events.get(
+ (EventTypes.ThirdPartyInvite, token,)
+ )
+
+ try:
+ response = yield self.hs.get_simple_http_client().get_json(
+ invite_event.content["key_validity_url"],
+ {"public_key": invite_event.content["public_key"]}
+ )
+ except Exception:
+ raise SynapseError(
+ 502,
+ "Third party certificate could not be checked"
+ )
+ if "valid" not in response or not response["valid"]:
+ raise AuthError(403, "Third party certificate was invalid")
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
new file mode 100644
index 00000000..2a99921d
--- /dev/null
+++ b/synapse/handlers/identity.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for interacting with Identity Servers"""
+from twisted.internet import defer
+
+from synapse.api.errors import (
+ CodeMessageException, SynapseError
+)
+from ._base import BaseHandler
+from synapse.http.client import SimpleHttpClient
+from synapse.util.async import run_on_reactor
+
+import json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class IdentityHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(IdentityHandler, self).__init__(hs)
+
+ @defer.inlineCallbacks
+ def threepid_from_creds(self, creds):
+ yield run_on_reactor()
+
+ # TODO: get this from the homeserver rather than creating a new one for
+ # each request
+ http_client = SimpleHttpClient(self.hs)
+ # XXX: make this configurable!
+ # trustedIdServers = ['matrix.org', 'localhost:8090']
+ trustedIdServers = ['matrix.org', 'vector.im']
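+ # Example creds shape (hypothetical values); both the snake_case and
+ # camelCase spellings are accepted below:
+ #   {"id_server": "matrix.org", "client_secret": "s3cret", "sid": "1234"}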
+
+ if 'id_server' in creds:
+ id_server = creds['id_server']
+ elif 'idServer' in creds:
+ id_server = creds['idServer']
+ else:
+ raise SynapseError(400, "No id_server in creds")
+
+ if 'client_secret' in creds:
+ client_secret = creds['client_secret']
+ elif 'clientSecret' in creds:
+ client_secret = creds['clientSecret']
+ else:
+ raise SynapseError(400, "No client_secret in creds")
+
+ if id_server not in trustedIdServers:
+ logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
+ 'credentials', id_server)
+ defer.returnValue(None)
+
+ data = {}
+ try:
+ data = yield http_client.get_json(
+ "https://%s%s" % (
+ id_server,
+ "/_matrix/identity/api/v1/3pid/getValidated3pid"
+ ),
+ {'sid': creds['sid'], 'client_secret': client_secret}
+ )
+ except CodeMessageException as e:
+ data = json.loads(e.msg)
+
+ if 'medium' in data:
+ defer.returnValue(data)
+ defer.returnValue(None)
+
+ @defer.inlineCallbacks
+ def bind_threepid(self, creds, mxid):
+ yield run_on_reactor()
+ logger.debug("binding threepid %r to %s", creds, mxid)
+ http_client = SimpleHttpClient(self.hs)
+ data = None
+
+ if 'id_server' in creds:
+ id_server = creds['id_server']
+ elif 'idServer' in creds:
+ id_server = creds['idServer']
+ else:
+ raise SynapseError(400, "No id_server in creds")
+
+ if 'client_secret' in creds:
+ client_secret = creds['client_secret']
+ elif 'clientSecret' in creds:
+ client_secret = creds['clientSecret']
+ else:
+ raise SynapseError(400, "No client_secret in creds")
+
+ try:
+ data = yield http_client.post_urlencoded_get_json(
+ "https://%s%s" % (
+ id_server, "/_matrix/identity/api/v1/3pid/bind"
+ ),
+ {
+ 'sid': creds['sid'],
+ 'client_secret': client_secret,
+ 'mxid': mxid,
+ }
+ )
+ logger.debug("bound threepid %r to %s", creds, mxid)
+ except CodeMessageException as e:
+ data = json.loads(e.msg)
+ defer.returnValue(data)
+
+ @defer.inlineCallbacks
+ def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
+ yield run_on_reactor()
+ http_client = SimpleHttpClient(self.hs)
+
+ params = {
+ 'email': email,
+ 'client_secret': client_secret,
+ 'send_attempt': send_attempt,
+ }
+ params.update(kwargs)
+
+ try:
+ data = yield http_client.post_urlencoded_get_json(
+ "https://%s%s" % (
+ id_server,
+ "/_matrix/identity/api/v1/validate/email/requestToken"
+ ),
+ params
+ )
+ defer.returnValue(data)
+ except CodeMessageException as e:
+ logger.info("Proxied requestToken failed: %r", e)
+ raise
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
new file mode 100644
index 00000000..14051aee
--- /dev/null
+++ b/synapse/handlers/message.py
@@ -0,0 +1,640 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.api.errors import SynapseError, AuthError, Codes
+from synapse.streams.config import PaginationConfig
+from synapse.events.utils import serialize_event
+from synapse.events.validator import EventValidator
+from synapse.util import unwrapFirstError
+from synapse.util.logcontext import PreserveLoggingContext
+from synapse.types import UserID, RoomStreamToken, StreamToken
+
+from ._base import BaseHandler
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class MessageHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(MessageHandler, self).__init__(hs)
+ self.hs = hs
+ self.state = hs.get_state_handler()
+ self.clock = hs.get_clock()
+ self.validator = EventValidator()
+
+ @defer.inlineCallbacks
+ def get_message(self, msg_id=None, room_id=None, sender_id=None,
+ user_id=None):
+ """ Retrieve a message.
+
+ Args:
+ msg_id (str): The message ID to obtain.
+ room_id (str): The room where the message resides.
+ sender_id (str): The user ID of the user who sent the message.
+ user_id (str): The user ID of the user making this request.
+ Returns:
+ The message, or None if no message exists.
+ Raises:
+ SynapseError if something went wrong.
+ """
+ yield self.auth.check_joined_room(room_id, user_id)
+
+ # Pull out the message from the db
+# msg = yield self.store.get_message(
+# room_id=room_id,
+# msg_id=msg_id,
+# user_id=sender_id
+# )
+
+ # TODO (erikj): Once we work out the correct c-s api we need to think
+ # on how to do this.
+
+ defer.returnValue(None)
+
+ @defer.inlineCallbacks
+ def get_messages(self, user_id=None, room_id=None, pagin_config=None,
+ as_client_event=True, is_guest=False):
+ """Get messages in a room.
+
+ Args:
+ user_id (str): The user requesting messages.
+ room_id (str): The room they want messages from.
+ pagin_config (synapse.api.streams.PaginationConfig): The pagination
+ config rules to apply, if any.
+ as_client_event (bool): True to get events in client-server format.
+ is_guest (bool): Whether the requesting user is a guest (as opposed
+ to a fully registered user).
+ Returns:
+ dict: Pagination API results
+ """
+ data_source = self.hs.get_event_sources().sources["room"]
+
+ if pagin_config.from_token:
+ room_token = pagin_config.from_token.room_key
+ else:
+ pagin_config.from_token = (
+ yield self.hs.get_event_sources().get_current_token(
+ direction='b'
+ )
+ )
+ room_token = pagin_config.from_token.room_key
+
+ room_token = RoomStreamToken.parse(room_token)
+ if room_token.topological is None:
+ raise SynapseError(400, "Invalid token")
+
+ pagin_config.from_token = pagin_config.from_token.copy_and_replace(
+ "room_key", str(room_token)
+ )
+
+ source_config = pagin_config.get_source_config("room")
+
+ if not is_guest:
+ member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
+ if member_event.membership == Membership.LEAVE:
+ # If they have left the room then clamp the token to be before
+ # they left the room.
+ # If they're a guest, we'll just 403 them if they're asking for
+ # events they can't see.
+ leave_token = yield self.store.get_topological_token_for_event(
+ member_event.event_id
+ )
+ leave_token = RoomStreamToken.parse(leave_token)
+ if leave_token.topological < room_token.topological:
+ source_config.from_key = str(leave_token)
+
+ if source_config.direction == "f":
+ if source_config.to_key is None:
+ source_config.to_key = str(leave_token)
+ else:
+ to_token = RoomStreamToken.parse(source_config.to_key)
+ if leave_token.topological < to_token.topological:
+ source_config.to_key = str(leave_token)
+
+ yield self.hs.get_handlers().federation_handler.maybe_backfill(
+ room_id, room_token.topological
+ )
+
+ user = UserID.from_string(user_id)
+
+ events, next_key = yield data_source.get_pagination_rows(
+ user, source_config, room_id
+ )
+
+ next_token = pagin_config.from_token.copy_and_replace(
+ "room_key", next_key
+ )
+
+ if not events:
+ defer.returnValue({
+ "chunk": [],
+ "start": pagin_config.from_token.to_string(),
+ "end": next_token.to_string(),
+ })
+
+ events = yield self._filter_events_for_client(user_id, events, is_guest=is_guest)
+
+ time_now = self.clock.time_msec()
+
+ chunk = {
+ "chunk": [
+ serialize_event(e, time_now, as_client_event)
+ for e in events
+ ],
+ "start": pagin_config.from_token.to_string(),
+ "end": next_token.to_string(),
+ }
+
+ defer.returnValue(chunk)
+
+ @defer.inlineCallbacks
+ def create_and_send_event(self, event_dict, ratelimit=True,
+ token_id=None, txn_id=None, is_guest=False):
+ """ Given a dict from a client, create and handle a new event.
+
+ Creates a FrozenEvent object, filling out auth_events, prev_events,
+ etc.
+
+ Adds display names to Join membership events.
+
+ Persists and notifies local clients and federation.
+
+ Args:
+ event_dict (dict): An entire event
+ """
+ builder = self.event_builder_factory.new(event_dict)
+
+ self.validator.validate_new(builder)
+
+ if ratelimit:
+ self.ratelimit(builder.user_id)
+ # TODO(paul): Why does 'event' not have a 'user' object?
+ user = UserID.from_string(builder.user_id)
+ assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
+
+ if builder.type == EventTypes.Member:
+ membership = builder.content.get("membership", None)
+ if membership == Membership.JOIN:
+ joinee = UserID.from_string(builder.state_key)
+ # If event doesn't include a display name, add one.
+ yield self.distributor.fire(
+ "collect_presencelike_data",
+ joinee,
+ builder.content
+ )
+
+ if token_id is not None:
+ builder.internal_metadata.token_id = token_id
+
+ if txn_id is not None:
+ builder.internal_metadata.txn_id = txn_id
+
+ event, context = yield self._create_new_client_event(
+ builder=builder,
+ )
+
+ if event.type == EventTypes.Member:
+ member_handler = self.hs.get_handlers().room_member_handler
+ yield member_handler.change_membership(event, context, is_guest=is_guest)
+ else:
+ yield self.handle_new_client_event(
+ event=event,
+ context=context,
+ )
+
+ if event.type == EventTypes.Message:
+ presence = self.hs.get_handlers().presence_handler
+ with PreserveLoggingContext():
+ presence.bump_presence_active_time(user)
+
+ defer.returnValue(event)
+
+ @defer.inlineCallbacks
+ def get_room_data(self, user_id=None, room_id=None,
+ event_type=None, state_key="", is_guest=False):
+ """ Get data from a room.
+
+ Args:
+ event : The room path event
+ Returns:
+ The path data content.
+ Raises:
+ SynapseError if something went wrong.
+ """
+ membership, membership_event_id = yield self._check_in_room_or_world_readable(
+ room_id, user_id, is_guest
+ )
+
+ if membership == Membership.JOIN:
+ data = yield self.state_handler.get_current_state(
+ room_id, event_type, state_key
+ )
+ elif membership == Membership.LEAVE:
+ key = (event_type, state_key)
+ room_state = yield self.store.get_state_for_events(
+ [membership_event_id], [key]
+ )
+ data = room_state[membership_event_id].get(key)
+
+ defer.returnValue(data)
+
+ @defer.inlineCallbacks
+ def _check_in_room_or_world_readable(self, room_id, user_id, is_guest):
+ try:
+ # check_user_was_in_room will return the most recent membership
+ # event for the user if:
+ # * The user is a non-guest user, and was ever in the room
+ # * The user is a guest user, and has joined the room
+ # else it will throw.
+ member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
+ defer.returnValue((member_event.membership, member_event.event_id))
+ return
+ except AuthError as auth_error:
+ visibility = yield self.state_handler.get_current_state(
+ room_id, EventTypes.RoomHistoryVisibility, ""
+ )
+ if (
+ visibility and
+ visibility.content["history_visibility"] == "world_readable"
+ ):
+ defer.returnValue((Membership.JOIN, None))
+ return
+ if not is_guest:
+ raise auth_error
+ raise AuthError(
+ 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
+ )
+
+ @defer.inlineCallbacks
+ def get_state_events(self, user_id, room_id, is_guest=False):
+ """Retrieve all state events for a given room. If the user is
+ joined to the room then return the current state. If the user has
+ left the room return the state events from when they left.
+
+ Args:
+ user_id(str): The user requesting state events.
+ room_id(str): The room ID to get all state events from.
+ Returns:
+ A list of dicts representing state events. [{}, {}, {}]
+ """
+ membership, membership_event_id = yield self._check_in_room_or_world_readable(
+ room_id, user_id, is_guest
+ )
+
+ if membership == Membership.JOIN:
+ room_state = yield self.state_handler.get_current_state(room_id)
+ elif membership == Membership.LEAVE:
+ room_state = yield self.store.get_state_for_events(
+ [membership_event_id], None
+ )
+ room_state = room_state[membership_event_id]
+
+ now = self.clock.time_msec()
+ defer.returnValue(
+ [serialize_event(c, now) for c in room_state.values()]
+ )
+
+ @defer.inlineCallbacks
+ def snapshot_all_rooms(self, user_id=None, pagin_config=None,
+ as_client_event=True, include_archived=False):
+ """Retrieve a snapshot of all rooms the user is invited or has joined.
+
+ This snapshot may include messages for all rooms where the user is
+ joined, depending on the pagination config.
+
+ Args:
+ user_id (str): The ID of the user making the request.
+ pagin_config (synapse.api.streams.PaginationConfig): The pagination
+ config used to determine how many messages *PER ROOM* to return.
+ as_client_event (bool): True to get events in client-server format.
+ include_archived (bool): True to get rooms that the user has left
+ Returns:
+ A list of dicts with "room_id" and "membership" keys for all rooms
+ the user is currently invited or joined in on. Rooms where the user
+ is joined on, may return a "messages" key with messages, depending
+ on the specified PaginationConfig.
+ """
+ memberships = [Membership.INVITE, Membership.JOIN]
+ if include_archived:
+ memberships.append(Membership.LEAVE)
+
+ room_list = yield self.store.get_rooms_for_user_where_membership_is(
+ user_id=user_id, membership_list=memberships
+ )
+
+ user = UserID.from_string(user_id)
+
+ rooms_ret = []
+
+ now_token = yield self.hs.get_event_sources().get_current_token()
+
+ presence_stream = self.hs.get_event_sources().sources["presence"]
+ pagination_config = PaginationConfig(from_token=now_token)
+ presence, _ = yield presence_stream.get_pagination_rows(
+ user, pagination_config.get_source_config("presence"), None
+ )
+
+ receipt_stream = self.hs.get_event_sources().sources["receipt"]
+ receipt, _ = yield receipt_stream.get_pagination_rows(
+ user, pagination_config.get_source_config("receipt"), None
+ )
+
+ tags_by_room = yield self.store.get_tags_for_user(user_id)
+
+ public_room_ids = yield self.store.get_public_room_ids()
+
+ limit = pagin_config.limit
+ if limit is None:
+ limit = 10
+
+ @defer.inlineCallbacks
+ def handle_room(event):
+ d = {
+ "room_id": event.room_id,
+ "membership": event.membership,
+ "visibility": (
+ "public" if event.room_id in public_room_ids
+ else "private"
+ ),
+ }
+
+ if event.membership == Membership.INVITE:
+ time_now = self.clock.time_msec()
+ d["inviter"] = event.sender
+
+ invite_event = yield self.store.get_event(event.event_id)
+ d["invite"] = serialize_event(invite_event, time_now, as_client_event)
+
+ rooms_ret.append(d)
+
+ if event.membership not in (Membership.JOIN, Membership.LEAVE):
+ return
+
+ try:
+ if event.membership == Membership.JOIN:
+ room_end_token = now_token.room_key
+ deferred_room_state = self.state_handler.get_current_state(
+ event.room_id
+ )
+ elif event.membership == Membership.LEAVE:
+ room_end_token = "s%d" % (event.stream_ordering,)
+ deferred_room_state = self.store.get_state_for_events(
+ [event.event_id], None
+ )
+ deferred_room_state.addCallback(
+ lambda states: states[event.event_id]
+ )
+
+ (messages, token), current_state = yield defer.gatherResults(
+ [
+ self.store.get_recent_events_for_room(
+ event.room_id,
+ limit=limit,
+ end_token=room_end_token,
+ ),
+ deferred_room_state,
+ ]
+ ).addErrback(unwrapFirstError)
+
+ messages = yield self._filter_events_for_client(
+ user_id, messages
+ )
+
+ start_token = now_token.copy_and_replace("room_key", token[0])
+ end_token = now_token.copy_and_replace("room_key", token[1])
+ time_now = self.clock.time_msec()
+
+ d["messages"] = {
+ "chunk": [
+ serialize_event(m, time_now, as_client_event)
+ for m in messages
+ ],
+ "start": start_token.to_string(),
+ "end": end_token.to_string(),
+ }
+
+ d["state"] = [
+ serialize_event(c, time_now, as_client_event)
+ for c in current_state.values()
+ ]
+
+ private_user_data = []
+ tags = tags_by_room.get(event.room_id)
+ if tags:
+ private_user_data.append({
+ "type": "m.tag",
+ "content": {"tags": tags},
+ })
+ d["private_user_data"] = private_user_data
+ except:
+ logger.exception("Failed to get snapshot")
+
+ # Only do N rooms at once
+ n = 5
+ d_list = [handle_room(e) for e in room_list]
+ for i in range(0, len(d_list), n):
+ yield defer.gatherResults(
+ d_list[i:i + n],
+ consumeErrors=True
+ ).addErrback(unwrapFirstError)
+
+ ret = {
+ "rooms": rooms_ret,
+ "presence": presence,
+ "receipts": receipt,
+ "end": now_token.to_string(),
+ }
+
+ defer.returnValue(ret)
+
+ @defer.inlineCallbacks
+ def room_initial_sync(self, user_id, room_id, pagin_config=None, is_guest=False):
+ """Capture the a snapshot of a room. If user is currently a member of
+ the room this will be what is currently in the room. If the user left
+ the room this will be what was in the room when they left.
+
+ Args:
+ user_id(str): The user to get a snapshot for.
+ room_id(str): The room to get a snapshot of.
+ pagin_config(synapse.streams.config.PaginationConfig):
+ The pagination config used to determine how many messages to
+ return.
+ Raises:
+ AuthError if the user wasn't in the room.
+ Returns:
+ A JSON serialisable dict with the snapshot of the room.
+ """
+
+ membership, member_event_id = yield self._check_in_room_or_world_readable(
+ room_id,
+ user_id,
+ is_guest
+ )
+
+ if membership == Membership.JOIN:
+ result = yield self._room_initial_sync_joined(
+ user_id, room_id, pagin_config, membership, is_guest
+ )
+ elif membership == Membership.LEAVE:
+ result = yield self._room_initial_sync_parted(
+ user_id, room_id, pagin_config, membership, member_event_id, is_guest
+ )
+
+ private_user_data = []
+ tags = yield self.store.get_tags_for_room(user_id, room_id)
+ if tags:
+ private_user_data.append({
+ "type": "m.tag",
+ "content": {"tags": tags},
+ })
+ result["private_user_data"] = private_user_data
+
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
+ membership, member_event_id, is_guest):
+ room_state = yield self.store.get_state_for_events(
+ [member_event_id], None
+ )
+
+ room_state = room_state[member_event_id]
+
+ limit = pagin_config.limit if pagin_config else None
+ if limit is None:
+ limit = 10
+
+ stream_token = yield self.store.get_stream_token_for_event(
+ member_event_id
+ )
+
+ messages, token = yield self.store.get_recent_events_for_room(
+ room_id,
+ limit=limit,
+ end_token=stream_token
+ )
+
+ messages = yield self._filter_events_for_client(
+ user_id, messages, is_guest=is_guest
+ )
+
+ start_token = StreamToken(token[0], 0, 0, 0, 0)
+ end_token = StreamToken(token[1], 0, 0, 0, 0)
+
+ time_now = self.clock.time_msec()
+
+ defer.returnValue({
+ "membership": membership,
+ "room_id": room_id,
+ "messages": {
+ "chunk": [serialize_event(m, time_now) for m in messages],
+ "start": start_token.to_string(),
+ "end": end_token.to_string(),
+ },
+ "state": [serialize_event(s, time_now) for s in room_state.values()],
+ "presence": [],
+ "receipts": [],
+ })
+
+ @defer.inlineCallbacks
+ def _room_initial_sync_joined(self, user_id, room_id, pagin_config,
+ membership, is_guest):
+ current_state = yield self.state.get_current_state(
+ room_id=room_id,
+ )
+
+ # TODO(paul): I wish I was called with user objects not user_id
+ # strings...
+ auth_user = UserID.from_string(user_id)
+
+ # TODO: Do these concurrently
+ time_now = self.clock.time_msec()
+ state = [
+ serialize_event(x, time_now)
+ for x in current_state.values()
+ ]
+
+ now_token = yield self.hs.get_event_sources().get_current_token()
+
+ limit = pagin_config.limit if pagin_config else None
+ if limit is None:
+ limit = 10
+
+ room_members = [
+ m for m in current_state.values()
+ if m.type == EventTypes.Member
+ and m.content["membership"] == Membership.JOIN
+ ]
+
+ presence_handler = self.hs.get_handlers().presence_handler
+
+ @defer.inlineCallbacks
+ def get_presence():
+ states = {}
+ if not is_guest:
+ states = yield presence_handler.get_states(
+ target_users=[UserID.from_string(m.user_id) for m in room_members],
+ auth_user=auth_user,
+ as_event=True,
+ check_auth=False,
+ )
+
+ defer.returnValue(states.values())
+
+ receipts_handler = self.hs.get_handlers().receipts_handler
+
+ presence, receipts, (messages, token) = yield defer.gatherResults(
+ [
+ get_presence(),
+ receipts_handler.get_receipts_for_room(room_id, now_token.receipt_key),
+ self.store.get_recent_events_for_room(
+ room_id,
+ limit=limit,
+ end_token=now_token.room_key,
+ )
+ ],
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError)
+
+ messages = yield self._filter_events_for_client(
+ user_id, messages, is_guest=is_guest, require_all_visible_for_guests=False
+ )
+
+ start_token = now_token.copy_and_replace("room_key", token[0])
+ end_token = now_token.copy_and_replace("room_key", token[1])
+
+ time_now = self.clock.time_msec()
+
+ ret = {
+ "room_id": room_id,
+ "messages": {
+ "chunk": [serialize_event(m, time_now) for m in messages],
+ "start": start_token.to_string(),
+ "end": end_token.to_string(),
+ },
+ "state": state,
+ "presence": presence,
+ "receipts": receipts,
+ }
+ if not is_guest:
+ ret["membership"] = membership
+
+ defer.returnValue(ret)
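
For reference, the response assembled by room_initial_sync above might look roughly like the sketch below; the room id and token values are invented, and the keys mirror the dicts built by _room_initial_sync_joined plus the "private_user_data" list added by room_initial_sync.

    # Hypothetical room_initial_sync response shape (all values invented).
    example_room_initial_sync = {
        "membership": "join",              # omitted for guests
        "room_id": "!abc123:example.org",
        "messages": {
            "chunk": [],                   # serialized message events
            "start": "s10_0_0_0_0",        # pagination tokens (invented)
            "end": "s15_0_0_0_0",
        },
        "state": [],                       # serialized current state events
        "presence": [],                    # m.presence events for members
        "receipts": [],                    # read receipt events
        "private_user_data": [
            {"type": "m.tag", "content": {"tags": {"favourite": {}}}},
        ],
    }
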
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
new file mode 100644
index 00000000..aca65096
--- /dev/null
+++ b/synapse/handlers/presence.py
@@ -0,0 +1,1302 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError, AuthError
+from synapse.api.constants import PresenceState
+
+from synapse.util.logcontext import PreserveLoggingContext
+from synapse.util.logutils import log_function
+from synapse.types import UserID
+import synapse.metrics
+
+from ._base import BaseHandler
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+metrics = synapse.metrics.get_metrics_for(__name__)
+
+
+# Don't bother bumping "last active" time if it differs by less than 60 seconds
+LAST_ACTIVE_GRANULARITY = 60*1000
+
+# Keep no more than this number of offline serial revisions
+MAX_OFFLINE_SERIALS = 1000
+
+
+# TODO(paul): Maybe there's one of these I can steal from somewhere
+def partition(l, func):
+ """Partition the list by the result of func applied to each element."""
+ ret = {}
+
+ for x in l:
+ key = func(x)
+ if key not in ret:
+ ret[key] = []
+ ret[key].append(x)
+
+ return ret
+
+
+def partitionbool(l, func):
+ def boolfunc(x):
+ return bool(func(x))
+
+ ret = partition(l, boolfunc)
+ return ret.get(True, []), ret.get(False, [])
+
+
+class PresenceHandler(BaseHandler):
+
+ STATE_LEVELS = {
+ PresenceState.OFFLINE: 0,
+ PresenceState.UNAVAILABLE: 1,
+ PresenceState.ONLINE: 2,
+ PresenceState.FREE_FOR_CHAT: 3,
+ }
+
+ def __init__(self, hs):
+ super(PresenceHandler, self).__init__(hs)
+
+ self.homeserver = hs
+
+ self.clock = hs.get_clock()
+
+ distributor = hs.get_distributor()
+ distributor.observe("registered_user", self.registered_user)
+
+ distributor.observe(
+ "started_user_eventstream", self.started_user_eventstream
+ )
+ distributor.observe(
+ "stopped_user_eventstream", self.stopped_user_eventstream
+ )
+
+ distributor.observe("user_joined_room", self.user_joined_room)
+
+ distributor.declare("collect_presencelike_data")
+
+ distributor.declare("changed_presencelike_data")
+ distributor.observe(
+ "changed_presencelike_data", self.changed_presencelike_data
+ )
+
+ # outbound signal from the presence module to advertise when a user's
+ # presence has changed
+ distributor.declare("user_presence_changed")
+
+ self.distributor = distributor
+
+ self.federation = hs.get_replication_layer()
+
+ self.federation.register_edu_handler(
+ "m.presence", self.incoming_presence
+ )
+ self.federation.register_edu_handler(
+ "m.presence_invite",
+ lambda origin, content: self.invite_presence(
+ observed_user=UserID.from_string(content["observed_user"]),
+ observer_user=UserID.from_string(content["observer_user"]),
+ )
+ )
+ self.federation.register_edu_handler(
+ "m.presence_accept",
+ lambda origin, content: self.accept_presence(
+ observed_user=UserID.from_string(content["observed_user"]),
+ observer_user=UserID.from_string(content["observer_user"]),
+ )
+ )
+ self.federation.register_edu_handler(
+ "m.presence_deny",
+ lambda origin, content: self.deny_presence(
+ observed_user=UserID.from_string(content["observed_user"]),
+ observer_user=UserID.from_string(content["observer_user"]),
+ )
+ )
+
+ # IN-MEMORY store, mapping local userparts to sets of local users to
+ # be informed of state changes.
+ self._local_pushmap = {}
+ # map local users to sets of remote /domain names/ who are interested
+ # in them
+ self._remote_sendmap = {}
+ # map remote users to sets of local users who're interested in them
+ self._remote_recvmap = {}
+ # list of (serial, set of(userids)) tuples, ordered by serial, latest
+ # first
+ self._remote_offline_serials = []
+
+ # map any user to a UserPresenceCache
+ self._user_cachemap = {}
+ self._user_cachemap_latest_serial = 0
+
+ # map room_ids to the latest presence serial for a member of that
+ # room
+ self._room_serials = {}
+
+ metrics.register_callback(
+ "userCachemap:size",
+ lambda: len(self._user_cachemap),
+ )
+
+ def _get_or_make_usercache(self, user):
+ """If the cache entry doesn't exist, initialise a new one."""
+ if user not in self._user_cachemap:
+ self._user_cachemap[user] = UserPresenceCache()
+ return self._user_cachemap[user]
+
+ def _get_or_offline_usercache(self, user):
+ """If the cache entry doesn't exist, return an OFFLINE one but do not
+ store it into the cache."""
+ if user in self._user_cachemap:
+ return self._user_cachemap[user]
+ else:
+ return UserPresenceCache()
+
+ def registered_user(self, user):
+ return self.store.create_presence(user.localpart)
+
+ @defer.inlineCallbacks
+ def is_presence_visible(self, observer_user, observed_user):
+ assert(self.hs.is_mine(observed_user))
+
+ if observer_user == observed_user:
+ defer.returnValue(True)
+
+ if (yield self.store.user_rooms_intersect(
+ [u.to_string() for u in (observer_user, observed_user)])):
+ defer.returnValue(True)
+
+ if (yield self.store.is_presence_visible(
+ observed_localpart=observed_user.localpart,
+ observer_userid=observer_user.to_string())):
+ defer.returnValue(True)
+
+ defer.returnValue(False)
+
+ @defer.inlineCallbacks
+ def get_state(self, target_user, auth_user, as_event=False, check_auth=True):
+ """Get the current presence state of the given user.
+
+ Args:
+ target_user (UserID): The user whose presence we want
+ auth_user (UserID): The user requesting the presence, used for
+ checking if said user is allowed to see the presence of the
+ `target_user`
+ as_event (bool): Format the return as an event or not?
+ check_auth (bool): Perform the auth checks or not?
+
+ Returns:
+ dict: The presence state of the `target_user`, whose format depends
+ on the `as_event` argument.
+ """
+ if self.hs.is_mine(target_user):
+ if check_auth:
+ visible = yield self.is_presence_visible(
+ observer_user=auth_user,
+ observed_user=target_user
+ )
+
+ if not visible:
+ raise SynapseError(404, "Presence information not visible")
+
+ if target_user in self._user_cachemap:
+ state = self._user_cachemap[target_user].get_state()
+ else:
+ state = yield self.store.get_presence_state(target_user.localpart)
+ if "mtime" in state:
+ del state["mtime"]
+ state["presence"] = state.pop("state")
+ else:
+ # TODO(paul): Have remote server send us permissions set
+ state = self._get_or_offline_usercache(target_user).get_state()
+
+ if "last_active" in state:
+ state["last_active_ago"] = int(
+ self.clock.time_msec() - state.pop("last_active")
+ )
+
+ if as_event:
+ content = state
+
+ content["user_id"] = target_user.to_string()
+
+ if "last_active" in content:
+ content["last_active_ago"] = int(
+ self.clock.time_msec() - content.pop("last_active")
+ )
+
+ defer.returnValue({"type": "m.presence", "content": content})
+ else:
+ defer.returnValue(state)
+
+ @defer.inlineCallbacks
+ def get_states(self, target_users, auth_user, as_event=False, check_auth=True):
+ """A batched version of the `get_state` method that accepts a list of
+ `target_users`
+
+ Args:
+ target_users (list): The list of UserID's whose presence we want
+ auth_user (UserID): The user requesting the presence, used for
+ checking if said user is allowed to see the persence of the
+ `target_users`
+ as_event (bool): Format the return as an event or not?
+ check_auth (bool): Perform the auth checks or not?
+
+ Returns:
+ dict: A mapping from user -> presence_state
+ """
+ local_users, remote_users = partitionbool(
+ target_users,
+ lambda u: self.hs.is_mine(u)
+ )
+
+ if check_auth:
+ for user in local_users:
+ visible = yield self.is_presence_visible(
+ observer_user=auth_user,
+ observed_user=user
+ )
+
+ if not visible:
+ raise SynapseError(404, "Presence information not visible")
+
+ results = {}
+ if local_users:
+ for user in local_users:
+ if user in self._user_cachemap:
+ results[user] = self._user_cachemap[user].get_state()
+
+ local_to_user = {u.localpart: u for u in local_users}
+
+ states = yield self.store.get_presence_states(
+ [u.localpart for u in local_users if u not in results]
+ )
+
+ for local_part, state in states.items():
+ if state is None:
+ continue
+ res = {"presence": state["state"]}
+ if "status_msg" in state and state["status_msg"]:
+ res["status_msg"] = state["status_msg"]
+ results[local_to_user[local_part]] = res
+
+ for user in remote_users:
+ # TODO(paul): Have remote server send us permissions set
+ results[user] = self._get_or_offline_usercache(user).get_state()
+
+ for state in results.values():
+ if "last_active" in state:
+ state["last_active_ago"] = int(
+ self.clock.time_msec() - state.pop("last_active")
+ )
+
+ if as_event:
+ for user, state in results.items():
+ content = state
+ content["user_id"] = user.to_string()
+
+ if "last_active" in content:
+ content["last_active_ago"] = int(
+ self.clock.time_msec() - content.pop("last_active")
+ )
+
+ results[user] = {"type": "m.presence", "content": content}
+
+ defer.returnValue(results)
+
+ @defer.inlineCallbacks
+ @log_function
+ def set_state(self, target_user, auth_user, state):
+ # return
+ # TODO (erikj): Turn this back on. Why did we end up sending EDUs
+ # everywhere?
+
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "User is not hosted on this Home Server")
+
+ if target_user != auth_user:
+ raise AuthError(400, "Cannot set another user's presence")
+
+ if "status_msg" not in state:
+ state["status_msg"] = None
+
+ for k in state.keys():
+ if k not in ("presence", "status_msg"):
+ raise SynapseError(
+ 400, "Unexpected presence state key '%s'" % (k,)
+ )
+
+ if state["presence"] not in self.STATE_LEVELS:
+ raise SynapseError(400, "'%s' is not a valid presence state" % (
+ state["presence"],
+ ))
+
+ logger.debug("Updating presence state of %s to %s",
+ target_user.localpart, state["presence"])
+
+ state_to_store = dict(state)
+ state_to_store["state"] = state_to_store.pop("presence")
+
+ statuscache = self._get_or_offline_usercache(target_user)
+ was_level = self.STATE_LEVELS[statuscache.get_state()["presence"]]
+ now_level = self.STATE_LEVELS[state["presence"]]
+
+ yield self.store.set_presence_state(
+ target_user.localpart, state_to_store
+ )
+ yield self.distributor.fire(
+ "collect_presencelike_data", target_user, state
+ )
+
+ if now_level > was_level:
+ state["last_active"] = self.clock.time_msec()
+
+ now_online = state["presence"] != PresenceState.OFFLINE
+ was_polling = target_user in self._user_cachemap
+
+ if now_online and not was_polling:
+ self.start_polling_presence(target_user, state=state)
+ elif not now_online and was_polling:
+ self.stop_polling_presence(target_user)
+
+ # TODO(paul): perform a presence push as part of start/stop poll so
+ # we don't have to do this all the time
+ yield self.changed_presencelike_data(target_user, state)
+
+ def bump_presence_active_time(self, user, now=None):
+ if now is None:
+ now = self.clock.time_msec()
+
+ prev_state = self._get_or_make_usercache(user)
+ if now - prev_state.state.get("last_active", 0) < LAST_ACTIVE_GRANULARITY:
+ return
+
+ self.changed_presencelike_data(user, {"last_active": now})
+
+ def get_joined_rooms_for_user(self, user):
+ """Get the list of rooms a user is joined to.
+
+ Args:
+ user(UserID): The user.
+ Returns:
+ A Deferred of a list of room id strings.
+ """
+ rm_handler = self.homeserver.get_handlers().room_member_handler
+ return rm_handler.get_joined_rooms_for_user(user)
+
+ def get_joined_users_for_room_id(self, room_id):
+ rm_handler = self.homeserver.get_handlers().room_member_handler
+ return rm_handler.get_room_members(room_id)
+
+ @defer.inlineCallbacks
+ def changed_presencelike_data(self, user, state):
+ """Updates the presence state of a local user.
+
+ Args:
+ user(UserID): The user being updated.
+ state(dict): The new presence state for the user.
+ Returns:
+ A Deferred
+ """
+ self._user_cachemap_latest_serial += 1
+ statuscache = yield self.update_presence_cache(user, state)
+ yield self.push_presence(user, statuscache=statuscache)
+
+ @log_function
+ def started_user_eventstream(self, user):
+ # TODO(paul): Use "last online" state
+ return self.set_state(user, user, {"presence": PresenceState.ONLINE})
+
+ @log_function
+ def stopped_user_eventstream(self, user):
+ # TODO(paul): Save current state as "last online" state
+ return self.set_state(user, user, {"presence": PresenceState.OFFLINE})
+
+ @defer.inlineCallbacks
+ def user_joined_room(self, user, room_id):
+ """Called via the distributor whenever a user joins a room.
+ Notifies the new member of the presence of the current members.
+ Notifies the current members of the room of the new member's presence.
+
+ Args:
+ user(UserID): The user who joined the room.
+ room_id(str): The room id the user joined.
+ """
+ if self.hs.is_mine(user):
+ # No actual update but we need to bump the serial anyway for the
+ # event source
+ self._user_cachemap_latest_serial += 1
+ statuscache = yield self.update_presence_cache(
+ user, room_ids=[room_id]
+ )
+ self.push_update_to_local_and_remote(
+ observed_user=user,
+ room_ids=[room_id],
+ statuscache=statuscache,
+ )
+
+ # We also want to tell them about current presence of people.
+ curr_users = yield self.get_joined_users_for_room_id(room_id)
+
+ for local_user in [c for c in curr_users if self.hs.is_mine(c)]:
+ statuscache = yield self.update_presence_cache(
+ local_user, room_ids=[room_id], add_to_cache=False
+ )
+
+ self.push_update_to_local_and_remote(
+ observed_user=local_user,
+ users_to_push=[user],
+ statuscache=statuscache,
+ )
+
+ @defer.inlineCallbacks
+ def send_invite(self, observer_user, observed_user):
+ """Request the presence of a local or remote user for a local user"""
+ if not self.hs.is_mine(observer_user):
+ raise SynapseError(400, "User is not hosted on this Home Server")
+
+ yield self.store.add_presence_list_pending(
+ observer_user.localpart, observed_user.to_string()
+ )
+
+ if self.hs.is_mine(observed_user):
+ yield self.invite_presence(observed_user, observer_user)
+ else:
+ yield self.federation.send_edu(
+ destination=observed_user.domain,
+ edu_type="m.presence_invite",
+ content={
+ "observed_user": observed_user.to_string(),
+ "observer_user": observer_user.to_string(),
+ }
+ )
+
+ @defer.inlineCallbacks
+ def _should_accept_invite(self, observed_user, observer_user):
+ if not self.hs.is_mine(observed_user):
+ defer.returnValue(False)
+
+ row = yield self.store.has_presence_state(observed_user.localpart)
+ if not row:
+ defer.returnValue(False)
+
+ # TODO(paul): Eventually we'll ask the user's permission for this
+ # before accepting. For now just accept any invite request
+ defer.returnValue(True)
+
+ @defer.inlineCallbacks
+ def invite_presence(self, observed_user, observer_user):
+ """Handles a m.presence_invite EDU. A remote or local user has
+ requested presence updates for a local user. If the invite is accepted
+ then allow the local or remote user to see the presence of the local
+ user.
+
+ Args:
+ observed_user(UserID): The local user whose presence is requested.
+ observer_user(UserID): The remote or local user requesting presence.
+ """
+ accept = yield self._should_accept_invite(observed_user, observer_user)
+
+ if accept:
+ yield self.store.allow_presence_visible(
+ observed_user.localpart, observer_user.to_string()
+ )
+
+ if self.hs.is_mine(observer_user):
+ if accept:
+ yield self.accept_presence(observed_user, observer_user)
+ else:
+ yield self.deny_presence(observed_user, observer_user)
+ else:
+ edu_type = "m.presence_accept" if accept else "m.presence_deny"
+
+ yield self.federation.send_edu(
+ destination=observer_user.domain,
+ edu_type=edu_type,
+ content={
+ "observed_user": observed_user.to_string(),
+ "observer_user": observer_user.to_string(),
+ }
+ )
+
+ @defer.inlineCallbacks
+ def accept_presence(self, observed_user, observer_user):
+ """Handles a m.presence_accept EDU. Mark a presence invite from a
+ local or remote user as accepted in a local user's presence list.
+ Starts polling for presence updates from the local or remote user.
+
+ Args:
+ observed_user(UserID): The user to update in the presence list.
+ observer_user(UserID): The owner of the presence list to update.
+ """
+ yield self.store.set_presence_list_accepted(
+ observer_user.localpart, observed_user.to_string()
+ )
+
+ self.start_polling_presence(
+ observer_user, target_user=observed_user
+ )
+
+ @defer.inlineCallbacks
+ def deny_presence(self, observed_user, observer_user):
+ """Handle a m.presence_deny EDU. Removes a local or remote user from a
+ local user's presence list.
+
+ Args:
+ observed_user(UserID): The local or remote user to remove from the
+ list.
+ observer_user(UserID): The local owner of the presence list.
+ Returns:
+ A Deferred.
+ """
+ yield self.store.del_presence_list(
+ observer_user.localpart, observed_user.to_string()
+ )
+
+ # TODO(paul): Inform the user somehow?
+
+ @defer.inlineCallbacks
+ def drop(self, observed_user, observer_user):
+ """Remove a local or remote user from a local user's presence list and
+ unsubscribe the local user from updates that user.
+
+ Args:
+ observed_user(UserId): The local or remote user to remove from the
+ list.
+ observer_user(UserId): The local owner of the presence list.
+ Returns:
+ A Deferred.
+ """
+ if not self.hs.is_mine(observer_user):
+ raise SynapseError(400, "User is not hosted on this Home Server")
+
+ yield self.store.del_presence_list(
+ observer_user.localpart, observed_user.to_string()
+ )
+
+ self.stop_polling_presence(
+ observer_user, target_user=observed_user
+ )
+
+ @defer.inlineCallbacks
+ def get_presence_list(self, observer_user, accepted=None):
+ """Get the presence list for a local user. The retured list includes
+ the current presence state for each user listed.
+
+ Args:
+ observer_user(UserID): The local user whose presence list to fetch.
+ accepted(bool or None): If not None then only include users who
+ have (True) or have not (False) accepted the presence invite
+ request.
+ Returns:
+ A Deferred list of presence state events.
+ """
+ if not self.hs.is_mine(observer_user):
+ raise SynapseError(400, "User is not hosted on this Home Server")
+
+ presence_list = yield self.store.get_presence_list(
+ observer_user.localpart, accepted=accepted
+ )
+
+ results = []
+ for row in presence_list:
+ observed_user = UserID.from_string(row["observed_user_id"])
+ result = {
+ "observed_user": observed_user, "accepted": row["accepted"]
+ }
+ result.update(
+ self._get_or_offline_usercache(observed_user).get_state()
+ )
+ if "last_active" in result:
+ result["last_active_ago"] = int(
+ self.clock.time_msec() - result.pop("last_active")
+ )
+ results.append(result)
+
+ defer.returnValue(results)
+
+ @defer.inlineCallbacks
+ @log_function
+ def start_polling_presence(self, user, target_user=None, state=None):
+ """Subscribe a local user to presence updates from a local or remote
+ user. If no target_user is supplied then subscribe to all users stored
+ in the presence list for the local user.
+
+ Additionally this pushes the current presence state of this user to all
+ target_users. That state can be provided directly or will be read from
+ the stored state for the local user.
+
+ Also this attempts to notify the local user of the current state of
+ any local target users.
+
+ Args:
+ user(UserID): The local user that wishes for presence updates.
+ target_user(UserID): The local or remote user whose updates are
+ wanted.
+ state(dict): Optional presence state for the local user.
+ """
+ logger.debug("Start polling for presence from %s", user)
+
+ if target_user:
+ target_users = set([target_user])
+ room_ids = []
+ else:
+ presence = yield self.store.get_presence_list(
+ user.localpart, accepted=True
+ )
+ target_users = set([
+ UserID.from_string(x["observed_user_id"]) for x in presence
+ ])
+
+ # Also include people in all my rooms
+
+ room_ids = yield self.get_joined_rooms_for_user(user)
+
+ if state is None:
+ state = yield self.store.get_presence_state(user.localpart)
+ else:
+ # statuscache = self._get_or_make_usercache(user)
+ # self._user_cachemap_latest_serial += 1
+ # statuscache.update(state, self._user_cachemap_latest_serial)
+ pass
+
+ yield self.push_update_to_local_and_remote(
+ observed_user=user,
+ users_to_push=target_users,
+ room_ids=room_ids,
+ statuscache=self._get_or_make_usercache(user),
+ )
+
+ for target_user in target_users:
+ if self.hs.is_mine(target_user):
+ self._start_polling_local(user, target_user)
+
+ # We also want to tell the person that just came online the
+ # current presence state of the people they are interested in.
+ self.push_update_to_clients(
+ users_to_push=[user],
+ )
+
+ deferreds = []
+ remote_users = [u for u in target_users if not self.hs.is_mine(u)]
+ remoteusers_by_domain = partition(remote_users, lambda u: u.domain)
+ # Only poll for people in our get_presence_list
+ for domain in remoteusers_by_domain:
+ remoteusers = remoteusers_by_domain[domain]
+
+ deferreds.append(self._start_polling_remote(
+ user, domain, remoteusers
+ ))
+
+ yield defer.DeferredList(deferreds, consumeErrors=True)
+
+ def _start_polling_local(self, user, target_user):
+ """Subscribe a local user to presence updates for a local user
+
+ Args:
+ user(UserID): The local user that wishes for updates.
+ target_user(UserID): The local user whose updates are wanted.
+ """
+ target_localpart = target_user.localpart
+
+ if target_localpart not in self._local_pushmap:
+ self._local_pushmap[target_localpart] = set()
+
+ self._local_pushmap[target_localpart].add(user)
+
+ def _start_polling_remote(self, user, domain, remoteusers):
+ """Subscribe a local user to presence updates for remote users on a
+ given remote domain.
+
+ Args:
+ user(UserID): The local user that wishes for updates.
+ domain(str): The remote server the local user wants updates from.
+ remoteusers([UserID]): The remote users that the local user wants to be
+ told about.
+ Returns:
+ A Deferred.
+ """
+ to_poll = set()
+
+ for u in remoteusers:
+ if u not in self._remote_recvmap:
+ self._remote_recvmap[u] = set()
+ to_poll.add(u)
+
+ self._remote_recvmap[u].add(user)
+
+ if not to_poll:
+ return defer.succeed(None)
+
+ return self.federation.send_edu(
+ destination=domain,
+ edu_type="m.presence",
+ content={"poll": [u.to_string() for u in to_poll]}
+ )
+
+ @log_function
+ def stop_polling_presence(self, user, target_user=None):
+ """Unsubscribe a local user from presence updates from a local or
+ remote user. If no target user is supplied then unsubscribe the user
+ from all presence updates that the user had subscribed to.
+
+ Args:
+ user(UserID): The local user that no longer wishes for updates.
+ target_user(UserID or None): The user whose updates are no longer
+ wanted.
+ Returns:
+ A Deferred.
+ """
+ logger.debug("Stop polling for presence from %s", user)
+
+ if not target_user or self.hs.is_mine(target_user):
+ self._stop_polling_local(user, target_user=target_user)
+
+ deferreds = []
+
+ if target_user:
+ if target_user not in self._remote_recvmap:
+ return
+ target_users = set([target_user])
+ else:
+ target_users = self._remote_recvmap.keys()
+
+ remoteusers = [u for u in target_users
+ if user in self._remote_recvmap[u]]
+ remoteusers_by_domain = partition(remoteusers, lambda u: u.domain)
+
+ for domain in remoteusers_by_domain:
+ remoteusers = remoteusers_by_domain[domain]
+
+ deferreds.append(
+ self._stop_polling_remote(user, domain, remoteusers)
+ )
+
+ return defer.DeferredList(deferreds, consumeErrors=True)
+
+ def _stop_polling_local(self, user, target_user):
+ """Unsubscribe a local user from presence updates from a local user on
+ this server.
+
+ Args:
+ user(UserID): The local user that no longer wishes for updates.
+ target_user(UserID): The user whose updates are no longer wanted.
+ """
+ for localpart in self._local_pushmap.keys():
+ if target_user and localpart != target_user.localpart:
+ continue
+
+ if user in self._local_pushmap[localpart]:
+ self._local_pushmap[localpart].remove(user)
+
+ if not self._local_pushmap[localpart]:
+ del self._local_pushmap[localpart]
+
+ @log_function
+ def _stop_polling_remote(self, user, domain, remoteusers):
+ """Unsubscribe a local user from presence updates from remote users on
+ a given domain.
+
+ Args:
+ user(UserID): The local user that no longer wishes for updates.
+ domain(str): The remote server to unsubscribe from.
+ remoteusers([UserID]): The users on that remote server that the
+ local user no longer wishes to be updated about.
+ Returns:
+ A Deferred.
+ """
+ to_unpoll = set()
+
+ for u in remoteusers:
+ self._remote_recvmap[u].remove(user)
+
+ if not self._remote_recvmap[u]:
+ del self._remote_recvmap[u]
+ to_unpoll.add(u)
+
+ if not to_unpoll:
+ return defer.succeed(None)
+
+ return self.federation.send_edu(
+ destination=domain,
+ edu_type="m.presence",
+ content={"unpoll": [u.to_string() for u in to_unpoll]}
+ )
+
+ @defer.inlineCallbacks
+ @log_function
+ def push_presence(self, user, statuscache):
+ """
+ Notify local and remote users of a change in presence of a local user.
+ Pushes the update to local clients and remote domains that are directly
+ subscribed to the presence of the local user.
+ Also pushes that update to any local user or remote domain that shares
+ a room with the local user.
+
+ Args:
+ user(UserID): The local user whose presence was updated.
+ statuscache(UserPresenceCache): Cache of the user's presence state
+ Returns:
+ A Deferred.
+ """
+ assert(self.hs.is_mine(user))
+
+ logger.debug("Pushing presence update from %s", user)
+
+ localusers = set(self._local_pushmap.get(user.localpart, set()))
+ remotedomains = set(self._remote_sendmap.get(user.localpart, set()))
+
+ # Reflect users' status changes back to themselves, so UIs look nice
+ # and also user is informed of server-forced pushes
+ localusers.add(user)
+
+ room_ids = yield self.get_joined_rooms_for_user(user)
+
+ if not localusers and not room_ids:
+ defer.returnValue(None)
+
+ yield self.push_update_to_local_and_remote(
+ observed_user=user,
+ users_to_push=localusers,
+ remote_domains=remotedomains,
+ room_ids=room_ids,
+ statuscache=statuscache,
+ )
+ yield self.distributor.fire("user_presence_changed", user, statuscache)
+
+ @defer.inlineCallbacks
+ def incoming_presence(self, origin, content):
+ """Handle an incoming m.presence EDU.
+ For each presence update in the "push" list update our local cache and
+ notify the appropriate local clients. Only clients that share a room
+ or are directly subscribed to the presence for a user should be
+ notified of the update.
+ For each subscription request in the "poll" list start pushing presence
+ updates to the remote server.
+ For each unsubscribe request in the "unpoll" list stop pushing presence
+ updates to the remote server.
+
+ Args:
+ origin(str): The source of this m.presence EDU.
+ content(dict): The content of this m.presence EDU.
+ Returns:
+ A Deferred.
+ """
+ deferreds = []
+
+ for push in content.get("push", []):
+ user = UserID.from_string(push["user_id"])
+
+ logger.debug("Incoming presence update from %s", user)
+
+ observers = set(self._remote_recvmap.get(user, set()))
+ if observers:
+ logger.debug(
+ " | %d interested local observers %r", len(observers), observers
+ )
+
+ room_ids = yield self.get_joined_rooms_for_user(user)
+ if room_ids:
+ logger.debug(" | %d interested room IDs %r", len(room_ids), room_ids)
+
+ state = dict(push)
+ del state["user_id"]
+
+ if "presence" not in state:
+ logger.warning(
+ "Received a presence 'push' EDU from %s without a"
+ " 'presence' key", origin
+ )
+ continue
+
+ if "last_active_ago" in state:
+ state["last_active"] = int(
+ self.clock.time_msec() - state.pop("last_active_ago")
+ )
+
+ self._user_cachemap_latest_serial += 1
+ yield self.update_presence_cache(user, state, room_ids=room_ids)
+
+ if not observers and not room_ids:
+ logger.debug(" | no interested observers or room IDs")
+ continue
+
+ self.push_update_to_clients(
+ users_to_push=observers, room_ids=room_ids
+ )
+
+ user_id = user.to_string()
+
+ if state["presence"] == PresenceState.OFFLINE:
+ self._remote_offline_serials.insert(
+ 0,
+ (self._user_cachemap_latest_serial, set([user_id]))
+ )
+ while len(self._remote_offline_serials) > MAX_OFFLINE_SERIALS:
+ self._remote_offline_serials.pop() # remove the oldest
+ if user in self._user_cachemap:
+ del self._user_cachemap[user]
+ else:
+ # Remove the user from remote_offline_serials now that they're
+ # no longer offline
+ for idx, elem in enumerate(self._remote_offline_serials):
+ (_, user_ids) = elem
+ user_ids.discard(user_id)
+ if not user_ids:
+ self._remote_offline_serials.pop(idx)
+
+ for poll in content.get("poll", []):
+ user = UserID.from_string(poll)
+
+ if not self.hs.is_mine(user):
+ continue
+
+ # TODO(paul) permissions checks
+
+ if user not in self._remote_sendmap:
+ self._remote_sendmap[user] = set()
+
+ self._remote_sendmap[user].add(origin)
+
+ deferreds.append(self._push_presence_remote(user, origin))
+
+ for unpoll in content.get("unpoll", []):
+ user = UserID.from_string(unpoll)
+
+ if not self.hs.is_mine(user):
+ continue
+
+ if user in self._remote_sendmap:
+ self._remote_sendmap[user].remove(origin)
+
+ if not self._remote_sendmap[user]:
+ del self._remote_sendmap[user]
+
+ yield defer.DeferredList(deferreds, consumeErrors=True)
+
+ @defer.inlineCallbacks
+ def update_presence_cache(self, user, state={}, room_ids=None,
+ add_to_cache=True):
+ """Update the presence cache for a user with a new state and bump the
+ serial to the latest value.
+
+ Args:
+ user(UserID): The user being updated
+ state(dict): The presence state being updated
+ room_ids(None or list of str): A list of room_ids to update. If
+ room_ids is None then fetch the list of room_ids the user is
+ joined to.
+ add_to_cache: Whether to add an entry to the presence cache if the
+ user isn't already in the cache.
+ Returns:
+ A Deferred UserPresenceCache for the user being updated.
+ """
+ if room_ids is None:
+ room_ids = yield self.get_joined_rooms_for_user(user)
+
+ for room_id in room_ids:
+ self._room_serials[room_id] = self._user_cachemap_latest_serial
+ if add_to_cache:
+ statuscache = self._get_or_make_usercache(user)
+ else:
+ statuscache = self._get_or_offline_usercache(user)
+ statuscache.update(state, serial=self._user_cachemap_latest_serial)
+ defer.returnValue(statuscache)
+
+ @defer.inlineCallbacks
+ def push_update_to_local_and_remote(self, observed_user, statuscache,
+ users_to_push=[], room_ids=[],
+ remote_domains=[]):
+ """Notify local clients and remote servers of a change in the presence
+ of a user.
+
+ Args:
+ observed_user(UserID): The user to push the presence state for.
+ statuscache(UserPresenceCache): The cache for the presence state to
+ push.
+ users_to_push([UserID]): A list of local and remote users to
+ notify.
+ room_ids([str]): Notify the local and remote occupants of these
+ rooms.
+ remote_domains([str]): A list of remote servers to notify in
+ addition to those implied by the users_to_push and the
+ room_ids.
+ Returns:
+ A Deferred.
+ """
+
+ localusers, remoteusers = partitionbool(
+ users_to_push,
+ lambda u: self.hs.is_mine(u)
+ )
+
+ localusers = set(localusers)
+
+ self.push_update_to_clients(
+ users_to_push=localusers, room_ids=room_ids
+ )
+
+ remote_domains = set(remote_domains)
+ remote_domains |= set([r.domain for r in remoteusers])
+ for room_id in room_ids:
+ remote_domains.update(
+ (yield self.store.get_joined_hosts_for_room(room_id))
+ )
+
+ remote_domains.discard(self.hs.hostname)
+
+ deferreds = []
+ for domain in remote_domains:
+ logger.debug(" | push to remote domain %s", domain)
+ deferreds.append(
+ self._push_presence_remote(
+ observed_user, domain, state=statuscache.get_state()
+ )
+ )
+
+ yield defer.DeferredList(deferreds, consumeErrors=True)
+
+ defer.returnValue((localusers, remote_domains))
+
+ def push_update_to_clients(self, users_to_push=[], room_ids=[]):
+ """Notify clients of a new presence event.
+
+ Args:
+ users_to_push([UserID]): List of users to notify.
+ room_ids([str]): List of room_ids to notify.
+ """
+ with PreserveLoggingContext():
+ self.notifier.on_new_event(
+ "presence_key",
+ self._user_cachemap_latest_serial,
+ users_to_push,
+ room_ids,
+ )
+
+ @defer.inlineCallbacks
+ def _push_presence_remote(self, user, destination, state=None):
+ """Push a user's presence to a remote server. If a presence state event
+ that event is sent. Otherwise a new state event is constructed from the
+ stored presence state.
+ The last_active is replaced with last_active_ago in case the wallclock
+ time on the remote server is different to the time on this server.
+ Sends an EDU to the remote server with the current presence state.
+
+ Args:
+ user(UserID): The user to push the presence state for.
+ destination(str): The remote server to send state to.
+ state(dict): The state to push, or None to use the current stored
+ state.
+ Returns:
+ A Deferred.
+ """
+ if state is None:
+ state = yield self.store.get_presence_state(user.localpart)
+ del state["mtime"]
+ state["presence"] = state.pop("state")
+
+ if user in self._user_cachemap:
+ state["last_active"] = (
+ self._user_cachemap[user].get_state()["last_active"]
+ )
+
+ yield self.distributor.fire(
+ "collect_presencelike_data", user, state
+ )
+
+ if "last_active" in state:
+ state = dict(state)
+ state["last_active_ago"] = int(
+ self.clock.time_msec() - state.pop("last_active")
+ )
+
+ user_state = {"user_id": user.to_string(), }
+ user_state.update(state)
+
+ yield self.federation.send_edu(
+ destination=destination,
+ edu_type="m.presence",
+ content={"push": [user_state, ], }
+ )
+
+
+class PresenceEventSource(object):
+ def __init__(self, hs):
+ self.hs = hs
+ self.clock = hs.get_clock()
+
+ @defer.inlineCallbacks
+ @log_function
+ def get_new_events(self, user, from_key, room_ids=None, **kwargs):
+ from_key = int(from_key)
+ room_ids = room_ids or []
+
+ presence = self.hs.get_handlers().presence_handler
+ cachemap = presence._user_cachemap
+
+ max_serial = presence._user_cachemap_latest_serial
+
+ clock = self.clock
+ latest_serial = 0
+
+ user_ids_to_check = {user}
+ presence_list = yield presence.store.get_presence_list(
+ user.localpart, accepted=True
+ )
+ if presence_list is not None:
+ user_ids_to_check |= set(
+ UserID.from_string(p["observed_user_id"]) for p in presence_list
+ )
+ for room_id in set(room_ids) & set(presence._room_serials):
+ if presence._room_serials[room_id] > from_key:
+ joined = yield presence.get_joined_users_for_room_id(room_id)
+ user_ids_to_check |= set(joined)
+
+ updates = []
+ for observed_user in user_ids_to_check & set(cachemap):
+ cached = cachemap[observed_user]
+
+ if cached.serial <= from_key or cached.serial > max_serial:
+ continue
+
+ latest_serial = max(cached.serial, latest_serial)
+ updates.append(cached.make_event(user=observed_user, clock=clock))
+
+ # TODO(paul): limit
+
+ for serial, user_ids in presence._remote_offline_serials:
+ if serial <= from_key:
+ break
+
+ if serial > max_serial:
+ continue
+
+ latest_serial = max(latest_serial, serial)
+ for u in user_ids:
+ updates.append({
+ "type": "m.presence",
+ "content": {"user_id": u, "presence": PresenceState.OFFLINE},
+ })
+ # TODO(paul): For the v2 API we want to tell the client their from_key
+ # is too old if we fell off the end of the _remote_offline_serials
+ # list, and get them to invalidate+resync. In v1 we have no such
+ # concept so this is a best-effort result.
+
+ if updates:
+ defer.returnValue((updates, latest_serial))
+ else:
+ defer.returnValue(([], presence._user_cachemap_latest_serial))
+
+ def get_current_key(self):
+ presence = self.hs.get_handlers().presence_handler
+ return presence._user_cachemap_latest_serial
+
+ @defer.inlineCallbacks
+ def get_pagination_rows(self, user, pagination_config, key):
+ # TODO (erikj): Does this make sense? Ordering?
+
+ from_key = int(pagination_config.from_key)
+
+ if pagination_config.to_key:
+ to_key = int(pagination_config.to_key)
+ else:
+ to_key = -1
+
+ presence = self.hs.get_handlers().presence_handler
+ cachemap = presence._user_cachemap
+
+ user_ids_to_check = {user}
+ presence_list = yield presence.store.get_presence_list(
+ user.localpart, accepted=True
+ )
+ if presence_list is not None:
+ user_ids_to_check |= set(
+ UserID.from_string(p["observed_user_id"]) for p in presence_list
+ )
+ room_ids = yield presence.get_joined_rooms_for_user(user)
+ for room_id in set(room_ids) & set(presence._room_serials):
+ if presence._room_serials[room_id] >= from_key:
+ joined = yield presence.get_joined_users_for_room_id(room_id)
+ user_ids_to_check |= set(joined)
+
+ updates = []
+ for observed_user in user_ids_to_check & set(cachemap):
+ if not (to_key < cachemap[observed_user].serial <= from_key):
+ continue
+
+ updates.append((observed_user, cachemap[observed_user]))
+
+ # TODO(paul): limit
+
+ if updates:
+ clock = self.clock
+
+ earliest_serial = max([x[1].serial for x in updates])
+ data = [x[1].make_event(user=x[0], clock=clock) for x in updates]
+
+ defer.returnValue((data, earliest_serial))
+ else:
+ defer.returnValue(([], 0))
+
+
+class UserPresenceCache(object):
+ """Store an observed user's state and status message.
+
+ Includes the update timestamp.
+ """
+ def __init__(self):
+ self.state = {"presence": PresenceState.OFFLINE}
+ self.serial = None
+
+ def __repr__(self):
+ return "UserPresenceCache(state=%r, serial=%r)" % (
+ self.state, self.serial
+ )
+
+ def update(self, state, serial):
+ assert("mtime_age" not in state)
+
+ self.state.update(state)
+ # Delete keys that are now 'None'
+ for k in self.state.keys():
+ if self.state[k] is None:
+ del self.state[k]
+
+ self.serial = serial
+
+ if "status_msg" in state:
+ self.status_msg = state["status_msg"]
+ else:
+ self.status_msg = None
+
+ def get_state(self):
+ # clone it so caller can't break our cache
+ state = dict(self.state)
+ return state
+
+ def make_event(self, user, clock):
+ content = self.get_state()
+ content["user_id"] = user.to_string()
+
+ if "last_active" in content:
+ content["last_active_ago"] = int(
+ clock.time_msec() - content.pop("last_active")
+ )
+
+ return {"type": "m.presence", "content": content}
diff --git a/synapse/handlers/private_user_data.py b/synapse/handlers/private_user_data.py
new file mode 100644
index 00000000..1abe45ed
--- /dev/null
+++ b/synapse/handlers/private_user_data.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+
+class PrivateUserDataEventSource(object):
+ def __init__(self, hs):
+ self.store = hs.get_datastore()
+
+ def get_current_key(self, direction='f'):
+ return self.store.get_max_private_user_data_stream_id()
+
+ @defer.inlineCallbacks
+ def get_new_events(self, user, from_key, **kwargs):
+ user_id = user.to_string()
+ last_stream_id = from_key
+
+ current_stream_id = yield self.store.get_max_private_user_data_stream_id()
+ tags = yield self.store.get_updated_tags(user_id, last_stream_id)
+
+ results = []
+ for room_id, room_tags in tags.items():
+ results.append({
+ "type": "m.tag",
+ "content": {"tags": room_tags},
+ "room_id": room_id,
+ })
+
+ defer.returnValue((results, current_stream_id))
+
+ @defer.inlineCallbacks
+ def get_pagination_rows(self, user, config, key):
+ defer.returnValue(([], config.to_id))
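
get_new_events above emits one m.tag event per room whose tags changed since from_key; a single entry might look like this sketch, with an invented room id and tag name.

    # Hypothetical m.tag stream entry (values invented).
    example_tag_event = {
        "type": "m.tag",
        "content": {"tags": {"work": {"order": 1}}},
        "room_id": "!abc123:example.org",
    }
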
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
new file mode 100644
index 00000000..799faffe
--- /dev/null
+++ b/synapse/handlers/profile.py
@@ -0,0 +1,228 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError, AuthError, CodeMessageException
+from synapse.api.constants import EventTypes, Membership
+from synapse.types import UserID
+from synapse.util import unwrapFirstError
+
+from ._base import BaseHandler
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class ProfileHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(ProfileHandler, self).__init__(hs)
+
+ self.federation = hs.get_replication_layer()
+ self.federation.register_query_handler(
+ "profile", self.on_profile_query
+ )
+
+ distributor = hs.get_distributor()
+ self.distributor = distributor
+
+ distributor.observe("registered_user", self.registered_user)
+
+ distributor.observe(
+ "collect_presencelike_data", self.collect_presencelike_data
+ )
+
+ def registered_user(self, user):
+ return self.store.create_profile(user.localpart)
+
+ @defer.inlineCallbacks
+ def get_displayname(self, target_user):
+ if self.hs.is_mine(target_user):
+ displayname = yield self.store.get_profile_displayname(
+ target_user.localpart
+ )
+
+ defer.returnValue(displayname)
+ else:
+ try:
+ result = yield self.federation.make_query(
+ destination=target_user.domain,
+ query_type="profile",
+ args={
+ "user_id": target_user.to_string(),
+ "field": "displayname",
+ }
+ )
+ except CodeMessageException as e:
+ if e.code != 404:
+ logger.exception("Failed to get displayname")
+
+ raise
+ except Exception:
+ logger.exception("Failed to get displayname")
+ else:
+ defer.returnValue(result["displayname"])
+
+ @defer.inlineCallbacks
+ def set_displayname(self, target_user, auth_user, new_displayname):
+ """target_user is the user whose displayname is to be changed;
+ auth_user is the user attempting to make this change."""
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "User is not hosted on this Home Server")
+
+ if target_user != auth_user:
+ raise AuthError(400, "Cannot set another user's displayname")
+
+ if new_displayname == '':
+ new_displayname = None
+
+ yield self.store.set_profile_displayname(
+ target_user.localpart, new_displayname
+ )
+
+ yield self.distributor.fire(
+ "changed_presencelike_data", target_user, {
+ "displayname": new_displayname,
+ }
+ )
+
+ yield self._update_join_states(target_user)
+
+ @defer.inlineCallbacks
+ def get_avatar_url(self, target_user):
+ if self.hs.is_mine(target_user):
+ avatar_url = yield self.store.get_profile_avatar_url(
+ target_user.localpart
+ )
+
+ defer.returnValue(avatar_url)
+ else:
+ try:
+ result = yield self.federation.make_query(
+ destination=target_user.domain,
+ query_type="profile",
+ args={
+ "user_id": target_user.to_string(),
+ "field": "avatar_url",
+ }
+ )
+ except CodeMessageException as e:
+ if e.code != 404:
+ logger.exception("Failed to get avatar_url")
+ raise
+ except Exception:
+ logger.exception("Failed to get avatar_url")
+ else:
+ defer.returnValue(result["avatar_url"])
+
+ @defer.inlineCallbacks
+ def set_avatar_url(self, target_user, auth_user, new_avatar_url):
+ """target_user is the user whose avatar_url is to be changed;
+ auth_user is the user attempting to make this change."""
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "User is not hosted on this Home Server")
+
+ if target_user != auth_user:
+ raise AuthError(400, "Cannot set another user's avatar_url")
+
+ yield self.store.set_profile_avatar_url(
+ target_user.localpart, new_avatar_url
+ )
+
+ yield self.distributor.fire(
+ "changed_presencelike_data", target_user, {
+ "avatar_url": new_avatar_url,
+ }
+ )
+
+ yield self._update_join_states(target_user)
+
+ @defer.inlineCallbacks
+ def collect_presencelike_data(self, user, state):
+ if not self.hs.is_mine(user):
+ defer.returnValue(None)
+
+ (displayname, avatar_url) = yield defer.gatherResults(
+ [
+ self.store.get_profile_displayname(user.localpart),
+ self.store.get_profile_avatar_url(user.localpart),
+ ],
+ consumeErrors=True
+ ).addErrback(unwrapFirstError)
+
+ state["displayname"] = displayname
+ state["avatar_url"] = avatar_url
+
+ defer.returnValue(None)
+
+ @defer.inlineCallbacks
+ def on_profile_query(self, args):
+ user = UserID.from_string(args["user_id"])
+ if not self.hs.is_mine(user):
+ raise SynapseError(400, "User is not hosted on this Home Server")
+
+ just_field = args.get("field", None)
+
+ response = {}
+
+ if just_field is None or just_field == "displayname":
+ response["displayname"] = yield self.store.get_profile_displayname(
+ user.localpart
+ )
+
+ if just_field is None or just_field == "avatar_url":
+ response["avatar_url"] = yield self.store.get_profile_avatar_url(
+ user.localpart
+ )
+
+ defer.returnValue(response)
+
+ @defer.inlineCallbacks
+ def _update_join_states(self, user):
+ if not self.hs.is_mine(user):
+ return
+
+ self.ratelimit(user.to_string())
+
+ joins = yield self.store.get_rooms_for_user(
+ user.to_string(),
+ )
+
+ for j in joins:
+ content = {
+ "membership": Membership.JOIN,
+ }
+
+ yield self.distributor.fire(
+ "collect_presencelike_data", user, content
+ )
+
+ msg_handler = self.hs.get_handlers().message_handler
+ try:
+ yield msg_handler.create_and_send_event({
+ "type": EventTypes.Member,
+ "room_id": j.room_id,
+ "state_key": user.to_string(),
+ "content": content,
+ "sender": user.to_string()
+ }, ratelimit=False)
+ except Exception as e:
+ logger.warn(
+ "Failed to update join event for room %s - %s",
+ j.room_id, str(e.message)
+ )
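
A sketch of the federation "profile" query exchanged above: get_displayname and get_avatar_url send the args, and on_profile_query builds the response. The user id and values are invented; when no "field" is given, both keys are returned.

    # Hypothetical federation "profile" query and response (values invented).
    example_profile_query_args = {
        "user_id": "@alice:remote.example",
        "field": "displayname",        # or "avatar_url"; omit for both
    }
    example_profile_query_response = {
        "displayname": "Alice",
    }
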
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
new file mode 100644
index 00000000..973f4d5c
--- /dev/null
+++ b/synapse/handlers/receipts.py
@@ -0,0 +1,202 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseHandler
+
+from twisted.internet import defer
+
+from synapse.util.logcontext import PreserveLoggingContext
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class ReceiptsHandler(BaseHandler):
+ def __init__(self, hs):
+ super(ReceiptsHandler, self).__init__(hs)
+
+ self.hs = hs
+ self.federation = hs.get_replication_layer()
+ self.federation.register_edu_handler(
+ "m.receipt", self._received_remote_receipt
+ )
+ self.clock = self.hs.get_clock()
+
+ self._receipt_cache = None
+
+ @defer.inlineCallbacks
+ def received_client_receipt(self, room_id, receipt_type, user_id,
+ event_id):
+ """Called when a client tells us a local user has read up to the given
+ event_id in the room.
+ """
+ receipt = {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ "event_ids": [event_id],
+ "data": {
+ "ts": int(self.clock.time_msec()),
+ }
+ }
+
+ is_new = yield self._handle_new_receipts([receipt])
+
+ if is_new:
+ self._push_remotes([receipt])
+
+ @defer.inlineCallbacks
+ def _received_remote_receipt(self, origin, content):
+ """Called when we receive an EDU of type m.receipt from a remote HS.
+ """
+ receipts = [
+ {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ "event_ids": user_values["event_ids"],
+ "data": user_values.get("data", {}),
+ }
+ for room_id, room_values in content.items()
+ for receipt_type, users in room_values.items()
+ for user_id, user_values in users.items()
+ ]
+
+ yield self._handle_new_receipts(receipts)
+
+ @defer.inlineCallbacks
+ def _handle_new_receipts(self, receipts):
+ """Takes a list of receipts, stores them and informs the notifier.
+ """
+ for receipt in receipts:
+ room_id = receipt["room_id"]
+ receipt_type = receipt["receipt_type"]
+ user_id = receipt["user_id"]
+ event_ids = receipt["event_ids"]
+ data = receipt["data"]
+
+ res = yield self.store.insert_receipt(
+ room_id, receipt_type, user_id, event_ids, data
+ )
+
+ if not res:
+ # res will be None if this read receipt is 'old'
+ defer.returnValue(False)
+
+ stream_id, max_persisted_id = res
+
+ with PreserveLoggingContext():
+ self.notifier.on_new_event(
+ "receipt_key", max_persisted_id, rooms=[room_id]
+ )
+
+ defer.returnValue(True)
+
+ @defer.inlineCallbacks
+ def _push_remotes(self, receipts):
+ """Given a list of receipts, works out which remote servers should be
+ poked and pokes them.
+ """
+ # TODO: Some of this stuff should be coalesced.
+ for receipt in receipts:
+ room_id = receipt["room_id"]
+ receipt_type = receipt["receipt_type"]
+ user_id = receipt["user_id"]
+ event_ids = receipt["event_ids"]
+ data = receipt["data"]
+
+ remotedomains = set()
+
+ rm_handler = self.hs.get_handlers().room_member_handler
+ yield rm_handler.fetch_room_distributions_into(
+ room_id, localusers=None, remotedomains=remotedomains
+ )
+
+ logger.debug("Sending receipt to: %r", remotedomains)
+
+ for domain in remotedomains:
+ self.federation.send_edu(
+ destination=domain,
+ edu_type="m.receipt",
+ content={
+ room_id: {
+ receipt_type: {
+ user_id: {
+ "event_ids": event_ids,
+ "data": data,
+ }
+ }
+ },
+ },
+ )
+
+ @defer.inlineCallbacks
+ def get_receipts_for_room(self, room_id, to_key):
+ """Gets all receipts for a room, upto the given key.
+ """
+ result = yield self.store.get_linearized_receipts_for_room(
+ room_id,
+ to_key=to_key,
+ )
+
+ if not result:
+ defer.returnValue([])
+
+ defer.returnValue(result)
+
+
+class ReceiptEventSource(object):
+ def __init__(self, hs):
+ self.store = hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def get_new_events(self, from_key, room_ids, **kwargs):
+ from_key = int(from_key)
+ to_key = yield self.get_current_key()
+
+ if from_key == to_key:
+ defer.returnValue(([], to_key))
+
+ events = yield self.store.get_linearized_receipts_for_rooms(
+ room_ids,
+ from_key=from_key,
+ to_key=to_key,
+ )
+
+ defer.returnValue((events, to_key))
+
+ def get_current_key(self, direction='f'):
+ return self.store.get_max_receipt_stream_id()
+
+ @defer.inlineCallbacks
+ def get_pagination_rows(self, user, config, key):
+ to_key = int(config.from_key)
+
+ if config.to_key:
+ from_key = int(config.to_key)
+ else:
+ from_key = None
+
+ rooms = yield self.store.get_rooms_for_user(user.to_string())
+ rooms = [room.room_id for room in rooms]
+ events = yield self.store.get_linearized_receipts_for_rooms(
+ rooms,
+ from_key=from_key,
+ to_key=to_key,
+ )
+
+ defer.returnValue((events, to_key))
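
The m.receipt EDU used above nests room_id -> receipt_type -> user_id -> receipt data; this is the structure sent by _push_remotes and flattened back out by _received_remote_receipt. All ids and the timestamp below are invented.

    # Hypothetical m.receipt EDU content (all ids and values invented).
    example_receipt_edu_content = {
        "!abc123:example.org": {
            "m.read": {
                "@alice:example.org": {
                    "event_ids": ["$1447000000001:example.org"],
                    "data": {"ts": 1447000000000},
                },
            },
        },
    }
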
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
new file mode 100644
index 00000000..493a0870
--- /dev/null
+++ b/synapse/handlers/register.py
@@ -0,0 +1,320 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains functions for registering clients."""
+from twisted.internet import defer
+
+from synapse.types import UserID
+from synapse.api.errors import (
+ AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
+)
+from ._base import BaseHandler
+import synapse.util.stringutils as stringutils
+from synapse.util.async import run_on_reactor
+from synapse.http.client import CaptchaServerHttpClient
+
+import logging
+import urllib
+
+logger = logging.getLogger(__name__)
+
+
+class RegistrationHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(RegistrationHandler, self).__init__(hs)
+
+ self.distributor = hs.get_distributor()
+ self.distributor.declare("registered_user")
+
+ @defer.inlineCallbacks
+ def check_username(self, localpart):
+ yield run_on_reactor()
+
+ if urllib.quote(localpart) != localpart:
+ raise SynapseError(
+ 400,
+ "User ID must only contain characters which do not"
+ " require URL encoding."
+ )
+
+ user = UserID(localpart, self.hs.hostname)
+ user_id = user.to_string()
+
+ yield self.check_user_id_is_valid(user_id)
+
+ users = yield self.store.get_users_by_id_case_insensitive(user_id)
+ if users:
+ raise SynapseError(
+ 400,
+ "User ID already taken.",
+ errcode=Codes.USER_IN_USE,
+ )
+
+ @defer.inlineCallbacks
+ def register(self, localpart=None, password=None, generate_token=True):
+ """Registers a new client on the server.
+
+ Args:
+ localpart : The local part of the user ID to register. If None,
+ one will be randomly generated.
+ password (str) : The password to assign to this user so they can
+ log in again. This can be None, which means they cannot log in
+ again via a password (e.g. the user is an application service user).
+ Returns:
+ A tuple of (user_id, access_token).
+ Raises:
+ RegistrationError if there was a problem registering.
+ """
+ yield run_on_reactor()
+ password_hash = None
+ if password:
+ password_hash = self.auth_handler().hash(password)
+
+ if localpart:
+ yield self.check_username(localpart)
+
+ user = UserID(localpart, self.hs.hostname)
+ user_id = user.to_string()
+
+ token = None
+ if generate_token:
+ token = self.auth_handler().generate_access_token(user_id)
+ yield self.store.register(
+ user_id=user_id,
+ token=token,
+ password_hash=password_hash
+ )
+
+ yield self.distributor.fire("registered_user", user)
+ else:
+ # autogen a random user ID
+ attempts = 0
+ user_id = None
+ token = None
+ while not user_id:
+ try:
+ localpart = self._generate_user_id()
+ user = UserID(localpart, self.hs.hostname)
+ user_id = user.to_string()
+ yield self.check_user_id_is_valid(user_id)
+ if generate_token:
+ token = self.auth_handler().generate_access_token(user_id)
+ yield self.store.register(
+ user_id=user_id,
+ token=token,
+ password_hash=password_hash)
+
+ self.distributor.fire("registered_user", user)
+ except SynapseError:
+ # if user id is taken, just generate another
+ user_id = None
+ token = None
+ attempts += 1
+ if attempts > 5:
+ raise RegistrationError(
+ 500, "Cannot generate user ID.")
+
+ # create a default avatar for the user
+ # XXX: ideally clients would explicitly specify one, but given they don't
+ # and we want consistent and pretty identicons for random users, we'll
+ # do it here.
+ try:
+ auth_user = UserID.from_string(user_id)
+ media_repository = self.hs.get_resource_for_media_repository()
+ identicon_resource = media_repository.getChildWithDefault("identicon", None)
+ upload_resource = media_repository.getChildWithDefault("upload", None)
+ identicon_bytes = identicon_resource.generate_identicon(user_id, 320, 320)
+ content_uri = yield upload_resource.create_content(
+ "image/png", None, identicon_bytes, len(identicon_bytes), auth_user
+ )
+ profile_handler = self.hs.get_handlers().profile_handler
+ profile_handler.set_avatar_url(
+ auth_user, auth_user, ("%s#auto" % (content_uri,))
+ )
+ except NotImplementedError:
+ pass # make tests pass without messing around creating default avatars
+
+ defer.returnValue((user_id, token))
+
+ @defer.inlineCallbacks
+ def appservice_register(self, user_localpart, as_token):
+ user = UserID(user_localpart, self.hs.hostname)
+ user_id = user.to_string()
+ service = yield self.store.get_app_service_by_token(as_token)
+ if not service:
+ raise AuthError(403, "Invalid application service token.")
+ if not service.is_interested_in_user(user_id):
+ raise SynapseError(
+ 400, "Invalid user localpart for this application service.",
+ errcode=Codes.EXCLUSIVE
+ )
+ token = self.auth_handler().generate_access_token(user_id)
+ yield self.store.register(
+ user_id=user_id,
+ token=token,
+ password_hash=""
+ )
+ self.distributor.fire("registered_user", user)
+ defer.returnValue((user_id, token))
+
+ @defer.inlineCallbacks
+ def check_recaptcha(self, ip, private_key, challenge, response):
+ """
+ Checks that a recaptcha response is correct.
+
+ Used only by c/s api v1
+ """
+
+ captcha_response = yield self._validate_captcha(
+ ip,
+ private_key,
+ challenge,
+ response
+ )
+ if not captcha_response["valid"]:
+ logger.info("Invalid captcha entered from %s. Error: %s",
+ ip, captcha_response["error_url"])
+ raise InvalidCaptchaError(
+ error_url=captcha_response["error_url"]
+ )
+ else:
+ logger.info("Valid captcha entered from %s", ip)
+
+ @defer.inlineCallbacks
+ def register_saml2(self, localpart):
+ """
+ Registers a localpart for a user who has authenticated via SAML2.
+ """
+ if urllib.quote(localpart) != localpart:
+ raise SynapseError(
+ 400,
+ "User ID must only contain characters which do not"
+ " require URL encoding."
+ )
+ user = UserID(localpart, self.hs.hostname)
+ user_id = user.to_string()
+
+ yield self.check_user_id_is_valid(user_id)
+ token = self.auth_handler().generate_access_token(user_id)
+ try:
+ yield self.store.register(
+ user_id=user_id,
+ token=token,
+ password_hash=None
+ )
+ yield self.distributor.fire("registered_user", user)
+ except Exception as e:
+ yield self.store.add_access_token_to_user(user_id, token)
+ # Ignore Registration errors
+ logger.exception(e)
+ defer.returnValue((user_id, token))
+
+ @defer.inlineCallbacks
+ def register_email(self, threepidCreds):
+ """
+ Validates the given threepid credentials with an identity server.
+
+ Used only by c/s api v1
+ """
+
+ for c in threepidCreds:
+ logger.info("validating theeepidcred sid %s on id server %s",
+ c['sid'], c['idServer'])
+ try:
+ identity_handler = self.hs.get_handlers().identity_handler
+ threepid = yield identity_handler.threepid_from_creds(c)
+ except Exception:
+ logger.exception("Couldn't validate 3pid")
+ raise RegistrationError(400, "Couldn't validate 3pid")
+
+ if not threepid:
+ raise RegistrationError(400, "Couldn't validate 3pid")
+ logger.info("got threepid with medium '%s' and address '%s'",
+ threepid['medium'], threepid['address'])
+
+ @defer.inlineCallbacks
+ def bind_emails(self, user_id, threepidCreds):
+ """Links emails with a user ID and informs an identity server.
+
+ Used only by c/s api v1
+ """
+
+ # Now we have a matrix ID, bind it to the threepids we were given
+ for c in threepidCreds:
+ identity_handler = self.hs.get_handlers().identity_handler
+ # XXX: This should be a deferred list, shouldn't it?
+ yield identity_handler.bind_threepid(c, user_id)
+
+ @defer.inlineCallbacks
+ def check_user_id_is_valid(self, user_id):
+ # valid user IDs must not clash with any user ID namespaces claimed by
+ # application services.
+ services = yield self.store.get_app_services()
+ interested_services = [
+ s for s in services if s.is_interested_in_user(user_id)
+ ]
+ for service in interested_services:
+ if service.is_exclusive_user(user_id):
+ raise SynapseError(
+ 400, "This user ID is reserved by an application service.",
+ errcode=Codes.EXCLUSIVE
+ )
+
+ def _generate_user_id(self):
+ return "-" + stringutils.random_string(18)
+
+ @defer.inlineCallbacks
+ def _validate_captcha(self, ip_addr, private_key, challenge, response):
+ """Validates the captcha provided.
+
+ Used only by c/s api v1
+
+ Returns:
+ dict: Containing 'valid'(bool) and 'error_url'(str) if invalid.
+
+ """
+ response = yield self._submit_captcha(ip_addr, private_key, challenge,
+ response)
+ # parse Google's response. Lovely format..
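+ # (For illustration, per the legacy reCAPTCHA v1 API the raw body is
+ # either "true\n" on success or e.g. "false\nincorrect-captcha-sol" on
+ # failure: line 1 is the verdict, line 2 an error code.)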
+ lines = response.split('\n')
+ json = {
+ "valid": lines[0] == 'true',
+ "error_url": "http://www.google.com/recaptcha/api/challenge?" +
+ "error=%s" % lines[1]
+ }
+ defer.returnValue(json)
+
+ @defer.inlineCallbacks
+ def _submit_captcha(self, ip_addr, private_key, challenge, response):
+ """
+ Used only by c/s api v1
+ """
+ # TODO: get this from the homeserver rather than creating a new one for
+ # each request
+ client = CaptchaServerHttpClient(self.hs)
+ data = yield client.post_urlencoded_get_raw(
+ "http://www.google.com:80/recaptcha/api/verify",
+ args={
+ 'privatekey': private_key,
+ 'remoteip': ip_addr,
+ 'challenge': challenge,
+ 'response': response
+ }
+ )
+ defer.returnValue(data)
+
+ def auth_handler(self):
+ return self.hs.get_handlers().auth_handler
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
new file mode 100644
index 00000000..3f047525
--- /dev/null
+++ b/synapse/handlers/room.py
@@ -0,0 +1,879 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains functions for performing events on rooms."""
+from twisted.internet import defer
+
+from ._base import BaseHandler
+
+from synapse.types import UserID, RoomAlias, RoomID
+from synapse.api.constants import (
+ EventTypes, Membership, JoinRules, RoomCreationPreset,
+)
+from synapse.api.errors import AuthError, StoreError, SynapseError
+from synapse.util import stringutils, unwrapFirstError
+from synapse.util.async import run_on_reactor
+
+from signedjson.sign import verify_signed_json
+from signedjson.key import decode_verify_key_bytes
+
+from collections import OrderedDict
+from unpaddedbase64 import decode_base64
+
+import logging
+import math
+import string
+
+logger = logging.getLogger(__name__)
+
+id_server_scheme = "https://"
+
+
+class RoomCreationHandler(BaseHandler):
+
+ PRESETS_DICT = {
+ RoomCreationPreset.PRIVATE_CHAT: {
+ "join_rules": JoinRules.INVITE,
+ "history_visibility": "shared",
+ "original_invitees_have_ops": False,
+ },
+ RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
+ "join_rules": JoinRules.INVITE,
+ "history_visibility": "shared",
+ "original_invitees_have_ops": True,
+ },
+ RoomCreationPreset.PUBLIC_CHAT: {
+ "join_rules": JoinRules.PUBLIC,
+ "history_visibility": "shared",
+ "original_invitees_have_ops": False,
+ },
+ }
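+ # A preset is just a bundle of initial-state defaults: PRIVATE_CHAT and
+ # TRUSTED_PRIVATE_CHAT are invite-only with shared history (the latter
+ # also grants the original invitees power level 100, see
+ # _create_events_for_new_room), while PUBLIC_CHAT is publicly joinable.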
+
+ @defer.inlineCallbacks
+ def create_room(self, user_id, room_id, config):
+ """ Creates a new room.
+
+ Args:
+ user_id (str): The ID of the user creating the new room.
+ room_id (str): The proposed ID for the new room. Can be None, in
+ which case one will be created for you.
+ config (dict) : A dict of configuration options.
+ Returns:
+ The new room ID.
+ Raises:
+ SynapseError if the room ID was taken, couldn't be stored, or
+ something went horribly wrong.
+ """
+ self.ratelimit(user_id)
+
+ if "room_alias_name" in config:
+ for wchar in string.whitespace:
+ if wchar in config["room_alias_name"]:
+ raise SynapseError(400, "Invalid characters in room alias")
+
+ room_alias = RoomAlias.create(
+ config["room_alias_name"],
+ self.hs.hostname,
+ )
+ mapping = yield self.store.get_association_from_room_alias(
+ room_alias
+ )
+
+ if mapping:
+ raise SynapseError(400, "Room alias already taken")
+ else:
+ room_alias = None
+
+ invite_list = config.get("invite", [])
+ for i in invite_list:
+ try:
+ UserID.from_string(i)
+ except Exception:
+ raise SynapseError(400, "Invalid user_id: %s" % (i,))
+
+ is_public = config.get("visibility", None) == "public"
+
+ if room_id:
+ # Ensure room_id is the correct type
+ room_id_obj = RoomID.from_string(room_id)
+ if not self.hs.is_mine(room_id_obj):
+ raise SynapseError(400, "Room id must be local")
+
+ yield self.store.store_room(
+ room_id=room_id,
+ room_creator_user_id=user_id,
+ is_public=is_public
+ )
+ else:
+ # autogen room IDs and try to create it. We may clash, so just
+ # try a few times till one goes through, giving up eventually.
+ attempts = 0
+ room_id = None
+ while attempts < 5:
+ try:
+ random_string = stringutils.random_string(18)
+ gen_room_id = RoomID.create(
+ random_string,
+ self.hs.hostname,
+ )
+ yield self.store.store_room(
+ room_id=gen_room_id.to_string(),
+ room_creator_user_id=user_id,
+ is_public=is_public
+ )
+ room_id = gen_room_id.to_string()
+ break
+ except StoreError:
+ attempts += 1
+ if not room_id:
+ raise StoreError(500, "Couldn't generate a room ID.")
+
+ if room_alias:
+ directory_handler = self.hs.get_handlers().directory_handler
+ yield directory_handler.create_association(
+ user_id=user_id,
+ room_id=room_id,
+ room_alias=room_alias,
+ servers=[self.hs.hostname],
+ )
+
+ preset_config = config.get(
+ "preset",
+ RoomCreationPreset.PUBLIC_CHAT
+ if is_public
+ else RoomCreationPreset.PRIVATE_CHAT
+ )
+
+ raw_initial_state = config.get("initial_state", [])
+
+ initial_state = OrderedDict()
+ for val in raw_initial_state:
+ initial_state[(val["type"], val.get("state_key", ""))] = val["content"]
+
+ creation_content = config.get("creation_content", {})
+
+ user = UserID.from_string(user_id)
+ creation_events = self._create_events_for_new_room(
+ user, room_id,
+ preset_config=preset_config,
+ invite_list=invite_list,
+ initial_state=initial_state,
+ creation_content=creation_content,
+ room_alias=room_alias,
+ )
+
+ msg_handler = self.hs.get_handlers().message_handler
+
+ for event in creation_events:
+ yield msg_handler.create_and_send_event(event, ratelimit=False)
+
+ if "name" in config:
+ name = config["name"]
+ yield msg_handler.create_and_send_event({
+ "type": EventTypes.Name,
+ "room_id": room_id,
+ "sender": user_id,
+ "state_key": "",
+ "content": {"name": name},
+ }, ratelimit=False)
+
+ if "topic" in config:
+ topic = config["topic"]
+ yield msg_handler.create_and_send_event({
+ "type": EventTypes.Topic,
+ "room_id": room_id,
+ "sender": user_id,
+ "state_key": "",
+ "content": {"topic": topic},
+ }, ratelimit=False)
+
+ for invitee in invite_list:
+ yield msg_handler.create_and_send_event({
+ "type": EventTypes.Member,
+ "state_key": invitee,
+ "room_id": room_id,
+ "sender": user_id,
+ "content": {"membership": Membership.INVITE},
+ }, ratelimit=False)
+
+ result = {"room_id": room_id}
+
+ if room_alias:
+ result["room_alias"] = room_alias.to_string()
+ yield directory_handler.send_room_alias_update_event(
+ user_id, room_id
+ )
+
+ defer.returnValue(result)
+
+ def _create_events_for_new_room(self, creator, room_id, preset_config,
+ invite_list, initial_state, creation_content,
+ room_alias):
+ config = RoomCreationHandler.PRESETS_DICT[preset_config]
+
+ creator_id = creator.to_string()
+
+ event_keys = {
+ "room_id": room_id,
+ "sender": creator_id,
+ "state_key": "",
+ }
+
+ def create(etype, content, **kwargs):
+ e = {
+ "type": etype,
+ "content": content,
+ }
+
+ e.update(event_keys)
+ e.update(kwargs)
+
+ return e
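+ # e.g. create(EventTypes.Name, {"name": "My Room"}) yields
+ #   {"type": "m.room.name", "content": {"name": "My Room"},
+ #    "room_id": room_id, "sender": creator_id, "state_key": ""}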
+
+ creation_content.update({"creator": creator.to_string()})
+ creation_event = create(
+ etype=EventTypes.Create,
+ content=creation_content,
+ )
+
+ join_event = create(
+ etype=EventTypes.Member,
+ state_key=creator_id,
+ content={
+ "membership": Membership.JOIN,
+ },
+ )
+
+ returned_events = [creation_event, join_event]
+
+ if (EventTypes.PowerLevels, '') not in initial_state:
+ power_level_content = {
+ "users": {
+ creator.to_string(): 100,
+ },
+ "users_default": 0,
+ "events": {
+ EventTypes.Name: 50,
+ EventTypes.PowerLevels: 100,
+ EventTypes.RoomHistoryVisibility: 100,
+ EventTypes.CanonicalAlias: 50,
+ EventTypes.RoomAvatar: 50,
+ },
+ "events_default": 0,
+ "state_default": 50,
+ "ban": 50,
+ "kick": 50,
+ "redact": 50,
+ "invite": 0,
+ }
+
+ if config["original_invitees_have_ops"]:
+ for invitee in invite_list:
+ power_level_content["users"][invitee] = 100
+
+ power_levels_event = create(
+ etype=EventTypes.PowerLevels,
+ content=power_level_content,
+ )
+
+ returned_events.append(power_levels_event)
+
+ if room_alias and (EventTypes.CanonicalAlias, '') not in initial_state:
+ room_alias_event = create(
+ etype=EventTypes.CanonicalAlias,
+ content={"alias": room_alias.to_string()},
+ )
+
+ returned_events.append(room_alias_event)
+
+ if (EventTypes.JoinRules, '') not in initial_state:
+ join_rules_event = create(
+ etype=EventTypes.JoinRules,
+ content={"join_rule": config["join_rules"]},
+ )
+
+ returned_events.append(join_rules_event)
+
+ if (EventTypes.RoomHistoryVisibility, '') not in initial_state:
+ history_event = create(
+ etype=EventTypes.RoomHistoryVisibility,
+ content={"history_visibility": config["history_visibility"]}
+ )
+
+ returned_events.append(history_event)
+
+ for (etype, state_key), content in initial_state.items():
+ returned_events.append(create(
+ etype=etype,
+ state_key=state_key,
+ content=content,
+ ))
+
+ return returned_events
+
+
+class RoomMemberHandler(BaseHandler):
+ # TODO(paul): This handler currently contains a messy conflation of
+ # low-level API that works on UserID objects and so on, and REST-level
+ # API that takes ID strings and returns pagination chunks. These concerns
+ # ought to be separated out a lot better.
+
+ def __init__(self, hs):
+ super(RoomMemberHandler, self).__init__(hs)
+
+ self.clock = hs.get_clock()
+
+ self.distributor = hs.get_distributor()
+ self.distributor.declare("user_joined_room")
+ self.distributor.declare("user_left_room")
+
+ @defer.inlineCallbacks
+ def get_room_members(self, room_id):
+ users = yield self.store.get_users_in_room(room_id)
+
+ defer.returnValue([UserID.from_string(u) for u in users])
+
+ @defer.inlineCallbacks
+ def fetch_room_distributions_into(self, room_id, localusers=None,
+ remotedomains=None, ignore_user=None):
+ """Fetch the distribution of a room, adding elements to either
+ 'localusers' or 'remotedomains', which should be a set() if supplied.
+ If ignore_user is set, ignore that user.
+
+ This function returns nothing; it communicates its result via
+ side-effects on the two passed sets. This allows easy accumulation of
+ member lists of multiple rooms at once if required.
+ """
+ members = yield self.get_room_members(room_id)
+ for member in members:
+ if ignore_user is not None and member == ignore_user:
+ continue
+
+ if self.hs.is_mine(member):
+ if localusers is not None:
+ localusers.add(member)
+ else:
+ if remotedomains is not None:
+ remotedomains.add(member.domain)
+
+ @defer.inlineCallbacks
+ def change_membership(self, event, context, do_auth=True, is_guest=False):
+ """ Change the membership status of a user in a room.
+
+ Args:
+ event (SynapseEvent): The membership event
+ Raises:
+ SynapseError if there was a problem changing the membership.
+ """
+ target_user_id = event.state_key
+
+ prev_state = context.current_state.get(
+ (EventTypes.Member, target_user_id),
+ None
+ )
+
+ room_id = event.room_id
+
+ # If we're trying to join a room then we have to do this differently
+ # if this HS is not currently in the room, i.e. we have to do the
+ # invite/join dance.
+ if event.membership == Membership.JOIN:
+ if is_guest:
+ guest_access = context.current_state.get(
+ (EventTypes.GuestAccess, ""),
+ None
+ )
+ is_guest_access_allowed = (
+ guest_access
+ and guest_access.content
+ and "guest_access" in guest_access.content
+ and guest_access.content["guest_access"] == "can_join"
+ )
+ if not is_guest_access_allowed:
+ raise AuthError(403, "Guest access not allowed")
+
+ yield self._do_join(event, context, do_auth=do_auth)
+ else:
+ if event.membership == Membership.LEAVE:
+ is_host_in_room = yield self.is_host_in_room(room_id, context)
+ if not is_host_in_room:
+ # Rejecting an invite, rather than leaving a joined room
+ handler = self.hs.get_handlers().federation_handler
+ inviter = yield self.get_inviter(event)
+ if not inviter:
+ # return the same error as join_room_alias does
+ raise SynapseError(404, "No known servers")
+ yield handler.do_remotely_reject_invite(
+ [inviter.domain],
+ room_id,
+ event.user_id
+ )
+ defer.returnValue({"room_id": room_id})
+ return
+
+ # FIXME: This isn't real idempotency.
+ if prev_state and prev_state.membership == event.membership:
+ # Duplicate of the current membership; treat this event as a no-op.
+ defer.returnValue({})
+ return
+
+ yield self._do_local_membership_update(
+ event,
+ membership=event.content["membership"],
+ context=context,
+ do_auth=do_auth,
+ )
+
+ if prev_state and prev_state.membership == Membership.JOIN:
+ user = UserID.from_string(event.user_id)
+ self.distributor.fire(
+ "user_left_room", user=user, room_id=event.room_id
+ )
+
+ defer.returnValue({"room_id": room_id})
+
+ @defer.inlineCallbacks
+ def join_room_alias(self, joinee, room_alias, content=None):
+ if content is None:
+ content = {} # avoid mutating a shared default argument
+ directory_handler = self.hs.get_handlers().directory_handler
+ mapping = yield directory_handler.get_association(room_alias)
+
+ if not mapping:
+ raise SynapseError(404, "No such room alias")
+
+ room_id = mapping["room_id"]
+ hosts = mapping["servers"]
+ if not hosts:
+ raise SynapseError(404, "No known servers")
+
+ # If event doesn't include a display name, add one.
+ yield self.distributor.fire(
+ "collect_presencelike_data", joinee, content
+ )
+
+ content.update({"membership": Membership.JOIN})
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Member,
+ "state_key": joinee.to_string(),
+ "room_id": room_id,
+ "sender": joinee.to_string(),
+ "membership": Membership.JOIN,
+ "content": content,
+ })
+ event, context = yield self._create_new_client_event(builder)
+
+ yield self._do_join(event, context, room_hosts=hosts, do_auth=True)
+
+ defer.returnValue({"room_id": room_id})
+
+ @defer.inlineCallbacks
+ def _do_join(self, event, context, room_hosts=None, do_auth=True):
+ room_id = event.room_id
+
+ # XXX: We don't do an auth check if we are doing an invite
+ # join dance for now, since we're kinda implicitly checking
+ # that we are allowed to join when we decide whether or not we
+ # need to do the invite/join dance.
+
+ is_host_in_room = yield self.is_host_in_room(room_id, context)
+ if is_host_in_room:
+ should_do_dance = False
+ elif room_hosts: # TODO: Shouldn't this be remote_room_host?
+ should_do_dance = True
+ else:
+ inviter = yield self.get_inviter(event)
+ if not inviter:
+ # return the same error as join_room_alias does
+ raise SynapseError(404, "No known servers")
+ should_do_dance = not self.hs.is_mine(inviter)
+ room_hosts = [inviter.domain]
+
+ if should_do_dance:
+ handler = self.hs.get_handlers().federation_handler
+ yield handler.do_invite_join(
+ room_hosts,
+ room_id,
+ event.user_id,
+ event.content,
+ )
+ else:
+ logger.debug("Doing normal join")
+
+ yield self._do_local_membership_update(
+ event,
+ membership=event.content["membership"],
+ context=context,
+ do_auth=do_auth,
+ )
+
+ user = UserID.from_string(event.user_id)
+ yield self.distributor.fire(
+ "user_joined_room", user=user, room_id=room_id
+ )
+
+ @defer.inlineCallbacks
+ def get_inviter(self, event):
+ # TODO(markjh): get prev_state from snapshot
+ prev_state = yield self.store.get_room_member(
+ event.user_id, event.room_id
+ )
+
+ if prev_state and prev_state.membership == Membership.INVITE:
+ defer.returnValue(UserID.from_string(prev_state.user_id))
+ return
+ elif "third_party_invite" in event.content:
+ if "sender" in event.content["third_party_invite"]:
+ inviter = UserID.from_string(
+ event.content["third_party_invite"]["sender"]
+ )
+ defer.returnValue(inviter)
+ defer.returnValue(None)
+
+ @defer.inlineCallbacks
+ def is_host_in_room(self, room_id, context):
+ is_host_in_room = yield self.auth.check_host_in_room(
+ room_id,
+ self.hs.hostname
+ )
+ if not is_host_in_room:
+ # is *anyone* in the room?
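+ # current_state is keyed by (event_type, state_key) tuples, so this
+ # comprehension collects the state_keys (i.e. user IDs) of all
+ # m.room.member entries.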
+ room_member_keys = [
+ v for (k, v) in context.current_state.keys() if (
+ k == "m.room.member"
+ )
+ ]
+ if len(room_member_keys) == 0:
+ # has the room been created so we can join it?
+ create_event = context.current_state.get(("m.room.create", ""))
+ if create_event:
+ is_host_in_room = True
+ defer.returnValue(is_host_in_room)
+
+ @defer.inlineCallbacks
+ def get_joined_rooms_for_user(self, user):
+ """Returns a list of roomids that the user has any of the given
+ membership states in."""
+
+ rooms = yield self.store.get_rooms_for_user(
+ user.to_string(),
+ )
+
+ # For some reason the list of events contains duplicates
+ # TODO(paul): work out why because I really don't think it should
+ room_ids = set(r.room_id for r in rooms)
+
+ defer.returnValue(room_ids)
+
+ @defer.inlineCallbacks
+ def _do_local_membership_update(self, event, membership, context,
+ do_auth):
+ yield run_on_reactor()
+
+ target_user = UserID.from_string(event.state_key)
+
+ yield self.handle_new_client_event(
+ event,
+ context,
+ extra_users=[target_user],
+ suppress_auth=(not do_auth),
+ )
+
+ @defer.inlineCallbacks
+ def do_3pid_invite(
+ self,
+ room_id,
+ inviter,
+ medium,
+ address,
+ id_server,
+ token_id,
+ txn_id
+ ):
+ invitee = yield self._lookup_3pid(
+ id_server, medium, address
+ )
+
+ if invitee:
+ # make sure it looks like a user ID; it'll throw if it's invalid.
+ UserID.from_string(invitee)
+ yield self.hs.get_handlers().message_handler.create_and_send_event(
+ {
+ "type": EventTypes.Member,
+ "content": {
+ "membership": unicode("invite")
+ },
+ "room_id": room_id,
+ "sender": inviter.to_string(),
+ "state_key": invitee,
+ },
+ token_id=token_id,
+ txn_id=txn_id,
+ )
+ else:
+ yield self._make_and_store_3pid_invite(
+ id_server,
+ medium,
+ address,
+ room_id,
+ inviter,
+ token_id,
+ txn_id=txn_id
+ )
+
+ @defer.inlineCallbacks
+ def _lookup_3pid(self, id_server, medium, address):
+ """Looks up a 3pid in the passed identity server.
+
+ Args:
+ id_server (str): The server name (including port, if required)
+ of the identity server to use.
+ medium (str): The type of the third party identifier (e.g. "email").
+ address (str): The third party identifier (e.g. "foo@example.com").
+
+ Returns:
+ (str) the matrix ID of the 3pid, or None if it is not recognized.
+ """
+ try:
+ data = yield self.hs.get_simple_http_client().get_json(
+ "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server,),
+ {
+ "medium": medium,
+ "address": address,
+ }
+ )
+
+ if "mxid" in data:
+ if "signatures" not in data:
+ raise AuthError(401, "No signatures on 3pid binding")
+ self.verify_any_signature(data, id_server)
+ defer.returnValue(data["mxid"])
+
+ except IOError as e:
+ logger.warn("Error from identity server lookup: %s" % (e,))
+ defer.returnValue(None)
+
+ @defer.inlineCallbacks
+ def verify_any_signature(self, data, server_hostname):
+ if server_hostname not in data["signatures"]:
+ raise AuthError(401, "No signature from server %s" % (server_hostname,))
+ for key_name, signature in data["signatures"][server_hostname].items():
+ key_data = yield self.hs.get_simple_http_client().get_json(
+ "%s%s/_matrix/identity/api/v1/pubkey/%s" %
+ (id_server_scheme, server_hostname, key_name,),
+ )
+ if "public_key" not in key_data:
+ raise AuthError(401, "No public key named %s from %s" %
+ (key_name, server_hostname,))
+ verify_signed_json(
+ data,
+ server_hostname,
+ decode_verify_key_bytes(key_name, decode_base64(key_data["public_key"]))
+ )
+ return
+
+ @defer.inlineCallbacks
+ def _make_and_store_3pid_invite(
+ self,
+ id_server,
+ medium,
+ address,
+ room_id,
+ user,
+ token_id,
+ txn_id
+ ):
+ token, public_key, key_validity_url, display_name = (
+ yield self._ask_id_server_for_third_party_invite(
+ id_server,
+ medium,
+ address,
+ room_id,
+ user.to_string()
+ )
+ )
+ msg_handler = self.hs.get_handlers().message_handler
+ yield msg_handler.create_and_send_event(
+ {
+ "type": EventTypes.ThirdPartyInvite,
+ "content": {
+ "display_name": display_name,
+ "key_validity_url": key_validity_url,
+ "public_key": public_key,
+ },
+ "room_id": room_id,
+ "sender": user.to_string(),
+ "state_key": token,
+ },
+ token_id=token_id,
+ txn_id=txn_id,
+ )
+
+ @defer.inlineCallbacks
+ def _ask_id_server_for_third_party_invite(
+ self, id_server, medium, address, room_id, sender):
+ is_url = "%s%s/_matrix/identity/api/v1/store-invite" % (
+ id_server_scheme, id_server,
+ )
+ data = yield self.hs.get_simple_http_client().post_urlencoded_get_json(
+ is_url,
+ {
+ "medium": medium,
+ "address": address,
+ "room_id": room_id,
+ "sender": sender,
+ }
+ )
+ # TODO: Check for success
+ token = data["token"]
+ public_key = data["public_key"]
+ display_name = data["display_name"]
+ key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
+ id_server_scheme, id_server,
+ )
+ defer.returnValue((token, public_key, key_validity_url, display_name))
+
+
+class RoomListHandler(BaseHandler):
+
+ @defer.inlineCallbacks
+ def get_public_room_list(self):
+ chunk = yield self.store.get_rooms(is_public=True)
+ results = yield defer.gatherResults(
+ [
+ self.store.get_users_in_room(room["room_id"])
+ for room in chunk
+ ],
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError)
+
+ for i, room in enumerate(chunk):
+ room["num_joined_members"] = len(results[i])
+
+ # FIXME (erikj): START is no longer a valid value
+ defer.returnValue({"start": "START", "end": "END", "chunk": chunk})
+
+
+class RoomContextHandler(BaseHandler):
+ @defer.inlineCallbacks
+ def get_event_context(self, user, room_id, event_id, limit, is_guest):
+ """Retrieves events, pagination tokens and state around a given event
+ in a room.
+
+ Args:
+ user (UserID)
+ room_id (str)
+ event_id (str)
+ limit (int): The maximum number of events to return in total
+ (excluding state).
+
+ Returns:
+ dict
+ """
+ before_limit = int(math.floor(limit / 2.))
+ after_limit = limit - before_limit
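+ # e.g. limit=10 gives 5 before / 5 after; limit=7 gives 3 before / 4 after.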
+
+ now_token = yield self.hs.get_event_sources().get_current_token()
+
+ results = yield self.store.get_events_around(
+ room_id, event_id, before_limit, after_limit
+ )
+
+ results["events_before"] = yield self._filter_events_for_client(
+ user.to_string(),
+ results["events_before"],
+ is_guest=is_guest,
+ require_all_visible_for_guests=False
+ )
+
+ results["events_after"] = yield self._filter_events_for_client(
+ user.to_string(),
+ results["events_after"],
+ is_guest=is_guest,
+ require_all_visible_for_guests=False
+ )
+
+ if results["events_after"]:
+ last_event_id = results["events_after"][-1].event_id
+ else:
+ last_event_id = event_id
+
+ state = yield self.store.get_state_for_events(
+ [last_event_id], None
+ )
+ results["state"] = state[last_event_id].values()
+
+ results["start"] = now_token.copy_and_replace(
+ "room_key", results["start"]
+ ).to_string()
+
+ results["end"] = now_token.copy_and_replace(
+ "room_key", results["end"]
+ ).to_string()
+
+ defer.returnValue(results)
+
+
+class RoomEventSource(object):
+ def __init__(self, hs):
+ self.store = hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def get_new_events(
+ self,
+ user,
+ from_key,
+ limit,
+ room_ids,
+ is_guest,
+ ):
+ # We just ignore the key for now.
+
+ to_key = yield self.get_current_key()
+
+ app_service = yield self.store.get_app_service_by_user_id(
+ user.to_string()
+ )
+ if app_service:
+ events, end_key = yield self.store.get_appservice_room_stream(
+ service=app_service,
+ from_key=from_key,
+ to_key=to_key,
+ limit=limit,
+ )
+ else:
+ events, end_key = yield self.store.get_room_events_stream(
+ user_id=user.to_string(),
+ from_key=from_key,
+ to_key=to_key,
+ limit=limit,
+ room_ids=room_ids,
+ is_guest=is_guest,
+ )
+
+ defer.returnValue((events, end_key))
+
+ def get_current_key(self, direction='f'):
+ return self.store.get_room_events_max_id(direction)
+
+ @defer.inlineCallbacks
+ def get_pagination_rows(self, user, config, key):
+ events, next_key = yield self.store.paginate_room_events(
+ room_id=key,
+ from_key=config.from_key,
+ to_key=config.to_key,
+ direction=config.direction,
+ limit=config.limit,
+ )
+
+ defer.returnValue((events, next_key))
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
new file mode 100644
index 00000000..b7545c11
--- /dev/null
+++ b/synapse/handlers/search.py
@@ -0,0 +1,319 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from ._base import BaseHandler
+
+from synapse.api.constants import Membership
+from synapse.api.filtering import Filter
+from synapse.api.errors import SynapseError
+from synapse.events.utils import serialize_event
+
+from unpaddedbase64 import decode_base64, encode_base64
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class SearchHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(SearchHandler, self).__init__(hs)
+
+ @defer.inlineCallbacks
+ def search(self, user, content, batch=None):
+ """Performs a full text search for a user.
+
+ Args:
+ user (UserID)
+ content (dict): Search parameters
+ batch (str): The next_batch parameter. Used for pagination.
+
+ Returns:
+ dict to be returned to the client with results of search
+ """
+
+ batch_group = None
+ batch_group_key = None
+ batch_token = None
+ if batch:
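+ # The batch token is base64("<group>\n<group key>\n<pagination token>"),
+ # e.g. (with illustrative values) base64("room_id\n!abc:example.com\nt1-2"),
+ # mirroring how next_batch tokens are minted for the "recent" ordering
+ # below.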
+ try:
+ b = decode_base64(batch)
+ batch_group, batch_group_key, batch_token = b.split("\n")
+
+ assert batch_group is not None
+ assert batch_group_key is not None
+ assert batch_token is not None
+ except Exception:
+ raise SynapseError(400, "Invalid batch")
+
+ try:
+ room_cat = content["search_categories"]["room_events"]
+
+ # The actual thing to query in FTS
+ search_term = room_cat["search_term"]
+
+ # Which "keys" to search over in FTS query
+ keys = room_cat.get("keys", [
+ "content.body", "content.name", "content.topic",
+ ])
+
+ # Filter to apply to results
+ filter_dict = room_cat.get("filter", {})
+
+ # What to order results by (impacts whether pagination can be done)
+ order_by = room_cat.get("order_by", "rank")
+
+ # Include context around each event?
+ event_context = room_cat.get(
+ "event_context", None
+ )
+
+ # Group results together? May allow clients to paginate within a
+ # group
+ group_by = room_cat.get("groupings", {}).get("group_by", {})
+ group_keys = [g["key"] for g in group_by]
+
+ if event_context is not None:
+ before_limit = int(event_context.get(
+ "before_limit", 5
+ ))
+ after_limit = int(event_context.get(
+ "after_limit", 5
+ ))
+ except KeyError:
+ raise SynapseError(400, "Invalid search query")
+
+ if order_by not in ("rank", "recent"):
+ raise SynapseError(400, "Invalid order by: %r" % (order_by,))
+
+ if set(group_keys) - {"room_id", "sender"}:
+ raise SynapseError(
+ 400,
+ "Invalid group by keys: %r" % (set(group_keys) - {"room_id", "sender"},)
+ )
+
+ search_filter = Filter(filter_dict)
+
+ # TODO: Search through left rooms too
+ rooms = yield self.store.get_rooms_for_user_where_membership_is(
+ user.to_string(),
+ membership_list=[Membership.JOIN],
+ # membership_list=[Membership.JOIN, Membership.LEAVE, Membership.BAN],
+ )
+ room_ids = set(r.room_id for r in rooms)
+
+ room_ids = search_filter.filter_rooms(room_ids)
+
+ if batch_group == "room_id":
+ room_ids.intersection_update({batch_group_key})
+
+ rank_map = {} # event_id -> rank of event
+ allowed_events = []
+ room_groups = {} # Holds result of grouping by room, if applicable
+ sender_group = {} # Holds result of grouping by sender, if applicable
+
+ # Holds the next_batch for the entire result set if one of those exists
+ global_next_batch = None
+
+ if order_by == "rank":
+ results = yield self.store.search_msgs(
+ room_ids, search_term, keys
+ )
+
+ results_map = {r["event"].event_id: r for r in results}
+
+ rank_map.update({r["event"].event_id: r["rank"] for r in results})
+
+ filtered_events = search_filter.filter([r["event"] for r in results])
+
+ events = yield self._filter_events_for_client(
+ user.to_string(), filtered_events
+ )
+
+ events.sort(key=lambda e: -rank_map[e.event_id])
+ allowed_events = events[:search_filter.limit()]
+
+ for e in allowed_events:
+ rm = room_groups.setdefault(e.room_id, {
+ "results": [],
+ "order": rank_map[e.event_id],
+ })
+ rm["results"].append(e.event_id)
+
+ s = sender_group.setdefault(e.sender, {
+ "results": [],
+ "order": rank_map[e.event_id],
+ })
+ s["results"].append(e.event_id)
+
+ elif order_by == "recent":
+ # In this case we specifically loop through each room as the given
+ # limit applies to each room, rather than a global list.
+ # This is not necessarily a good idea.
+ for room_id in room_ids:
+ room_events = []
+ if batch_group == "room_id" and batch_group_key == room_id:
+ pagination_token = batch_token
+ else:
+ pagination_token = None
+ i = 0
+
+ # We keep looping and we keep filtering until we reach the limit
+ # or we run out of things.
+ # But only go around 5 times since otherwise synapse will be sad.
+ while len(room_events) < search_filter.limit() and i < 5:
+ i += 1
+ results = yield self.store.search_room(
+ room_id, search_term, keys, search_filter.limit() * 2,
+ pagination_token=pagination_token,
+ )
+
+ results_map = {r["event"].event_id: r for r in results}
+
+ rank_map.update({r["event"].event_id: r["rank"] for r in results})
+
+ filtered_events = search_filter.filter([
+ r["event"] for r in results
+ ])
+
+ events = yield self._filter_events_for_client(
+ user.to_string(), filtered_events
+ )
+
+ room_events.extend(events)
+ room_events = room_events[:search_filter.limit()]
+
+ if len(results) < search_filter.limit() * 2:
+ pagination_token = None
+ break
+ else:
+ pagination_token = results[-1]["pagination_token"]
+
+ if room_events:
+ res = results_map[room_events[-1].event_id]
+ pagination_token = res["pagination_token"]
+
+ group = room_groups.setdefault(room_id, {})
+ if pagination_token:
+ next_batch = encode_base64("%s\n%s\n%s" % (
+ "room_id", room_id, pagination_token
+ ))
+ group["next_batch"] = next_batch
+
+ if batch_token:
+ global_next_batch = next_batch
+
+ group["results"] = [e.event_id for e in room_events]
+ group["order"] = max(
+ e.origin_server_ts/1000 for e in room_events
+ if hasattr(e, "origin_server_ts")
+ )
+
+ allowed_events.extend(room_events)
+
+ # Normalize the group orders
+ if room_groups:
+ if len(room_groups) > 1:
+ mx = max(g["order"] for g in room_groups.values())
+ mn = min(g["order"] for g in room_groups.values())
+
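+ # Min-max normalisation: the oldest group maps to 0.0 and the
+ # newest to 1.0, e.g. orders {100, 150, 200} become {0.0, 0.5, 1.0}.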
+ for g in room_groups.values():
+ g["order"] = (g["order"] - mn) * 1.0 / (mx - mn)
+ else:
+ room_groups.values()[0]["order"] = 1
+
+ else:
+ # We should never get here due to the guard earlier.
+ raise NotImplementedError()
+
+ # If client has asked for "context" for each event (i.e. some surrounding
+ # events and state), fetch that
+ if event_context is not None:
+ now_token = yield self.hs.get_event_sources().get_current_token()
+
+ contexts = {}
+ for event in allowed_events:
+ res = yield self.store.get_events_around(
+ event.room_id, event.event_id, before_limit, after_limit
+ )
+
+ res["events_before"] = yield self._filter_events_for_client(
+ user.to_string(), res["events_before"]
+ )
+
+ res["events_after"] = yield self._filter_events_for_client(
+ user.to_string(), res["events_after"]
+ )
+
+ res["start"] = now_token.copy_and_replace(
+ "room_key", res["start"]
+ ).to_string()
+
+ res["end"] = now_token.copy_and_replace(
+ "room_key", res["end"]
+ ).to_string()
+
+ contexts[event.event_id] = res
+ else:
+ contexts = {}
+
+ # TODO: Add a limit
+
+ time_now = self.clock.time_msec()
+
+ for context in contexts.values():
+ context["events_before"] = [
+ serialize_event(e, time_now)
+ for e in context["events_before"]
+ ]
+ context["events_after"] = [
+ serialize_event(e, time_now)
+ for e in context["events_after"]
+ ]
+
+ results = {
+ e.event_id: {
+ "rank": rank_map[e.event_id],
+ "result": serialize_event(e, time_now),
+ "context": contexts.get(e.event_id, {}),
+ }
+ for e in allowed_events
+ }
+
+ logger.info("Found %d results", len(results))
+
+ rooms_cat_res = {
+ "results": results,
+ "count": len(results)
+ }
+
+ if room_groups and "room_id" in group_keys:
+ rooms_cat_res.setdefault("groups", {})["room_id"] = room_groups
+
+ if sender_group and "sender" in group_keys:
+ rooms_cat_res.setdefault("groups", {})["sender"] = sender_group
+
+ if global_next_batch:
+ rooms_cat_res["next_batch"] = global_next_batch
+
+ defer.returnValue({
+ "search_categories": {
+ "room_events": rooms_cat_res
+ }
+ })
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
new file mode 100644
index 00000000..6dc9d0fb
--- /dev/null
+++ b/synapse/handlers/sync.py
@@ -0,0 +1,739 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseHandler
+
+from synapse.streams.config import PaginationConfig
+from synapse.api.constants import Membership, EventTypes
+
+from twisted.internet import defer
+
+import collections
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+SyncConfig = collections.namedtuple("SyncConfig", [
+ "user",
+ "filter",
+])
+
+
+class TimelineBatch(collections.namedtuple("TimelineBatch", [
+ "prev_batch",
+ "events",
+ "limited",
+])):
+ __slots__ = []
+
+ def __nonzero__(self):
+ """Make the result appear empty if there are no updates. This is used
+ to tell if the room needs to be part of the sync result.
+ """
+ return bool(self.events)
+
+
+class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [
+ "room_id", # str
+ "timeline", # TimelineBatch
+ "state", # dict[(str, str), FrozenEvent]
+ "ephemeral",
+ "private_user_data",
+])):
+ __slots__ = []
+
+ def __nonzero__(self):
+ """Make the result appear empty if there are no updates. This is used
+ to tell if the room needs to be part of the sync result.
+ """
+ return bool(
+ self.timeline
+ or self.state
+ or self.ephemeral
+ or self.private_user_data
+ )
+
+
+ class ArchivedSyncResult(collections.namedtuple("ArchivedSyncResult", [
+ "room_id", # str
+ "timeline", # TimelineBatch
+ "state", # dict[(str, str), FrozenEvent]
+ "private_user_data",
+])):
+ __slots__ = []
+
+ def __nonzero__(self):
+ """Make the result appear empty if there are no updates. This is used
+ to tell if the room needs to be part of the sync result.
+ """
+ return bool(
+ self.timeline
+ or self.state
+ or self.private_user_data
+ )
+
+
+class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [
+ "room_id", # str
+ "invite", # FrozenEvent: the invite event
+])):
+ __slots__ = []
+
+ def __nonzero__(self):
+ """Invited rooms should always be reported to the client"""
+ return True
+
+
+class SyncResult(collections.namedtuple("SyncResult", [
+ "next_batch", # Token for the next sync
+ "presence", # List of presence events for the user.
+ "joined", # JoinedSyncResult for each joined room.
+ "invited", # InvitedSyncResult for each invited room.
+ "archived", # ArchivedSyncResult for each archived room.
+])):
+ __slots__ = []
+
+ def __nonzero__(self):
+ """Make the result appear empty if there are no updates. This is used
+ to tell if the notifier needs to wait for more events when polling for
+ events.
+ """
+ return bool(
+ self.presence or self.joined or self.invited
+ )
+
+
+class SyncHandler(BaseHandler):
+
+ def __init__(self, hs):
+ super(SyncHandler, self).__init__(hs)
+ self.event_sources = hs.get_event_sources()
+ self.clock = hs.get_clock()
+
+ @defer.inlineCallbacks
+ def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
+ full_state=False):
+ """Get the sync for a client if we have new data for it now. Otherwise
+ wait for new data to arrive on the server. If the timeout expires, then
+ return an empty sync result.
+ Returns:
+ A Deferred SyncResult.
+ """
+
+ if timeout == 0 or since_token is None or full_state:
+ # we are going to return immediately, so don't bother calling
+ # notifier.wait_for_events.
+ result = yield self.current_sync_for_user(sync_config, since_token,
+ full_state=full_state)
+ defer.returnValue(result)
+ else:
+ def current_sync_callback(before_token, after_token):
+ return self.current_sync_for_user(sync_config, since_token)
+
+ result = yield self.notifier.wait_for_events(
+ sync_config.user, timeout, current_sync_callback,
+ from_token=since_token
+ )
+ defer.returnValue(result)
+
+ def current_sync_for_user(self, sync_config, since_token=None,
+ full_state=False):
+ """Get the sync for client needed to match what the server has now.
+ Returns:
+ A Deferred SyncResult.
+ """
+ if since_token is None or full_state:
+ return self.full_state_sync(sync_config, since_token)
+ else:
+ return self.incremental_sync_with_gap(sync_config, since_token)
+
+ @defer.inlineCallbacks
+ def full_state_sync(self, sync_config, timeline_since_token):
+ """Get a sync for a client which is starting without any state.
+
+ If a 'timeline_since_token' is given, only timeline events which have
+ happened since that token will be returned.
+
+ Returns:
+ A Deferred SyncResult.
+ """
+ now_token = yield self.event_sources.get_current_token()
+
+ now_token, ephemeral_by_room = yield self.ephemeral_by_room(
+ sync_config, now_token
+ )
+
+ presence_stream = self.event_sources.sources["presence"]
+ # TODO (mjark): This looks wrong, shouldn't we be getting the presence
+ # UP to the present rather than after the present?
+ pagination_config = PaginationConfig(from_token=now_token)
+ presence, _ = yield presence_stream.get_pagination_rows(
+ user=sync_config.user,
+ pagination_config=pagination_config.get_source_config("presence"),
+ key=None
+ )
+ room_list = yield self.store.get_rooms_for_user_where_membership_is(
+ user_id=sync_config.user.to_string(),
+ membership_list=(
+ Membership.INVITE,
+ Membership.JOIN,
+ Membership.LEAVE,
+ Membership.BAN
+ )
+ )
+
+ tags_by_room = yield self.store.get_tags_for_user(
+ sync_config.user.to_string()
+ )
+
+ joined = []
+ invited = []
+ archived = []
+ for event in room_list:
+ if event.membership == Membership.JOIN:
+ room_sync = yield self.full_state_sync_for_joined_room(
+ room_id=event.room_id,
+ sync_config=sync_config,
+ now_token=now_token,
+ timeline_since_token=timeline_since_token,
+ ephemeral_by_room=ephemeral_by_room,
+ tags_by_room=tags_by_room,
+ )
+ joined.append(room_sync)
+ elif event.membership == Membership.INVITE:
+ invite = yield self.store.get_event(event.event_id)
+ invited.append(InvitedSyncResult(
+ room_id=event.room_id,
+ invite=invite,
+ ))
+ elif event.membership in (Membership.LEAVE, Membership.BAN):
+ leave_token = now_token.copy_and_replace(
+ "room_key", "s%d" % (event.stream_ordering,)
+ )
+ room_sync = yield self.full_state_sync_for_archived_room(
+ sync_config=sync_config,
+ room_id=event.room_id,
+ leave_event_id=event.event_id,
+ leave_token=leave_token,
+ timeline_since_token=timeline_since_token,
+ tags_by_room=tags_by_room,
+ )
+ archived.append(room_sync)
+
+ defer.returnValue(SyncResult(
+ presence=presence,
+ joined=joined,
+ invited=invited,
+ archived=archived,
+ next_batch=now_token,
+ ))
+
+ @defer.inlineCallbacks
+ def full_state_sync_for_joined_room(self, room_id, sync_config,
+ now_token, timeline_since_token,
+ ephemeral_by_room, tags_by_room):
+ """Sync a room for a client which is starting without any state
+ Returns:
+ A Deferred JoinedSyncResult.
+ """
+
+ batch = yield self.load_filtered_recents(
+ room_id, sync_config, now_token, since_token=timeline_since_token
+ )
+
+ current_state = yield self.get_state_at(room_id, now_token)
+
+ defer.returnValue(JoinedSyncResult(
+ room_id=room_id,
+ timeline=batch,
+ state=current_state,
+ ephemeral=ephemeral_by_room.get(room_id, []),
+ private_user_data=self.private_user_data_for_room(
+ room_id, tags_by_room
+ ),
+ ))
+
+ def private_user_data_for_room(self, room_id, tags_by_room):
+ private_user_data = []
+ tags = tags_by_room.get(room_id)
+ if tags is not None:
+ private_user_data.append({
+ "type": "m.tag",
+ "content": {"tags": tags},
+ })
+ return private_user_data
+
+ @defer.inlineCallbacks
+ def ephemeral_by_room(self, sync_config, now_token, since_token=None):
+ """Get the ephemeral events for each room the user is in
+ Args:
+ sync_config (SyncConfig): The flags, filters and user for the sync.
+ now_token (StreamToken): Where the server is currently up to.
+ since_token (StreamToken): Where the server was when the client
+ last synced.
+ Returns:
+ A tuple of the now StreamToken, updated to reflect which typing and
+ receipt events are included, and a dict mapping from room_id to a
+ list of ephemeral events for that room.
+ """
+
+ typing_key = since_token.typing_key if since_token else "0"
+
+ rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string())
+ room_ids = [room.room_id for room in rooms]
+
+ typing_source = self.event_sources.sources["typing"]
+ typing, typing_key = yield typing_source.get_new_events(
+ user=sync_config.user,
+ from_key=typing_key,
+ limit=sync_config.filter.ephemeral_limit(),
+ room_ids=room_ids,
+ is_guest=False,
+ )
+ now_token = now_token.copy_and_replace("typing_key", typing_key)
+
+ ephemeral_by_room = {}
+
+ for event in typing:
+ # we want to exclude the room_id from the event, but modifying the
+ # result returned by the event source is poor form (it might cache
+ # the object)
+ room_id = event["room_id"]
+ event_copy = {k: v for (k, v) in event.iteritems()
+ if k != "room_id"}
+ ephemeral_by_room.setdefault(room_id, []).append(event_copy)
+
+ receipt_key = since_token.receipt_key if since_token else "0"
+
+ receipt_source = self.event_sources.sources["receipt"]
+ receipts, receipt_key = yield receipt_source.get_new_events(
+ user=sync_config.user,
+ from_key=receipt_key,
+ limit=sync_config.filter.ephemeral_limit(),
+ room_ids=room_ids,
+ # /sync doesn't support guest access, so guests can't reach this point
+ is_guest=False,
+ )
+ now_token = now_token.copy_and_replace("receipt_key", receipt_key)
+
+ for event in receipts:
+ room_id = event["room_id"]
+ # exclude room id, as above
+ event_copy = {k: v for (k, v) in event.iteritems()
+ if k != "room_id"}
+ ephemeral_by_room.setdefault(room_id, []).append(event_copy)
+
+ defer.returnValue((now_token, ephemeral_by_room))
+
+ @defer.inlineCallbacks
+ def full_state_sync_for_archived_room(self, room_id, sync_config,
+ leave_event_id, leave_token,
+ timeline_since_token, tags_by_room):
+ """Sync a room for a client which is starting without any state
+ Returns:
+ A Deferred JoinedSyncResult.
+ """
+
+ batch = yield self.load_filtered_recents(
+ room_id, sync_config, leave_token, since_token=timeline_since_token
+ )
+
+ leave_state = yield self.store.get_state_for_event(leave_event_id)
+
+ defer.returnValue(ArchivedSyncResult(
+ room_id=room_id,
+ timeline=batch,
+ state=leave_state,
+ private_user_data=self.private_user_data_for_room(
+ room_id, tags_by_room
+ ),
+ ))
+
+ @defer.inlineCallbacks
+ def incremental_sync_with_gap(self, sync_config, since_token):
+ """ Get the incremental delta needed to bring the client up to
+ date with the server.
+ Returns:
+ A Deferred SyncResult.
+ """
+ now_token = yield self.event_sources.get_current_token()
+
+ rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string())
+ room_ids = [room.room_id for room in rooms]
+
+ presence_source = self.event_sources.sources["presence"]
+ presence, presence_key = yield presence_source.get_new_events(
+ user=sync_config.user,
+ from_key=since_token.presence_key,
+ limit=sync_config.filter.presence_limit(),
+ room_ids=room_ids,
+ # /sync doesn't support guest access, so guests can't reach this point
+ is_guest=False,
+ )
+ now_token = now_token.copy_and_replace("presence_key", presence_key)
+
+ now_token, ephemeral_by_room = yield self.ephemeral_by_room(
+ sync_config, now_token, since_token
+ )
+
+ rm_handler = self.hs.get_handlers().room_member_handler
+ app_service = yield self.store.get_app_service_by_user_id(
+ sync_config.user.to_string()
+ )
+ if app_service:
+ rooms = yield self.store.get_app_service_rooms(app_service)
+ joined_room_ids = set(r.room_id for r in rooms)
+ else:
+ joined_room_ids = yield rm_handler.get_joined_rooms_for_user(
+ sync_config.user
+ )
+
+ timeline_limit = sync_config.filter.timeline_limit()
+
+ room_events, _ = yield self.store.get_room_events_stream(
+ sync_config.user.to_string(),
+ from_key=since_token.room_key,
+ to_key=now_token.room_key,
+ limit=timeline_limit + 1,
+ )
+
+ tags_by_room = yield self.store.get_updated_tags(
+ sync_config.user.to_string(),
+ since_token.private_user_data_key,
+ )
+
+ joined = []
+ archived = []
+ if len(room_events) <= timeline_limit:
+ # There is no gap in any of the rooms. Therefore we can just
+ # partition the new events by room and return them.
+ logger.debug("Got %i events for incremental sync - not limited",
+ len(room_events))
+
+ invite_events = []
+ leave_events = []
+ events_by_room_id = {}
+ for event in room_events:
+ events_by_room_id.setdefault(event.room_id, []).append(event)
+ if event.room_id not in joined_room_ids:
+ if (event.type == EventTypes.Member
+ and event.state_key == sync_config.user.to_string()):
+ if event.membership == Membership.INVITE:
+ invite_events.append(event)
+ elif event.membership in (Membership.LEAVE, Membership.BAN):
+ leave_events.append(event)
+
+ for room_id in joined_room_ids:
+ recents = events_by_room_id.get(room_id, [])
+ logger.debug("Events for room %s: %r", room_id, recents)
+ state = {
+ (event.type, event.state_key): event
+ for event in recents if event.is_state()}
+ limited = False
+
+ if recents:
+ prev_batch = now_token.copy_and_replace(
+ "room_key", recents[0].internal_metadata.before
+ )
+ else:
+ prev_batch = now_token
+
+ just_joined = yield self.check_joined_room(sync_config, state)
+ if just_joined:
+ logger.debug("User has just joined %s: needs full state",
+ room_id)
+ state = yield self.get_state_at(room_id, now_token)
+ # the timeline is inherently limited if we've just joined
+ limited = True
+
+ room_sync = JoinedSyncResult(
+ room_id=room_id,
+ timeline=TimelineBatch(
+ events=recents,
+ prev_batch=prev_batch,
+ limited=limited,
+ ),
+ state=state,
+ ephemeral=ephemeral_by_room.get(room_id, []),
+ private_user_data=self.private_user_data_for_room(
+ room_id, tags_by_room
+ ),
+ )
+ logger.debug("Result for room %s: %r", room_id, room_sync)
+
+ if room_sync:
+ joined.append(room_sync)
+
+ else:
+ logger.debug("Got %i events for incremental sync - hit limit",
+ len(room_events))
+
+ invite_events = yield self.store.get_invites_for_user(
+ sync_config.user.to_string()
+ )
+
+ leave_events = yield self.store.get_leave_and_ban_events_for_user(
+ sync_config.user.to_string()
+ )
+
+ for room_id in joined_room_ids:
+ room_sync = yield self.incremental_sync_with_gap_for_room(
+ room_id, sync_config, since_token, now_token,
+ ephemeral_by_room, tags_by_room
+ )
+ if room_sync:
+ joined.append(room_sync)
+
+ for leave_event in leave_events:
+ room_sync = yield self.incremental_sync_for_archived_room(
+ sync_config, leave_event, since_token, tags_by_room
+ )
+ archived.append(room_sync)
+
+ invited = [
+ InvitedSyncResult(room_id=event.room_id, invite=event)
+ for event in invite_events
+ ]
+
+ defer.returnValue(SyncResult(
+ presence=presence,
+ joined=joined,
+ invited=invited,
+ archived=archived,
+ next_batch=now_token,
+ ))
+
+ @defer.inlineCallbacks
+ def load_filtered_recents(self, room_id, sync_config, now_token,
+ since_token=None):
+ """
+ :returns: a Deferred TimelineBatch
+ """
+ limited = True
+ recents = []
+ filtering_factor = 2
+ timeline_limit = sync_config.filter.timeline_limit()
+ load_limit = max(timeline_limit * filtering_factor, 100)
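+ # Over-fetch by filtering_factor so that, even after filtering drops
+ # events, we usually still have timeline_limit events left without
+ # another pass around the loop.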
+ max_repeat = 3 # Only try a few times per room, otherwise give up.
+ room_key = now_token.room_key
+ end_key = room_key
+
+ while limited and len(recents) < timeline_limit and max_repeat:
+ events, keys = yield self.store.get_recent_events_for_room(
+ room_id,
+ limit=load_limit + 1,
+ from_token=since_token.room_key if since_token else None,
+ end_token=end_key,
+ )
+ (room_key, _) = keys
+ end_key = "s" + room_key.split('-')[-1]
+ loaded_recents = sync_config.filter.filter_room_timeline(events)
+ loaded_recents = yield self._filter_events_for_client(
+ sync_config.user.to_string(), loaded_recents,
+ )
+ loaded_recents.extend(recents)
+ recents = loaded_recents
+ if len(events) <= load_limit:
+ limited = False
+ max_repeat -= 1
+
+ if len(recents) > timeline_limit:
+ limited = True
+ recents = recents[-timeline_limit:]
+ room_key = recents[0].internal_metadata.before
+
+ prev_batch_token = now_token.copy_and_replace(
+ "room_key", room_key
+ )
+
+ defer.returnValue(TimelineBatch(
+ events=recents, prev_batch=prev_batch_token, limited=limited
+ ))
+
+ @defer.inlineCallbacks
+ def incremental_sync_with_gap_for_room(self, room_id, sync_config,
+ since_token, now_token,
+ ephemeral_by_room, tags_by_room):
+ """ Get the incremental delta needed to bring the client up to date for
+ the room. Gives the client the most recent events and the changes to
+ state.
+ Returns:
+ A Deferred JoinedSyncResult
+ """
+ logger.debug("Doing incremental sync for room %s between %s and %s",
+ room_id, since_token, now_token)
+
+ # TODO(mjark): Check for redactions we might have missed.
+
+ batch = yield self.load_filtered_recents(
+ room_id, sync_config, now_token, since_token,
+ )
+
+ logging.debug("Recents %r", batch)
+
+ current_state = yield self.get_state_at(room_id, now_token)
+
+ state_at_previous_sync = yield self.get_state_at(
+ room_id, stream_position=since_token
+ )
+
+ state = yield self.compute_state_delta(
+ since_token=since_token,
+ previous_state=state_at_previous_sync,
+ current_state=current_state,
+ )
+
+ just_joined = yield self.check_joined_room(sync_config, state)
+ if just_joined:
+ state = yield self.get_state_at(room_id, now_token)
+
+ room_sync = JoinedSyncResult(
+ room_id=room_id,
+ timeline=batch,
+ state=state,
+ ephemeral=ephemeral_by_room.get(room_id, []),
+ private_user_data=self.private_user_data_for_room(
+ room_id, tags_by_room
+ ),
+ )
+
+ logging.debug("Room sync: %r", room_sync)
+
+ defer.returnValue(room_sync)
+
+ @defer.inlineCallbacks
+ def incremental_sync_for_archived_room(self, sync_config, leave_event,
+ since_token, tags_by_room):
+ """ Get the incremental delta needed to bring the client up to date for
+ the archived room.
+ Returns:
+ A Deferred ArchivedSyncResult
+ """
+
+ stream_token = yield self.store.get_stream_token_for_event(
+ leave_event.event_id
+ )
+
+ leave_token = since_token.copy_and_replace("room_key", stream_token)
+
+ batch = yield self.load_filtered_recents(
+ leave_event.room_id, sync_config, leave_token, since_token,
+ )
+
+ logging.debug("Recents %r", batch)
+
+ state_events_at_leave = yield self.store.get_state_for_event(
+ leave_event.event_id
+ )
+
+ state_at_previous_sync = yield self.get_state_at(
+ leave_event.room_id, stream_position=since_token
+ )
+
+ state_events_delta = yield self.compute_state_delta(
+ since_token=since_token,
+ previous_state=state_at_previous_sync,
+ current_state=state_events_at_leave,
+ )
+
+ room_sync = ArchivedSyncResult(
+ room_id=leave_event.room_id,
+ timeline=batch,
+ state=state_events_delta,
+ private_user_data=self.private_user_data_for_room(
+ leave_event.room_id, tags_by_room
+ ),
+ )
+
+ logging.debug("Room sync: %r", room_sync)
+
+ defer.returnValue(room_sync)
+
+ @defer.inlineCallbacks
+ def get_state_after_event(self, event):
+ """
+ Get the room state after the given event
+
+ :param synapse.events.EventBase event: event of interest
+ :return: A Deferred map from ((type, state_key)->Event)
+ """
+ state = yield self.store.get_state_for_event(event.event_id)
+ if event.is_state():
+ state = state.copy()
+ state[(event.type, event.state_key)] = event
+ defer.returnValue(state)
+
+ @defer.inlineCallbacks
+ def get_state_at(self, room_id, stream_position):
+ """ Get the room state at a particular stream position
+ :param str room_id: room for which to get state
+ :param StreamToken stream_position: point at which to get state
+ :returns: A Deferred map from ((type, state_key)->Event)
+ """
+ last_events, token = yield self.store.get_recent_events_for_room(
+ room_id, end_token=stream_position.room_key, limit=1,
+ )
+
+ if last_events:
+ last_event = last_events[-1]
+ state = yield self.get_state_after_event(last_event)
+
+ else:
+ # no events in this room - so presumably no state
+ state = {}
+ defer.returnValue(state)
+
+ def compute_state_delta(self, since_token, previous_state, current_state):
+ """ Works out the differnce in state between the current state and the
+ state the client got when it last performed a sync.
+
+ :param str since_token: the point we are comparing against
+ :param dict[(str,str), synapse.events.FrozenEvent] previous_state: the
+ state to compare to
+ :param dict[(str,str), synapse.events.FrozenEvent] current_state: the
+ new state
+
+ :returns: A new event dictionary
+ """
+ # TODO(mjark) Check if the state events were received by the server
+ # after the previous sync, since we need to include those state
+ # updates even if they occurred logically before the previous event.
+ # TODO(mjark) Check for new redactions in the state events.
+
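+ # Illustrative example: if the only change since the last sync is a
+ # renamed room, the delta contains just the new name event:
+ #
+ #   prev = {("m.room.name", ""): name_v1, ("m.room.topic", ""): topic}
+ #   curr = {("m.room.name", ""): name_v2, ("m.room.topic", ""): topic}
+ #   # delta => {("m.room.name", ""): name_v2}; topic's event_id is
+ #   # unchanged so it is omitted.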
+ state_delta = {}
+ for key, event in current_state.iteritems():
+ if (key not in previous_state or
+ previous_state[key].event_id != event.event_id):
+ state_delta[key] = event
+ return state_delta
+
+ def check_joined_room(self, sync_config, state_delta):
+ """
+ Check if the user has just joined the given room (so should
+ be given the full state)
+
+ :param sync_config:
+ :param dict[(str,str), synapse.events.FrozenEvent] state_delta: the
+ difference in state since the last sync
+
+ :returns: True if the user has just joined the room; otherwise False
+ """
+ join_event = state_delta.get((
+ EventTypes.Member, sync_config.user.to_string()), None)
+ if join_event is not None:
+ if join_event.content["membership"] == Membership.JOIN:
+ return True
+ return False
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
new file mode 100644
index 00000000..2846f3e6
--- /dev/null
+++ b/synapse/handlers/typing.py
@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from ._base import BaseHandler
+
+from synapse.api.errors import SynapseError, AuthError
+from synapse.util.logcontext import PreserveLoggingContext
+from synapse.types import UserID
+
+import logging
+
+from collections import namedtuple
+
+logger = logging.getLogger(__name__)
+
+
+# A tiny object useful for storing a user's membership in a room, as a mapping
+# key
+RoomMember = namedtuple("RoomMember", ("room_id", "user"))
+
+
+class TypingNotificationHandler(BaseHandler):
+ def __init__(self, hs):
+ super(TypingNotificationHandler, self).__init__(hs)
+
+ self.homeserver = hs
+
+ self.clock = hs.get_clock()
+
+ self.federation = hs.get_replication_layer()
+
+ self.federation.register_edu_handler("m.typing", self._recv_edu)
+
+ hs.get_distributor().observe("user_left_room", self.user_left_room)
+
+ self._member_typing_until = {} # clock time we expect to stop
+ self._member_typing_timer = {} # delayed calls to manage the above
+
+ # map room IDs to serial numbers
+ self._room_serials = {}
+ self._latest_room_serial = 0
+ # map room IDs to sets of users currently typing
+ self._room_typing = {}
+
+ def tearDown(self):
+ """Cancels all the pending timers.
+ Normally this shouldn't be needed, but it's required from unit tests
+ to avoid a "Reactor was unclean" warning."""
+ for t in self._member_typing_timer.values():
+ self.clock.cancel_call_later(t)
+
+ @defer.inlineCallbacks
+ def started_typing(self, target_user, auth_user, room_id, timeout):
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "User is not hosted on this Home Server")
+
+ if target_user != auth_user:
+ raise AuthError(400, "Cannot set another user's typing state")
+
+ yield self.auth.check_joined_room(room_id, target_user.to_string())
+
+ logger.debug(
+ "%s has started typing in %s", target_user.to_string(), room_id
+ )
+
+ until = self.clock.time_msec() + timeout
+ member = RoomMember(room_id=room_id, user=target_user)
+
+ was_present = member in self._member_typing_until
+
+ if member in self._member_typing_timer:
+ self.clock.cancel_call_later(self._member_typing_timer[member])
+
+ def _cb():
+ logger.debug(
+ "%s has timed out in %s", target_user.to_string(), room_id
+ )
+ self._stopped_typing(member)
+
+ self._member_typing_until[member] = until
+ self._member_typing_timer[member] = self.clock.call_later(
+ timeout / 1000.0, _cb
+ )
+
+ if was_present:
+ # No point sending another notification
+ defer.returnValue(None)
+
+ yield self._push_update(
+ room_id=room_id,
+ user=target_user,
+ typing=True,
+ )
+
+ @defer.inlineCallbacks
+ def stopped_typing(self, target_user, auth_user, room_id):
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "User is not hosted on this Home Server")
+
+ if target_user != auth_user:
+ raise AuthError(400, "Cannot set another user's typing state")
+
+ yield self.auth.check_joined_room(room_id, target_user.to_string())
+
+ logger.debug(
+ "%s has stopped typing in %s", target_user.to_string(), room_id
+ )
+
+ member = RoomMember(room_id=room_id, user=target_user)
+
+ if member in self._member_typing_timer:
+ self.clock.cancel_call_later(self._member_typing_timer[member])
+ del self._member_typing_timer[member]
+
+ yield self._stopped_typing(member)
+
+ @defer.inlineCallbacks
+ def user_left_room(self, user, room_id):
+ if self.hs.is_mine(user):
+ member = RoomMember(room_id=room_id, user=user)
+ yield self._stopped_typing(member)
+
+ @defer.inlineCallbacks
+ def _stopped_typing(self, member):
+ if member not in self._member_typing_until:
+ # No point sending an update
+ defer.returnValue(None)
+
+ yield self._push_update(
+ room_id=member.room_id,
+ user=member.user,
+ typing=False,
+ )
+
+ del self._member_typing_until[member]
+
+ if member in self._member_typing_timer:
+ # Don't cancel it - either it already expired, or the real
+ # stopped_typing() will cancel it
+ del self._member_typing_timer[member]
+
+ @defer.inlineCallbacks
+ def _push_update(self, room_id, user, typing):
+ localusers = set()
+ remotedomains = set()
+
+ rm_handler = self.homeserver.get_handlers().room_member_handler
+ yield rm_handler.fetch_room_distributions_into(
+ room_id, localusers=localusers, remotedomains=remotedomains
+ )
+
+ if localusers:
+ self._push_update_local(
+ room_id=room_id,
+ user=user,
+ typing=typing
+ )
+
+ deferreds = []
+ for domain in remotedomains:
+ deferreds.append(self.federation.send_edu(
+ destination=domain,
+ edu_type="m.typing",
+ content={
+ "room_id": room_id,
+ "user_id": user.to_string(),
+ "typing": typing,
+ },
+ ))
+
+ yield defer.DeferredList(deferreds, consumeErrors=True)
+
+ @defer.inlineCallbacks
+ def _recv_edu(self, origin, content):
+ room_id = content["room_id"]
+ user = UserID.from_string(content["user_id"])
+
+ localusers = set()
+
+ rm_handler = self.homeserver.get_handlers().room_member_handler
+ yield rm_handler.fetch_room_distributions_into(
+ room_id, localusers=localusers
+ )
+
+ if localusers:
+ self._push_update_local(
+ room_id=room_id,
+ user=user,
+ typing=content["typing"]
+ )
+
+ def _push_update_local(self, room_id, user, typing):
+ room_set = self._room_typing.setdefault(room_id, set())
+ if typing:
+ room_set.add(user)
+ else:
+ room_set.discard(user)
+
+ self._latest_room_serial += 1
+ self._room_serials[room_id] = self._latest_room_serial
+
+ with PreserveLoggingContext():
+ self.notifier.on_new_event(
+ "typing_key", self._latest_room_serial, rooms=[room_id]
+ )
+
+
+class TypingNotificationEventSource(object):
+ def __init__(self, hs):
+ self.hs = hs
+ self._handler = None
+ self._room_member_handler = None
+
+ def handler(self):
+ # Avoid cyclic dependency in handler setup
+ if not self._handler:
+ self._handler = self.hs.get_handlers().typing_notification_handler
+ return self._handler
+
+ def room_member_handler(self):
+ if not self._room_member_handler:
+ self._room_member_handler = self.hs.get_handlers().room_member_handler
+ return self._room_member_handler
+
+ def _make_event_for(self, room_id):
+ typing = self.handler()._room_typing[room_id]
+ return {
+ "type": "m.typing",
+ "room_id": room_id,
+ "content": {
+ "user_ids": [u.to_string() for u in typing],
+ },
+ }
+
+ def get_new_events(self, from_key, room_ids, **kwargs):
+ from_key = int(from_key)
+ handler = self.handler()
+
+ events = []
+ for room_id in room_ids:
+ if room_id not in handler._room_serials:
+ continue
+ if handler._room_serials[room_id] <= from_key:
+ continue
+
+ events.append(self._make_event_for(room_id))
+
+ return events, handler._latest_room_serial
+
+ def get_current_key(self):
+ return self.handler()._latest_room_serial
+
+ def get_pagination_rows(self, user, pagination_config, key):
+ return ([], pagination_config.from_key)
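+
+ # Sketch of how a consumer might poll this source (names illustrative):
+ #
+ #   source = TypingNotificationEventSource(hs)
+ #   events, next_key = source.get_new_events(from_key=0, room_ids=[room_id])
+ #   # `events` holds one m.typing event per room whose serial is newer
+ #   # than from_key; pass `next_key` back as from_key on the next poll.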
diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py
new file mode 100644
index 00000000..c488b10d
--- /dev/null
+++ b/synapse/http/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/http/client.py b/synapse/http/client.py
new file mode 100644
index 00000000..27e51902
--- /dev/null
+++ b/synapse/http/client.py
@@ -0,0 +1,292 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from OpenSSL import SSL
+from OpenSSL.SSL import VERIFY_NONE
+
+from synapse.api.errors import CodeMessageException
+from synapse.util.logcontext import preserve_context_over_fn
+import synapse.metrics
+
+from canonicaljson import encode_canonical_json
+
+from twisted.internet import defer, reactor, ssl
+from twisted.web.client import (
+ Agent, readBody, FileBodyProducer, PartialDownloadError,
+)
+from twisted.web.http_headers import Headers
+
+from StringIO import StringIO
+
+import simplejson as json
+import logging
+import urllib
+
+
+logger = logging.getLogger(__name__)
+
+metrics = synapse.metrics.get_metrics_for(__name__)
+
+outgoing_requests_counter = metrics.register_counter(
+ "requests",
+ labels=["method"],
+)
+incoming_responses_counter = metrics.register_counter(
+ "responses",
+ labels=["method", "code"],
+)
+
+
+class SimpleHttpClient(object):
+ """
+ A simple, no-frills HTTP client with methods that wrap up common ways of
+ using HTTP in Matrix
+ """
+ def __init__(self, hs):
+ self.hs = hs
+ # The default context factory in Twisted 14.0.0 (which we require) is
+ # BrowserLikePolicyForHTTPS which will do regular cert validation
+ # 'like a browser'
+ self.agent = Agent(
+ reactor,
+ connectTimeout=15,
+ contextFactory=hs.get_http_client_context_factory()
+ )
+ self.user_agent = hs.version_string
+ if hs.config.user_agent_suffix:
+ self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,)
+
+ def request(self, method, uri, *args, **kwargs):
+ # A small wrapper around self.agent.request() so we can easily attach
+ # counters to it
+ outgoing_requests_counter.inc(method)
+ d = preserve_context_over_fn(
+ self.agent.request,
+ method, uri, *args, **kwargs
+ )
+
+ logger.info("Sending request %s %s", method, uri)
+
+ def _cb(response):
+ incoming_responses_counter.inc(method, response.code)
+ logger.info(
+ "Received response to %s %s: %s",
+ method, uri, response.code
+ )
+ return response
+
+ def _eb(failure):
+ incoming_responses_counter.inc(method, "ERR")
+ logger.info(
+ "Error sending request to %s %s: %s %s",
+ method, uri, failure.type, failure.getErrorMessage()
+ )
+ return failure
+
+ d.addCallbacks(_cb, _eb)
+
+ return d
+
+ @defer.inlineCallbacks
+ def post_urlencoded_get_json(self, uri, args={}):
+ # TODO: Do we ever want to log message contents?
+ logger.debug("post_urlencoded_get_json args: %s", args)
+
+ query_bytes = urllib.urlencode(args, True)
+
+ response = yield self.request(
+ "POST",
+ uri.encode("ascii"),
+ headers=Headers({
+ b"Content-Type": [b"application/x-www-form-urlencoded"],
+ b"User-Agent": [self.user_agent],
+ }),
+ bodyProducer=FileBodyProducer(StringIO(query_bytes))
+ )
+
+ body = yield preserve_context_over_fn(readBody, response)
+
+ defer.returnValue(json.loads(body))
+
+ @defer.inlineCallbacks
+ def post_json_get_json(self, uri, post_json):
+ json_str = encode_canonical_json(post_json)
+
+ logger.debug("HTTP POST %s -> %s", json_str, uri)
+
+ response = yield self.request(
+ "POST",
+ uri.encode("ascii"),
+ headers=Headers({
+ b"Content-Type": [b"application/json"],
+ b"User-Agent": [self.user_agent],
+ }),
+ bodyProducer=FileBodyProducer(StringIO(json_str))
+ )
+
+ body = yield preserve_context_over_fn(readBody, response)
+
+ defer.returnValue(json.loads(body))
+
+ @defer.inlineCallbacks
+ def get_json(self, uri, args={}):
+ """ Gets some json from the given URI.
+
+ Args:
+ uri (str): The URI to request, not including query parameters
+ args (dict): A dictionary used to create query strings; defaults
+ to an empty dict.
+ **Note**: The value of each key is assumed to be an iterable
+ and *not* a string.
+ Returns:
+ Deferred: Succeeds when we get *any* 2xx HTTP response, with the
+ HTTP body as JSON.
+ Raises:
+ On a non-2xx HTTP response. The response body will be used as the
+ error message.
+ """
+ body = yield self.get_raw(uri, args)
+ defer.returnValue(json.loads(body))
+
+ @defer.inlineCallbacks
+ def put_json(self, uri, json_body, args={}):
+ """ Puts some json to the given URI.
+
+ Args:
+ uri (str): The URI to request, not including query parameters
+ json_body (dict): The JSON to put in the HTTP body.
+ args (dict): A dictionary used to create query strings; defaults
+ to an empty dict.
+ **Note**: The value of each key is assumed to be an iterable
+ and *not* a string.
+ Returns:
+ Deferred: Succeeds when we get *any* 2xx HTTP response, with the
+ HTTP body as JSON.
+ Raises:
+ On a non-2xx HTTP response.
+ """
+ if len(args):
+ query_bytes = urllib.urlencode(args, True)
+ uri = "%s?%s" % (uri, query_bytes)
+
+ json_str = encode_canonical_json(json_body)
+
+ response = yield self.request(
+ "PUT",
+ uri.encode("ascii"),
+ headers=Headers({
+ b"User-Agent": [self.user_agent],
+ "Content-Type": ["application/json"]
+ }),
+ bodyProducer=FileBodyProducer(StringIO(json_str))
+ )
+
+ body = yield preserve_context_over_fn(readBody, response)
+
+ if 200 <= response.code < 300:
+ defer.returnValue(json.loads(body))
+ else:
+ # NB: This is explicitly not json.loads(body)'d because the contract
+ # of CodeMessageException is a *string* message. Callers can always
+ # load it into JSON if they want.
+ raise CodeMessageException(response.code, body)
+
+ @defer.inlineCallbacks
+ def get_raw(self, uri, args={}):
+ """ Gets raw text from the given URI.
+
+ Args:
+ uri (str): The URI to request, not including query parameters
+ args (dict): A dictionary used to create query strings; defaults
+ to an empty dict.
+ **Note**: The value of each key is assumed to be an iterable
+ and *not* a string.
+ Returns:
+ Deferred: Succeeds when we get *any* 2xx HTTP response, with the
+ HTTP body as text.
+ Raises:
+ On a non-2xx HTTP response. The response body will be used as the
+ error message.
+ """
+ if len(args):
+ query_bytes = urllib.urlencode(args, True)
+ uri = "%s?%s" % (uri, query_bytes)
+
+ response = yield self.request(
+ "GET",
+ uri.encode("ascii"),
+ headers=Headers({
+ b"User-Agent": [self.user_agent],
+ })
+ )
+
+ body = yield preserve_context_over_fn(readBody, response)
+
+ if 200 <= response.code < 300:
+ defer.returnValue(body)
+ else:
+ raise CodeMessageException(response.code, body)
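+
+ # Usage sketch for SimpleHttpClient (assumes a configured HomeServer
+ # `hs`; the URL is illustrative):
+ #
+ #   @defer.inlineCallbacks
+ #   def fetch_something(hs):
+ #       client = SimpleHttpClient(hs)
+ #       body = yield client.get_json("http://localhost:8008/_matrix/foo")
+ #       defer.returnValue(body)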
+
+
+class CaptchaServerHttpClient(SimpleHttpClient):
+ """
+ Separate HTTP client for talking to Google's captcha servers.
+ Only slightly special because it accepts partial download responses.
+
+ Used only by the v1 client/server API.
+ """
+
+ @defer.inlineCallbacks
+ def post_urlencoded_get_raw(self, url, args={}):
+ query_bytes = urllib.urlencode(args, True)
+
+ response = yield self.request(
+ "POST",
+ url.encode("ascii"),
+ bodyProducer=FileBodyProducer(StringIO(query_bytes)),
+ headers=Headers({
+ b"Content-Type": [b"application/x-www-form-urlencoded"],
+ b"User-Agent": [self.user_agent],
+ })
+ )
+
+ try:
+ body = yield preserve_context_over_fn(readBody, response)
+ defer.returnValue(body)
+ except PartialDownloadError as e:
+ # Twisted dislikes Google's response: it has no Content-Length.
+ defer.returnValue(e.response)
+
+
+def _print_ex(e):
+ if hasattr(e, "reasons") and e.reasons:
+ for ex in e.reasons:
+ _print_ex(ex)
+ else:
+ logger.exception(e)
+
+
+class InsecureInterceptableContextFactory(ssl.ContextFactory):
+ """
+ Factory for PyOpenSSL SSL contexts which accepts any certificate for any domain.
+
+ Do not use this since it allows an attacker to intercept your communications.
+ """
+
+ def __init__(self):
+ self._context = SSL.Context(SSL.SSLv23_METHOD)
+ self._context.set_verify(VERIFY_NONE, lambda *_: None)
+
+ def getContext(self, hostname, port):
+ return self._context
diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py
new file mode 100644
index 00000000..4ae45f13
--- /dev/null
+++ b/synapse/http/endpoint.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint
+from twisted.internet import defer
+from twisted.internet.error import ConnectError
+from twisted.names import client, dns
+from twisted.names.error import DNSNameError
+
+import collections
+import logging
+import random
+
+
+logger = logging.getLogger(__name__)
+
+
+def matrix_federation_endpoint(reactor, destination, ssl_context_factory=None,
+ timeout=None):
+ """Construct an endpoint for the given matrix destination.
+
+ Args:
+ reactor: Twisted reactor.
+ destination (bytes): The name of the server to connect to.
+ ssl_context_factory (twisted.internet.ssl.ContextFactory): Factory
+ which generates SSL contexts to use for TLS.
+ timeout (int): connection timeout in seconds
+ """
+
+ domain_port = destination.split(":")
+ domain = domain_port[0]
+ port = int(domain_port[1]) if domain_port[1:] else None
+
+ endpoint_kw_args = {}
+
+ if timeout is not None:
+ endpoint_kw_args.update(timeout=timeout)
+
+ if ssl_context_factory is None:
+ transport_endpoint = TCP4ClientEndpoint
+ default_port = 8008
+ else:
+ transport_endpoint = SSL4ClientEndpoint
+ endpoint_kw_args.update(sslContextFactory=ssl_context_factory)
+ default_port = 8448
+
+ if port is None:
+ return SRVClientEndpoint(
+ reactor, "matrix", domain, protocol="tcp",
+ default_port=default_port, endpoint=transport_endpoint,
+ endpoint_kw_args=endpoint_kw_args
+ )
+ else:
+ return transport_endpoint(reactor, domain, port, **endpoint_kw_args)
+
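+# Example (illustrative hostnames): for destination "example.org" with a
+# TLS context factory this returns an SRVClientEndpoint that looks up
+# _matrix._tcp.example.org and falls back to example.org:8448, whereas
+# "example.org:1234" skips the SRV lookup and connects to port 1234
+# directly.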
+
+class SRVClientEndpoint(object):
+ """An endpoint which looks up SRV records for a service.
+ Cycles through the list of servers, picking the next server on each
+ call to connect().
+ Implements twisted.internet.interfaces.IStreamClientEndpoint.
+ """
+
+ _Server = collections.namedtuple(
+ "_Server", "priority weight host port"
+ )
+
+ def __init__(self, reactor, service, domain, protocol="tcp",
+ default_port=None, endpoint=TCP4ClientEndpoint,
+ endpoint_kw_args={}):
+ self.reactor = reactor
+ self.service_name = "_%s._%s.%s" % (service, protocol, domain)
+
+ if default_port is not None:
+ self.default_server = self._Server(
+ host=domain,
+ port=default_port,
+ priority=0,
+ weight=0
+ )
+ else:
+ self.default_server = None
+
+ self.endpoint = endpoint
+ self.endpoint_kw_args = endpoint_kw_args
+
+ self.servers = None
+ self.used_servers = None
+
+ @defer.inlineCallbacks
+ def fetch_servers(self):
+ try:
+ answers, auth, add = yield client.lookupService(self.service_name)
+ except DNSNameError:
+ answers = []
+
+ if (len(answers) == 1
+ and answers[0].type == dns.SRV
+ and answers[0].payload
+ and answers[0].payload.target == dns.Name('.')):
+ raise ConnectError("Service %s unavailable", self.service_name)
+
+ self.servers = []
+ self.used_servers = []
+
+ for answer in answers:
+ if answer.type != dns.SRV or not answer.payload:
+ continue
+ payload = answer.payload
+ self.servers.append(self._Server(
+ host=str(payload.target),
+ port=int(payload.port),
+ priority=int(payload.priority),
+ weight=int(payload.weight)
+ ))
+
+ self.servers.sort()
+
+ def pick_server(self):
+ if not self.servers:
+ if self.used_servers:
+ self.servers = self.used_servers
+ self.used_servers = []
+ self.servers.sort()
+ elif self.default_server:
+ return self.default_server
+ else:
+ raise ConnectError(
+ "No servers available for %s" % (self.service_name,)
+ )
+
+ min_priority = self.servers[0].priority
+ weight_indexes = list(
+ (index, server.weight + 1)
+ for index, server in enumerate(self.servers)
+ if server.priority == min_priority
+ )
+
+ total_weight = sum(weight for index, weight in weight_indexes)
+ target_weight = random.randint(0, total_weight)
+
+ for index, weight in weight_indexes:
+ target_weight -= weight
+ if target_weight <= 0:
+ server = self.servers[index]
+ del self.servers[index]
+ self.used_servers.append(server)
+ return server
+
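+ # Worked example of the weighted pick above (numbers illustrative): two
+ # servers at the minimum priority with weights 0 and 2 give
+ # weight_indexes [(0, 1), (1, 3)] and total_weight 4; a random target
+ # in [0, 4] then walks the list, so the heavier server is chosen more
+ # often while the +1 keeps zero-weight servers selectable, in the
+ # spirit of RFC 2782.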
+ @defer.inlineCallbacks
+ def connect(self, protocolFactory):
+ if self.servers is None:
+ yield self.fetch_servers()
+ server = self.pick_server()
+ logger.info("Connecting to %s:%s", server.host, server.port)
+ endpoint = self.endpoint(
+ self.reactor, server.host, server.port, **self.endpoint_kw_args
+ )
+ connection = yield endpoint.connect(protocolFactory)
+ defer.returnValue(connection)
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
new file mode 100644
index 00000000..ca959155
--- /dev/null
+++ b/synapse/http/matrixfederationclient.py
@@ -0,0 +1,502 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer, reactor, protocol
+from twisted.internet.error import DNSLookupError
+from twisted.web.client import readBody, HTTPConnectionPool, Agent
+from twisted.web.http_headers import Headers
+from twisted.web._newclient import ResponseDone
+
+from synapse.http.endpoint import matrix_federation_endpoint
+from synapse.util.async import sleep
+from synapse.util.logcontext import preserve_context_over_fn
+import synapse.metrics
+
+from canonicaljson import encode_canonical_json
+
+from synapse.api.errors import (
+ SynapseError, Codes, HttpResponseException,
+)
+
+from signedjson.sign import sign_json
+
+import simplejson as json
+import logging
+import random
+import sys
+import urllib
+import urlparse
+
+
+logger = logging.getLogger(__name__)
+outbound_logger = logging.getLogger("synapse.http.outbound")
+
+metrics = synapse.metrics.get_metrics_for(__name__)
+
+outgoing_requests_counter = metrics.register_counter(
+ "requests",
+ labels=["method"],
+)
+incoming_responses_counter = metrics.register_counter(
+ "responses",
+ labels=["method", "code"],
+)
+
+
+MAX_RETRIES = 10
+
+
+class MatrixFederationEndpointFactory(object):
+ def __init__(self, hs):
+ self.tls_server_context_factory = hs.tls_server_context_factory
+
+ def endpointForURI(self, uri):
+ destination = uri.netloc
+
+ return matrix_federation_endpoint(
+ reactor, destination, timeout=10,
+ ssl_context_factory=self.tls_server_context_factory
+ )
+
+
+class MatrixFederationHttpClient(object):
+ """HTTP client used to talk to other homeservers over the federation
+ protocol. Sends client certificates and signs requests.
+
+ Attributes:
+ agent (twisted.web.client.Agent): The twisted Agent used to send the
+ requests.
+ """
+
+ def __init__(self, hs):
+ self.hs = hs
+ self.signing_key = hs.config.signing_key[0]
+ self.server_name = hs.hostname
+ pool = HTTPConnectionPool(reactor)
+ pool.maxPersistentPerHost = 10
+ self.agent = Agent.usingEndpointFactory(
+ reactor, MatrixFederationEndpointFactory(hs), pool=pool
+ )
+ self.clock = hs.get_clock()
+ self.version_string = hs.version_string
+ self._next_id = 1
+
+ def _create_url(self, destination, path_bytes, param_bytes, query_bytes):
+ return urlparse.urlunparse(
+ ("matrix", destination, path_bytes, param_bytes, query_bytes, "")
+ )
+
+ @defer.inlineCallbacks
+ def _create_request(self, destination, method, path_bytes,
+ body_callback, headers_dict={}, param_bytes=b"",
+ query_bytes=b"", retry_on_dns_fail=True,
+ timeout=None):
+ """ Creates and sends a request to the given url
+ """
+ headers_dict[b"User-Agent"] = [self.version_string]
+ headers_dict[b"Host"] = [destination]
+
+ url_bytes = self._create_url(
+ destination, path_bytes, param_bytes, query_bytes
+ )
+
+ txn_id = "%s-O-%s" % (method, self._next_id)
+ self._next_id = (self._next_id + 1) % (sys.maxint - 1)
+
+ outbound_logger.info(
+ "{%s} [%s] Sending request: %s %s",
+ txn_id, destination, method, url_bytes
+ )
+
+ # XXX: Would be much nicer to retry only at the transaction-layer
+ # (once we have reliable transactions in place)
+ retries_left = MAX_RETRIES
+
+ http_url_bytes = urlparse.urlunparse(
+ ("", "", path_bytes, param_bytes, query_bytes, "")
+ )
+
+ log_result = None
+ try:
+ while True:
+ producer = None
+ if body_callback:
+ producer = body_callback(method, http_url_bytes, headers_dict)
+
+ try:
+ def send_request():
+ request_deferred = preserve_context_over_fn(
+ self.agent.request,
+ method,
+ url_bytes,
+ Headers(headers_dict),
+ producer
+ )
+
+ return self.clock.time_bound_deferred(
+ request_deferred,
+ time_out=timeout/1000. if timeout else 60,
+ )
+
+ response = yield preserve_context_over_fn(
+ send_request,
+ )
+
+ log_result = "%d %s" % (response.code, response.phrase,)
+ break
+ except Exception as e:
+ if not retry_on_dns_fail and isinstance(e, DNSLookupError):
+ logger.warn(
+ "DNS Lookup failed to %s with %s",
+ destination,
+ e
+ )
+ log_result = "DNS Lookup failed to %s with %s" % (
+ destination, e
+ )
+ raise
+
+ logger.warn(
+ "{%s} Sending request failed to %s: %s %s: %s - %s",
+ txn_id,
+ destination,
+ method,
+ url_bytes,
+ type(e).__name__,
+ _flatten_response_never_received(e),
+ )
+
+ log_result = "%s - %s" % (
+ type(e).__name__, _flatten_response_never_received(e),
+ )
+
+ if retries_left and not timeout:
+ delay = 4 ** (MAX_RETRIES + 1 - retries_left)
+ delay = min(delay, 60)  # cap the backoff at 60s
+ delay *= random.uniform(0.8, 1.4)
+ yield sleep(delay)
+ retries_left -= 1
+ else:
+ raise
+ finally:
+ outbound_logger.info(
+ "{%s} [%s] Result: %s",
+ txn_id,
+ destination,
+ log_result,
+ )
+
+ if 200 <= response.code < 300:
+ pass
+ else:
+ # :'(
+ # Update transactions table?
+ body = yield preserve_context_over_fn(readBody, response)
+ raise HttpResponseException(
+ response.code, response.phrase, body
+ )
+
+ defer.returnValue(response)
+
+ def sign_request(self, destination, method, url_bytes, headers_dict,
+ content=None):
+ request = {
+ "method": method,
+ "uri": url_bytes,
+ "origin": self.server_name,
+ "destination": destination,
+ }
+
+ if content is not None:
+ request["content"] = content
+
+ request = sign_json(request, self.server_name, self.signing_key)
+
+ auth_headers = []
+
+ for key, sig in request["signatures"][self.server_name].items():
+ auth_headers.append(bytes(
+ "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
+ self.server_name, key, sig,
+ )
+ ))
+
+ headers_dict[b"Authorization"] = auth_headers
+
+ @defer.inlineCallbacks
+ def put_json(self, destination, path, data={}, json_data_callback=None):
+ """ Sends the specifed json data using PUT
+
+ Args:
+ destination (str): The remote server to send the HTTP request
+ to.
+ path (str): The HTTP path.
+ data (dict): A dict containing the data that will be used as
+ the request body. This will be encoded as JSON.
+ json_data_callback (callable): A callable returning the dict to
+ use as the request body.
+
+ Returns:
+ Deferred: Succeeds when we get a 2xx HTTP response. The result
+ will be the decoded JSON body. On a 4xx or 5xx error response a
+ CodeMessageException is raised.
+ """
+
+ if not json_data_callback:
+ def json_data_callback():
+ return data
+
+ def body_callback(method, url_bytes, headers_dict):
+ json_data = json_data_callback()
+ self.sign_request(
+ destination, method, url_bytes, headers_dict, json_data
+ )
+ producer = _JsonProducer(json_data)
+ return producer
+
+ response = yield self._create_request(
+ destination.encode("ascii"),
+ "PUT",
+ path.encode("ascii"),
+ body_callback=body_callback,
+ headers_dict={"Content-Type": ["application/json"]},
+ )
+
+ if 200 <= response.code < 300:
+ # We need to update the transactions table to say it was sent?
+ c_type = response.headers.getRawHeaders("Content-Type")
+
+ if "application/json" not in c_type:
+ raise RuntimeError(
+ "Content-Type not application/json"
+ )
+
+ body = yield preserve_context_over_fn(readBody, response)
+ defer.returnValue(json.loads(body))
+
+ @defer.inlineCallbacks
+ def post_json(self, destination, path, data={}):
+ """ Sends the specifed json data using POST
+
+ Args:
+ destination (str): The remote server to send the HTTP request
+ to.
+ path (str): The HTTP path.
+ data (dict): A dict containing the data that will be used as
+ the request body. This will be encoded as JSON.
+
+ Returns:
+ Deferred: Succeeds when we get a 2xx HTTP response. The result
+ will be the decoded JSON body. On a 4xx or 5xx error response a
+ CodeMessageException is raised.
+ """
+
+ def body_callback(method, url_bytes, headers_dict):
+ self.sign_request(
+ destination, method, url_bytes, headers_dict, data
+ )
+ return _JsonProducer(data)
+
+ response = yield self._create_request(
+ destination.encode("ascii"),
+ "POST",
+ path.encode("ascii"),
+ body_callback=body_callback,
+ headers_dict={"Content-Type": ["application/json"]},
+ )
+
+ if 200 <= response.code < 300:
+ # We need to update the transactions table to say it was sent?
+ c_type = response.headers.getRawHeaders("Content-Type")
+
+ if "application/json" not in c_type:
+ raise RuntimeError(
+ "Content-Type not application/json"
+ )
+
+ body = yield preserve_context_over_fn(readBody, response)
+
+ defer.returnValue(json.loads(body))
+
+ @defer.inlineCallbacks
+ def get_json(self, destination, path, args={}, retry_on_dns_fail=True,
+ timeout=None):
+ """ GETs some json from the given host homeserver and path
+
+ Args:
+ destination (str): The remote server to send the HTTP request
+ to.
+ path (str): The HTTP path.
+ args (dict): A dictionary used to create query strings; defaults
+ to an empty dict.
+ timeout (int): How long to try (in ms) the destination for before
+ giving up. None indicates no timeout and that the request will
+ be retried.
+ Returns:
+ Deferred: Succeeds when we get a 2xx HTTP response. The result
+ will be the decoded JSON body. On a 4xx or 5xx error response a
+ CodeMessageException is raised.
+ """
+ logger.debug("get_json args: %s", args)
+
+ encoded_args = {}
+ for k, vs in args.items():
+ if isinstance(vs, basestring):
+ vs = [vs]
+ encoded_args[k] = [v.encode("UTF-8") for v in vs]
+
+ query_bytes = urllib.urlencode(encoded_args, True)
+ logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail)
+
+ def body_callback(method, url_bytes, headers_dict):
+ self.sign_request(destination, method, url_bytes, headers_dict)
+ return None
+
+ response = yield self._create_request(
+ destination.encode("ascii"),
+ "GET",
+ path.encode("ascii"),
+ query_bytes=query_bytes,
+ body_callback=body_callback,
+ retry_on_dns_fail=retry_on_dns_fail,
+ timeout=timeout,
+ )
+
+ if 200 <= response.code < 300:
+ # We need to update the transactions table to say it was sent?
+ c_type = response.headers.getRawHeaders("Content-Type")
+
+ if "application/json" not in c_type:
+ raise RuntimeError(
+ "Content-Type not application/json"
+ )
+
+ body = yield preserve_context_over_fn(readBody, response)
+
+ defer.returnValue(json.loads(body))
+
+ @defer.inlineCallbacks
+ def get_file(self, destination, path, output_stream, args={},
+ retry_on_dns_fail=True, max_size=None):
+ """GETs a file from a given homeserver
+ Args:
+ destination (str): The remote server to send the HTTP request to.
+ path (str): The HTTP path to GET.
+ output_stream (file): File to write the response body to.
+ args (dict): Optional dictionary used to create the query string.
+ Returns:
+ A (int,dict) tuple of the file length and a dict of the response
+ headers.
+ """
+
+ encoded_args = {}
+ for k, vs in args.items():
+ if isinstance(vs, basestring):
+ vs = [vs]
+ encoded_args[k] = [v.encode("UTF-8") for v in vs]
+
+ query_bytes = urllib.urlencode(encoded_args, True)
+ logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail)
+
+ def body_callback(method, url_bytes, headers_dict):
+ self.sign_request(destination, method, url_bytes, headers_dict)
+ return None
+
+ response = yield self._create_request(
+ destination.encode("ascii"),
+ "GET",
+ path.encode("ascii"),
+ query_bytes=query_bytes,
+ body_callback=body_callback,
+ retry_on_dns_fail=retry_on_dns_fail
+ )
+
+ headers = dict(response.headers.getAllRawHeaders())
+
+ try:
+ length = yield preserve_context_over_fn(
+ _readBodyToFile,
+ response, output_stream, max_size
+ )
+ except:
+ logger.exception("Failed to download body")
+ raise
+
+ defer.returnValue((length, headers))
+
+
+class _ReadBodyToFileProtocol(protocol.Protocol):
+ def __init__(self, stream, deferred, max_size):
+ self.stream = stream
+ self.deferred = deferred
+ self.length = 0
+ self.max_size = max_size
+
+ def dataReceived(self, data):
+ self.stream.write(data)
+ self.length += len(data)
+ if self.max_size is not None and self.length >= self.max_size:
+ self.deferred.errback(SynapseError(
+ 502,
+ "Requested file is too large > %r bytes" % (self.max_size,),
+ Codes.TOO_LARGE,
+ ))
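+ # Swap in a fresh deferred so connectionLost doesn't re-fire
+ # the one we just errbacked.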
+ self.deferred = defer.Deferred()
+ self.transport.loseConnection()
+
+ def connectionLost(self, reason):
+ if reason.check(ResponseDone):
+ self.deferred.callback(self.length)
+ else:
+ self.deferred.errback(reason)
+
+
+def _readBodyToFile(response, stream, max_size):
+ d = defer.Deferred()
+ response.deliverBody(_ReadBodyToFileProtocol(stream, d, max_size))
+ return d
+
+
+class _JsonProducer(object):
+ """ Used by the twisted http client to create the HTTP body from json
+ """
+ def __init__(self, jsn):
+ self.reset(jsn)
+
+ def reset(self, jsn):
+ self.body = encode_canonical_json(jsn)
+ self.length = len(self.body)
+
+ def startProducing(self, consumer):
+ consumer.write(self.body)
+ return defer.succeed(None)
+
+ def pauseProducing(self):
+ pass
+
+ def stopProducing(self):
+ pass
+
+
+def _flatten_response_never_received(e):
+ if hasattr(e, "reasons"):
+ return ", ".join(
+ _flatten_response_never_received(f.value)
+ for f in e.reasons
+ )
+ else:
+ return "%s: %s" % (type(e).__name__, e.message,)
diff --git a/synapse/http/server.py b/synapse/http/server.py
new file mode 100644
index 00000000..50feea6f
--- /dev/null
+++ b/synapse/http/server.py
@@ -0,0 +1,325 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from synapse.api.errors import (
+ cs_exception, SynapseError, CodeMessageException, UnrecognizedRequestError
+)
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+import synapse.metrics
+import synapse.events
+
+from canonicaljson import (
+ encode_canonical_json, encode_pretty_printed_json
+)
+
+from twisted.internet import defer
+from twisted.web import server, resource
+from twisted.web.server import NOT_DONE_YET
+from twisted.web.util import redirectTo
+
+import collections
+import logging
+import urllib
+import ujson
+
+logger = logging.getLogger(__name__)
+
+metrics = synapse.metrics.get_metrics_for(__name__)
+
+incoming_requests_counter = metrics.register_counter(
+ "requests",
+ labels=["method", "servlet"],
+)
+outgoing_responses_counter = metrics.register_counter(
+ "responses",
+ labels=["method", "code"],
+)
+
+response_timer = metrics.register_distribution(
+ "response_time",
+ labels=["method", "servlet"]
+)
+
+_next_request_id = 0
+
+
+def request_handler(request_handler):
+ """Wraps a method that acts as a request handler with the necessary logging
+ and exception handling.
+
+ The method must have a signature of "handle_foo(self, request)". The
+ argument "self" must have "version_string" and "clock" attributes. The
+ argument "request" must be a twisted HTTP request.
+
+ The method must return a deferred. If the deferred succeeds we assume that
+ a response has been sent. If the deferred fails with a SynapseError we use
+ it to send a JSON response with the appropriate HTTP response code. If
+ the deferred fails with any other type of error we send a 500 response.
+
+ We insert a unique request-id into the logging context for this request and
+ log the response and duration for this request.
+ """
+
+ @defer.inlineCallbacks
+ def wrapped_request_handler(self, request):
+ global _next_request_id
+ request_id = "%s-%s" % (request.method, _next_request_id)
+ _next_request_id += 1
+ with LoggingContext(request_id) as request_context:
+ request_context.request = request_id
+ with request.processing():
+ try:
+ d = request_handler(self, request)
+ with PreserveLoggingContext():
+ yield d
+ except CodeMessageException as e:
+ code = e.code
+ if isinstance(e, SynapseError):
+ logger.info(
+ "%s SynapseError: %s - %s", request, code, e.msg
+ )
+ else:
+ logger.exception(e)
+ outgoing_responses_counter.inc(request.method, str(code))
+ respond_with_json(
+ request, code, cs_exception(e), send_cors=True,
+ pretty_print=_request_user_agent_is_curl(request),
+ version_string=self.version_string,
+ )
+ except:
+ logger.exception(
+ "Failed handle request %s.%s on %r: %r",
+ request_handler.__module__,
+ request_handler.__name__,
+ self,
+ request
+ )
+ respond_with_json(
+ request,
+ 500,
+ {"error": "Internal server error"},
+ send_cors=True
+ )
+ return wrapped_request_handler
+
+
+class HttpServer(object):
+ """ Interface for registering callbacks on a HTTP server
+ """
+
+ def register_path(self, method, path_pattern, callback):
+ """ Register a callback that gets fired if we receive a http request
+ with the given method for a path that matches the given regex.
+
+ If the regex contains groups, these get passed to the callback via
+ an unpacked tuple.
+
+ Args:
+ method (str): The method to listen to.
+ path_pattern (str): The regex used to match requests.
+ callback (function): The function to fire if we receive a matched
+ request. The first argument will be the request object and
+ subsequent arguments will be any matched groups from the regex.
+ This should return a tuple of (code, response).
+ """
+ pass
+
+
+class JsonResource(HttpServer, resource.Resource):
+ """ This implements the HttpServer interface and provides JSON support for
+ Resources.
+
+ Register callbacks via register_path()
+
+ Callbacks can return a tuple of status code and a dict, in which case
+ the dict will automatically be sent to the client as a JSON object.
+
+ The JsonResource is primarily intended for returning JSON, but callbacks
+ may send something other than JSON by using the methods on the request
+ object directly and returning None instead.
+ """
+
+ isLeaf = True
+
+ _PathEntry = collections.namedtuple("_PathEntry", ["pattern", "callback"])
+
+ def __init__(self, hs, canonical_json=True):
+ resource.Resource.__init__(self)
+
+ self.canonical_json = canonical_json
+ self.clock = hs.get_clock()
+ self.path_regexs = {}
+ self.version_string = hs.version_string
+ self.hs = hs
+
+ def register_path(self, method, path_pattern, callback):
+ self.path_regexs.setdefault(method, []).append(
+ self._PathEntry(path_pattern, callback)
+ )
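+ # Example (path and handler are illustrative):
+ #
+ #   self.register_path(
+ #       "GET", re.compile("^/_matrix/client/api/v1/ping$"), self.on_ping
+ #   )
+ #
+ # on_ping(request) may then return a (code, dict) tuple which is sent
+ # to the client as JSON.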
+
+ def render(self, request):
+ """ This gets called by twisted every time someone sends us a request.
+ """
+ self._async_render(request)
+ return server.NOT_DONE_YET
+
+ @request_handler
+ @defer.inlineCallbacks
+ def _async_render(self, request):
+ """ This gets called from render() every time someone sends us a request.
+ This checks if anyone has registered a callback for that method and
+ path.
+ """
+ start = self.clock.time_msec()
+ if request.method == "OPTIONS":
+ self._send_response(request, 200, {})
+ return
+ # Loop through all the registered callbacks to check if the method
+ # and path regex match
+ for path_entry in self.path_regexs.get(request.method, []):
+ m = path_entry.pattern.match(request.path)
+ if not m:
+ continue
+
+ # We found a match! Trigger callback and then return the
+ # returned response. We pass both the request and any
+ # matched groups from the regex to the callback.
+
+ callback = path_entry.callback
+
+ servlet_instance = getattr(callback, "__self__", None)
+ if servlet_instance is not None:
+ servlet_classname = servlet_instance.__class__.__name__
+ else:
+ servlet_classname = "%r" % callback
+ incoming_requests_counter.inc(request.method, servlet_classname)
+
+ args = [
+ urllib.unquote(u).decode("UTF-8") if u else u for u in m.groups()
+ ]
+
+ callback_return = yield callback(request, *args)
+ if callback_return is not None:
+ code, response = callback_return
+ self._send_response(request, code, response)
+
+ response_timer.inc_by(
+ self.clock.time_msec() - start, request.method, servlet_classname
+ )
+
+ return
+
+ # Huh. No one wanted to handle that? Fiiiiiine. Send 400.
+ raise UnrecognizedRequestError()
+
+ def _send_response(self, request, code, response_json_object,
+ response_code_message=None):
+ # could alternatively use request.notifyFinish() and flip a flag when
+ # the Deferred fires, but since the flag is RIGHT THERE it seems like
+ # a waste.
+ if request._disconnected:
+ logger.warn(
+ "Not sending response to request %s, already disconnected.",
+ request)
+ return
+
+ outgoing_responses_counter.inc(request.method, str(code))
+
+ # TODO: Only enable CORS for the requests that need it.
+ respond_with_json(
+ request, code, response_json_object,
+ send_cors=True,
+ response_code_message=response_code_message,
+ pretty_print=_request_user_agent_is_curl(request),
+ version_string=self.version_string,
+ canonical_json=self.canonical_json,
+ )
+
+
+class RootRedirect(resource.Resource):
+ """Redirects the root '/' path to another path."""
+
+ def __init__(self, path):
+ resource.Resource.__init__(self)
+ self.url = path
+
+ def render_GET(self, request):
+ return redirectTo(self.url, request)
+
+ def getChild(self, name, request):
+ if len(name) == 0:
+ return self # select ourselves as the child to render
+ return resource.Resource.getChild(self, name, request)
+
+
+def respond_with_json(request, code, json_object, send_cors=False,
+ response_code_message=None, pretty_print=False,
+ version_string="", canonical_json=True):
+ if pretty_print:
+ json_bytes = encode_pretty_printed_json(json_object) + "\n"
+ else:
+ if canonical_json or synapse.events.USE_FROZEN_DICTS:
+ json_bytes = encode_canonical_json(json_object)
+ else:
+ # ujson doesn't like frozen_dicts.
+ json_bytes = ujson.dumps(json_object, ensure_ascii=False)
+
+ return respond_with_json_bytes(
+ request, code, json_bytes,
+ send_cors=send_cors,
+ response_code_message=response_code_message,
+ version_string=version_string
+ )
+
+
+def respond_with_json_bytes(request, code, json_bytes, send_cors=False,
+ version_string="", response_code_message=None):
+ """Sends encoded JSON in response to the given request.
+
+ Args:
+ request (twisted.web.http.Request): The http request to respond to.
+ code (int): The HTTP response code.
+ json_bytes (bytes): The json bytes to use as the response body.
+ send_cors (bool): Whether to send Cross-Origin Resource Sharing headers
+ http://www.w3.org/TR/cors/
+ Returns:
+ twisted.web.server.NOT_DONE_YET"""
+
+ request.setResponseCode(code, message=response_code_message)
+ request.setHeader(b"Content-Type", b"application/json")
+ request.setHeader(b"Server", version_string)
+ request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
+
+ if send_cors:
+ request.setHeader("Access-Control-Allow-Origin", "*")
+ request.setHeader("Access-Control-Allow-Methods",
+ "GET, POST, PUT, DELETE, OPTIONS")
+ request.setHeader("Access-Control-Allow-Headers",
+ "Origin, X-Requested-With, Content-Type, Accept")
+
+ request.write(json_bytes)
+ request.finish()
+ return NOT_DONE_YET
+
+
+def _request_user_agent_is_curl(request):
+ user_agents = request.requestHeaders.getRawHeaders(
+ "User-Agent", default=[]
+ )
+ for user_agent in user_agents:
+ if "curl" in user_agent:
+ return True
+ return False
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
new file mode 100644
index 00000000..9cda17fc
--- /dev/null
+++ b/synapse/http/servlet.py
@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" This module contains base REST classes for constructing REST servlets. """
+
+from synapse.api.errors import SynapseError
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+def parse_integer(request, name, default=None, required=False):
+ if name in request.args:
+ try:
+ return int(request.args[name][0])
+ except:
+ message = "Query parameter %r must be an integer" % (name,)
+ raise SynapseError(400, message)
+ else:
+ if required:
+ message = "Missing integer query parameter %r" % (name,)
+ raise SynapseError(400, message)
+ else:
+ return default
+
+
+def parse_boolean(request, name, default=None, required=False):
+ if name in request.args:
+ try:
+ return {
+ "true": True,
+ "false": False,
+ }[request.args[name][0]]
+ except:
+ message = (
+ "Boolean query parameter %r must be one of"
+ " ['true', 'false']"
+ ) % (name,)
+ raise SynapseError(400, message)
+ else:
+ if required:
+ message = "Missing boolean query parameter %r" % (name,)
+ raise SynapseError(400, message)
+ else:
+ return default
+
+
+def parse_string(request, name, default=None, required=False,
+ allowed_values=None, param_type="string"):
+ if name in request.args:
+ value = request.args[name][0]
+ if allowed_values is not None and value not in allowed_values:
+ message = "Query parameter %r must be one of [%s]" % (
+ name, ", ".join(repr(v) for v in allowed_values)
+ )
+ raise SynapseError(400, message)
+ else:
+ return value
+ else:
+ if required:
+ message = "Missing %s query parameter %r" % (param_type, name)
+ raise SynapseError(400, message)
+ else:
+ return default
+
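+# Usage sketch for the parse_* helpers above (parameter names are
+# illustrative):
+#
+#   limit = parse_integer(request, "limit", default=10)
+#   direction = parse_string(request, "dir", default="f",
+#                            allowed_values=("f", "b"))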
+
+class RestServlet(object):
+
+ """ A Synapse REST Servlet.
+
+ An implementing class can either provide its own custom 'register' method,
+ or use the automatic pattern handling provided by the base class.
+
+ To use the latter, the implementing class instead provides a `PATTERN`
+ class attribute containing a pre-compiled regular expression. The
+ automatic register method will then use this pattern to register any of
+ the following instance methods associated with the corresponding HTTP
+ method:
+
+ on_GET
+ on_PUT
+ on_POST
+ on_DELETE
+ on_OPTIONS
+
+ Automatically handles turning CodeMessageExceptions thrown by these methods
+ into the appropriate HTTP response.
+ """
+
+ def register(self, http_server):
+ """ Register this servlet with the given HTTP server. """
+ if hasattr(self, "PATTERN"):
+ pattern = self.PATTERN
+
+ for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"):
+ if hasattr(self, "on_%s" % (method,)):
+ method_handler = getattr(self, "on_%s" % (method,))
+ http_server.register_path(method, pattern, method_handler)
+ else:
+ raise NotImplementedError("RestServlet must register something.")
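+ # Illustrative subclass (class name and path are hypothetical):
+ #
+ #   class PingRestServlet(RestServlet):
+ #       PATTERN = re.compile("^/_matrix/client/api/v1/ping$")
+ #
+ #       def on_GET(self, request):
+ #           return (200, {"pong": True})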
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
new file mode 100644
index 00000000..943d6374
--- /dev/null
+++ b/synapse/metrics/__init__.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Because otherwise 'resource' collides with synapse.metrics.resource
+from __future__ import absolute_import
+
+import logging
+from resource import getrusage, RUSAGE_SELF
+import functools
+import os
+import stat
+import time
+
+from twisted.internet import reactor
+
+from .metric import (
+ CounterMetric, CallbackMetric, DistributionMetric, CacheMetric
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+# We'll keep all the available metrics in a single toplevel dict, one shared
+# for the entire process. We don't currently support per-HomeServer instances
+# of metrics, because in practice any one python VM will host only one
+# HomeServer anyway. This makes the implementation a lot neater.
+all_metrics = {}
+
+
+class Metrics(object):
+ """ A single Metrics object gives a (mutable) slice view of the all_metrics
+ dict, allowing callers to easily register new metrics that are namespaced
+ nicely."""
+
+ def __init__(self, name):
+ self.name_prefix = name
+
+ def _register(self, metric_class, name, *args, **kwargs):
+ full_name = "%s_%s" % (self.name_prefix, name)
+
+ metric = metric_class(full_name, *args, **kwargs)
+
+ all_metrics[full_name] = metric
+ return metric
+
+ def register_counter(self, *args, **kwargs):
+ return self._register(CounterMetric, *args, **kwargs)
+
+ def register_callback(self, *args, **kwargs):
+ return self._register(CallbackMetric, *args, **kwargs)
+
+ def register_distribution(self, *args, **kwargs):
+ return self._register(DistributionMetric, *args, **kwargs)
+
+ def register_cache(self, *args, **kwargs):
+ return self._register(CacheMetric, *args, **kwargs)
+
+
+def get_metrics_for(pkg_name):
+ """ Returns a Metrics instance for conveniently creating metrics
+ namespaced with the given name prefix. """
+
+ # Convert a "package.name" to "package_name" because Prometheus doesn't
+ # let us use . in metric names
+ return Metrics(pkg_name.replace(".", "_"))
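+
+# Usage sketch (metric names illustrative):
+#
+#   metrics = get_metrics_for(__name__)
+#   requests_counter = metrics.register_counter("requests", labels=["method"])
+#   requests_counter.inc("GET")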
+
+
+def render_all():
+ strs = []
+
+ # TODO(paul): Internal hack
+ update_resource_metrics()
+
+ for name in sorted(all_metrics.keys()):
+ try:
+ strs += all_metrics[name].render()
+ except Exception:
+ strs += ["# FAILED to render %s" % name]
+ logger.exception("Failed to render %s metric", name)
+
+ strs.append("") # to generate a final CRLF
+
+ return "\n".join(strs)
+
+
+# Now register some standard process-wide state metrics, to give indications of
+# process resource usage
+
+rusage = None
+
+
+def update_resource_metrics():
+ global rusage
+ rusage = getrusage(RUSAGE_SELF)
+
+resource_metrics = get_metrics_for("process.resource")
+
+# msecs
+resource_metrics.register_callback("utime", lambda: rusage.ru_utime * 1000)
+resource_metrics.register_callback("stime", lambda: rusage.ru_stime * 1000)
+
+# kilobytes
+resource_metrics.register_callback("maxrss", lambda: rusage.ru_maxrss * 1024)
+
+TYPES = {
+ stat.S_IFSOCK: "SOCK",
+ stat.S_IFLNK: "LNK",
+ stat.S_IFREG: "REG",
+ stat.S_IFBLK: "BLK",
+ stat.S_IFDIR: "DIR",
+ stat.S_IFCHR: "CHR",
+ stat.S_IFIFO: "FIFO",
+}
+
+
+def _process_fds():
+ counts = {(k,): 0 for k in TYPES.values()}
+ counts[("other",)] = 0
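+    # Keys are 1-tuples because label values are handled as tuples; a
+    # scalar metric would be keyed by the empty tuple () instead.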
+
+ # Not every OS will have a /proc/self/fd directory
+ if not os.path.exists("/proc/self/fd"):
+ return counts
+
+ for fd in os.listdir("/proc/self/fd"):
+ try:
+ s = os.stat("/proc/self/fd/%s" % (fd))
+ fmt = stat.S_IFMT(s.st_mode)
+ if fmt in TYPES:
+ t = TYPES[fmt]
+ else:
+ t = "other"
+
+ counts[(t,)] += 1
+ except OSError:
+            # the directory handle used by listdir() is usually gone by now
+ pass
+
+ return counts
+
+get_metrics_for("process").register_callback("fds", _process_fds, labels=["type"])
+
+reactor_metrics = get_metrics_for("reactor")
+tick_time = reactor_metrics.register_distribution("tick_time")
+pending_calls_metric = reactor_metrics.register_distribution("pending_calls")
+
+
+def runUntilCurrentTimer(func):
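+    """Wraps reactor.runUntilCurrent so that each invocation records its
+    running time in tick_time and the number of pending calls in
+    pending_calls_metric."""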
+
+ @functools.wraps(func)
+ def f(*args, **kwargs):
+ now = reactor.seconds()
+ num_pending = 0
+
+        # _newTimedCalls is one long list of *all* pending calls. The loop
+        # below is based on the implementation of reactor.runUntilCurrent.
+ for delayed_call in reactor._newTimedCalls:
+ if delayed_call.time > now:
+ break
+
+ if delayed_call.delayed_time > 0:
+ continue
+
+ num_pending += 1
+
+ num_pending += len(reactor.threadCallQueue)
+
+ start = time.time() * 1000
+ ret = func(*args, **kwargs)
+ end = time.time() * 1000
+ tick_time.inc_by(end - start)
+ pending_calls_metric.inc_by(num_pending)
+ return ret
+
+ return f
+
+
+try:
+ # Ensure the reactor has all the attributes we expect
+ reactor.runUntilCurrent
+ reactor._newTimedCalls
+ reactor.threadCallQueue
+
+ # runUntilCurrent is called when we have pending calls. It is called once
+    # per iteration after fd polling.
+ reactor.runUntilCurrent = runUntilCurrentTimer(reactor.runUntilCurrent)
+except AttributeError:
+ pass
diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py
new file mode 100644
index 00000000..21b37748
--- /dev/null
+++ b/synapse/metrics/metric.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from itertools import chain
+
+
+# TODO(paul): I can't believe Python doesn't have one of these
+def map_concat(func, items):
+ # flatten a list-of-lists
+ return list(chain.from_iterable(map(func, items)))
+
+
+class BaseMetric(object):
+
+ def __init__(self, name, labels=[]):
+ self.name = name
+ self.labels = labels # OK not to clone as we never write it
+
+ def dimension(self):
+ return len(self.labels)
+
+ def is_scalar(self):
+ return not len(self.labels)
+
+ def _render_labelvalue(self, value):
+ # TODO: some kind of value escape
+ return '"%s"' % (value)
+
+ def _render_key(self, values):
+ if self.is_scalar():
+ return ""
+ return "{%s}" % (
+ ",".join(["%s=%s" % (k, self._render_labelvalue(v))
+ for k, v in zip(self.labels, values)])
+ )
+
+ def render(self):
+ return map_concat(self.render_item, sorted(self.counts.keys()))
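+
+    # NOTE: render() assumes the subclass provides self.counts and a
+    # render_item() method; CounterMetric below supplies both.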
+
+
+class CounterMetric(BaseMetric):
+ """The simplest kind of metric; one that stores a monotonically-increasing
+ integer that counts events."""
+
+ def __init__(self, *args, **kwargs):
+ super(CounterMetric, self).__init__(*args, **kwargs)
+
+ self.counts = {}
+
+ # Scalar metrics are never empty
+ if self.is_scalar():
+ self.counts[()] = 0
+
+ def inc_by(self, incr, *values):
+ if len(values) != self.dimension():
+ raise ValueError(
+                "Expected as many values as there are labels (%d)" % (self.dimension())
+ )
+
+ # TODO: should assert that the tag values are all strings
+
+ if values not in self.counts:
+ self.counts[values] = incr
+ else:
+ self.counts[values] += incr
+
+ def inc(self, *values):
+ self.inc_by(1, *values)
+
+ def render_item(self, k):
+ return ["%s%s %d" % (self.name, self._render_key(k), self.counts[k])]
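+
+    # Illustrative output: a counter named "requests" with labels
+    # ["method"] renders lines like 'requests{method="GET"} 10'.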
+
+
+class CallbackMetric(BaseMetric):
+ """A metric that returns the numeric value returned by a callback whenever
+ it is rendered. Typically this is used to implement gauges that yield the
+ size or other state of some in-memory object by actively querying it."""
+
+ def __init__(self, name, callback, labels=[]):
+ super(CallbackMetric, self).__init__(name, labels=labels)
+
+ self.callback = callback
+
+ def render(self):
+ value = self.callback()
+
+ if self.is_scalar():
+ return ["%s %d" % (self.name, value)]
+
+ return ["%s%s %d" % (self.name, self._render_key(k), value[k])
+ for k in sorted(value.keys())]
+
+
+class DistributionMetric(object):
+ """A combination of an event counter and an accumulator, which counts
+ both the number of events and accumulates the total value. Typically this
+ could be used to keep track of method-running times, or other distributions
+    of values that occur in discrete occurrences.
+
+ TODO(paul): Try to export some heatmap-style stats?
+ """
+
+ def __init__(self, name, *args, **kwargs):
+ self.counts = CounterMetric(name + ":count", **kwargs)
+ self.totals = CounterMetric(name + ":total", **kwargs)
+
+ def inc_by(self, inc, *values):
+ self.counts.inc(*values)
+ self.totals.inc_by(inc, *values)
+
+ def render(self):
+ return self.counts.render() + self.totals.render()
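+
+    # A distribution named "foo" thus renders two counters, "foo:count"
+    # (number of events) and "foo:total" (accumulated value); the mean per
+    # event can be derived as foo:total / foo:count.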
+
+
+class CacheMetric(object):
+ """A combination of two CounterMetrics, one to count cache hits and one to
+ count a total, and a callback metric to yield the current size.
+
+ This metric generates standard metric name pairs, so that monitoring rules
+ can easily be applied to measure hit ratio."""
+
+ def __init__(self, name, size_callback, labels=[]):
+ self.name = name
+
+ self.hits = CounterMetric(name + ":hits", labels=labels)
+ self.total = CounterMetric(name + ":total", labels=labels)
+
+ self.size = CallbackMetric(
+ name + ":size",
+ callback=size_callback,
+ labels=labels,
+ )
+
+ def inc_hits(self, *values):
+ self.hits.inc(*values)
+ self.total.inc(*values)
+
+ def inc_misses(self, *values):
+ self.total.inc(*values)
+
+ def render(self):
+ return self.hits.render() + self.total.render() + self.size.render()
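+
+    # A cache named "bar" renders "bar:hits", "bar:total" and "bar:size";
+    # monitoring rules can compute the hit ratio as bar:hits / bar:total.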
diff --git a/synapse/metrics/resource.py b/synapse/metrics/resource.py
new file mode 100644
index 00000000..0af4b3eb
--- /dev/null
+++ b/synapse/metrics/resource.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.web.resource import Resource
+
+import synapse.metrics
+
+
+METRICS_PREFIX = "/_synapse/metrics"
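+# The URL path at which the rendered metrics are exposed for scraping; the
+# response body is the plain-text output of synapse.metrics.render_all().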
+
+
+class MetricsResource(Resource):
+ isLeaf = True
+
+ def __init__(self, hs):
+ Resource.__init__(self) # Resource is old-style, so no super()
+
+ self.hs = hs
+
+ def render_GET(self, request):
+ response = synapse.metrics.render_all()
+
+ request.setHeader("Content-Type", "text/plain")
+ request.setHeader("Content-Length", str(len(response)))
+
+        # Encode explicitly as UTF-8
+        return response.encode("utf-8")
diff --git a/synapse/notifier.py b/synapse/notifier.py
new file mode 100644
index 00000000..e3b42e23
--- /dev/null
+++ b/synapse/notifier.py
@@ -0,0 +1,453 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+from synapse.api.constants import EventTypes
+from synapse.api.errors import AuthError
+
+from synapse.util.logutils import log_function
+from synapse.util.async import run_on_reactor, ObservableDeferred
+from synapse.types import StreamToken
+import synapse.metrics
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+metrics = synapse.metrics.get_metrics_for(__name__)
+
+notified_events_counter = metrics.register_counter("notified_events")
+
+
+# TODO(paul): Should be shared somewhere
+def count(func, l):
+ """Return the number of items in l for which func returns true."""
+ n = 0
+ for x in l:
+ if func(x):
+ n += 1
+ return n
+
+
+class _NotificationListener(object):
+ """ This represents a single client connection to the events stream.
+ The events stream handler will have yielded to the deferred, so to
+ notify the handler it is sufficient to resolve the deferred.
+ """
+ __slots__ = ["deferred"]
+
+ def __init__(self, deferred):
+ self.deferred = deferred
+
+
+class _NotifierUserStream(object):
+ """This represents a user connected to the event stream.
+ It tracks the most recent stream token for that user.
+ At a given point a user may have a number of streams listening for
+ events.
+
+ This listener will also keep track of which rooms it is listening in
+ so that it can remove itself from the indexes in the Notifier class.
+ """
+
+ def __init__(self, user, rooms, current_token, time_now_ms,
+ appservice=None):
+ self.user = str(user)
+ self.appservice = appservice
+ self.rooms = set(rooms)
+ self.current_token = current_token
+ self.last_notified_ms = time_now_ms
+
+ self.notify_deferred = ObservableDeferred(defer.Deferred())
+
+ def notify(self, stream_key, stream_id, time_now_ms):
+ """Notify any listeners for this user of a new event from an
+ event source.
+ Args:
+ stream_key(str): The stream the event came from.
+ stream_id(str): The new id for the stream the event came from.
+ time_now_ms(int): The current time in milliseconds.
+ """
+ self.current_token = self.current_token.copy_and_advance(
+ stream_key, stream_id
+ )
+ self.last_notified_ms = time_now_ms
+        notify_deferred = self.notify_deferred
+        self.notify_deferred = ObservableDeferred(defer.Deferred())
+        notify_deferred.callback(self.current_token)
+
+ def remove(self, notifier):
+ """ Remove this listener from all the indexes in the Notifier
+ it knows about.
+ """
+
+ for room in self.rooms:
+ lst = notifier.room_to_user_streams.get(room, set())
+ lst.discard(self)
+
+ notifier.user_to_user_stream.pop(self.user)
+
+ if self.appservice:
+ notifier.appservice_to_user_streams.get(
+ self.appservice, set()
+ ).discard(self)
+
+ def count_listeners(self):
+ return len(self.notify_deferred.observers())
+
+ def new_listener(self, token):
+ """Returns a deferred that is resolved when there is a new token
+ greater than the given token.
+ """
+ if self.current_token.is_after(token):
+ return _NotificationListener(defer.succeed(self.current_token))
+ else:
+ return _NotificationListener(self.notify_deferred.observe())
+
+
+class Notifier(object):
+ """ This class is responsible for notifying any listeners when there are
+    new events available for them.
+
+ Primarily used from the /events stream.
+ """
+
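+    # Streams with no attached listeners that haven't been notified within
+    # this window (10 minutes) are culled by remove_expired_streams().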
+ UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000
+
+ def __init__(self, hs):
+ self.hs = hs
+
+ self.user_to_user_stream = {}
+ self.room_to_user_streams = {}
+ self.appservice_to_user_streams = {}
+
+ self.event_sources = hs.get_event_sources()
+ self.store = hs.get_datastore()
+ self.pending_new_room_events = []
+
+ self.clock = hs.get_clock()
+
+ hs.get_distributor().observe(
+ "user_joined_room", self._user_joined_room
+ )
+
+ self.clock.looping_call(
+ self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
+ )
+
+        # This is not a very cheap test to perform, but it's only executed
+        # when rendering the metrics page, which is likely to be scraped at
+        # most once per minute.
+ def count_listeners():
+ all_user_streams = set()
+
+ for x in self.room_to_user_streams.values():
+ all_user_streams |= x
+ for x in self.user_to_user_stream.values():
+ all_user_streams.add(x)
+ for x in self.appservice_to_user_streams.values():
+ all_user_streams |= x
+
+ return sum(stream.count_listeners() for stream in all_user_streams)
+ metrics.register_callback("listeners", count_listeners)
+
+ metrics.register_callback(
+ "rooms",
+ lambda: count(bool, self.room_to_user_streams.values()),
+ )
+ metrics.register_callback(
+ "users",
+ lambda: len(self.user_to_user_stream),
+ )
+ metrics.register_callback(
+ "appservices",
+ lambda: count(bool, self.appservice_to_user_streams.values()),
+ )
+
+ @log_function
+ @defer.inlineCallbacks
+ def on_new_room_event(self, event, room_stream_id, max_room_stream_id,
+ extra_users=[]):
+ """ Used by handlers to inform the notifier something has happened
+        in the room, event-wise.
+
+ This triggers the notifier to wake up any listeners that are
+ listening to the room, and any listeners for the users in the
+ `extra_users` param.
+
+        The events can be persisted out of order. The notifier will wait
+ until all previous events have been persisted before notifying
+ the client streams.
+ """
+ yield run_on_reactor()
+
+ self.pending_new_room_events.append((
+ room_stream_id, event, extra_users
+ ))
+ self._notify_pending_new_room_events(max_room_stream_id)
+
+ def _notify_pending_new_room_events(self, max_room_stream_id):
+ """Notify for the room events that were queued waiting for a previous
+ event to be persisted.
+ Args:
+ max_room_stream_id(int): The highest stream_id below which all
+ events have been persisted.
+ """
+ pending = self.pending_new_room_events
+ self.pending_new_room_events = []
+ for room_stream_id, event, extra_users in pending:
+ if room_stream_id > max_room_stream_id:
+ self.pending_new_room_events.append((
+ room_stream_id, event, extra_users
+ ))
+ else:
+ self._on_new_room_event(event, room_stream_id, extra_users)
+
+ def _on_new_room_event(self, event, room_stream_id, extra_users=[]):
+ """Notify any user streams that are interested in this room event"""
+ # poke any interested application service.
+ self.hs.get_handlers().appservice_handler.notify_interested_services(
+ event
+ )
+
+ app_streams = set()
+
+ for appservice in self.appservice_to_user_streams:
+ # TODO (kegan): Redundant appservice listener checks?
+ # App services will already be in the room_to_user_streams set, but
+ # that isn't enough. They need to be checked here in order to
+ # receive *invites* for users they are interested in. Does this
+            # make the room_to_user_streams check somewhat obsolete?
+ if appservice.is_interested(event):
+ app_user_streams = self.appservice_to_user_streams.get(
+ appservice, set()
+ )
+ app_streams |= app_user_streams
+
+ self.on_new_event(
+ "room_key", room_stream_id,
+ users=extra_users,
+ rooms=[event.room_id],
+ extra_streams=app_streams,
+ )
+
+ @defer.inlineCallbacks
+ @log_function
+ def on_new_event(self, stream_key, new_token, users=[], rooms=[],
+ extra_streams=set()):
+        """ Used to inform listeners that something has happened, event-wise.
+
+ Will wake up all listeners for the given users and rooms.
+ """
+ yield run_on_reactor()
+ user_streams = set()
+
+ for user in users:
+ user_stream = self.user_to_user_stream.get(str(user))
+ if user_stream is not None:
+ user_streams.add(user_stream)
+
+ for room in rooms:
+ user_streams |= self.room_to_user_streams.get(room, set())
+
+ time_now_ms = self.clock.time_msec()
+ for user_stream in user_streams:
+ try:
+ user_stream.notify(stream_key, new_token, time_now_ms)
+ except:
+ logger.exception("Failed to notify listener")
+
+ @defer.inlineCallbacks
+ def wait_for_events(self, user, timeout, callback, room_ids=None,
+ from_token=StreamToken("s0", "0", "0", "0", "0")):
+        """Wait until the callback returns a non-empty response or the
+ timeout fires.
+ """
+ user = str(user)
+ user_stream = self.user_to_user_stream.get(user)
+ if user_stream is None:
+ appservice = yield self.store.get_app_service_by_user_id(user)
+ current_token = yield self.event_sources.get_current_token()
+ if room_ids is None:
+ rooms = yield self.store.get_rooms_for_user(user)
+ room_ids = [room.room_id for room in rooms]
+ user_stream = _NotifierUserStream(
+ user=user,
+ rooms=room_ids,
+ appservice=appservice,
+ current_token=current_token,
+ time_now_ms=self.clock.time_msec(),
+ )
+ self._register_with_keys(user_stream)
+
+ result = None
+ if timeout:
+ # Will be set to a _NotificationListener that we'll be waiting on.
+ # Allows us to cancel it.
+ listener = None
+
+ def timed_out():
+ if listener:
+ listener.deferred.cancel()
+ timer = self.clock.call_later(timeout/1000., timed_out)
+
+ prev_token = from_token
+ while not result:
+ try:
+ current_token = user_stream.current_token
+
+ result = yield callback(prev_token, current_token)
+ if result:
+ break
+
+ # Now we wait for the _NotifierUserStream to be told there
+ # is a new token.
+ # We need to supply the token we supplied to callback so
+ # that we don't miss any current_token updates.
+ prev_token = current_token
+ listener = user_stream.new_listener(prev_token)
+ yield listener.deferred
+ except defer.CancelledError:
+ break
+
+ self.clock.cancel_call_later(timer, ignore_errs=True)
+ else:
+ current_token = user_stream.current_token
+ result = yield callback(from_token, current_token)
+
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def get_events_for(self, user, pagination_config, timeout,
+ only_room_events=False,
+ is_guest=False, guest_room_id=None):
+ """ For the given user and rooms, return any new events for them. If
+ there are no new events wait for up to `timeout` milliseconds for any
+ new events to happen before returning.
+
+ If `only_room_events` is `True` only room events will be returned.
+ """
+ from_token = pagination_config.from_token
+ if not from_token:
+ from_token = yield self.event_sources.get_current_token()
+
+ limit = pagination_config.limit
+
+ room_ids = []
+ if is_guest:
+ if guest_room_id:
+ if not self._is_world_readable(guest_room_id):
+ raise AuthError(403, "Guest access not allowed")
+ room_ids = [guest_room_id]
+ else:
+ rooms = yield self.store.get_rooms_for_user(user.to_string())
+ room_ids = [room.room_id for room in rooms]
+
+ @defer.inlineCallbacks
+ def check_for_updates(before_token, after_token):
+ if not after_token.is_after(before_token):
+ defer.returnValue(None)
+
+ events = []
+ end_token = from_token
+
+ for name, source in self.event_sources.sources.items():
+ keyname = "%s_key" % name
+ before_id = getattr(before_token, keyname)
+ after_id = getattr(after_token, keyname)
+ if before_id == after_id:
+ continue
+ if only_room_events and name != "room":
+ continue
+ new_events, new_key = yield source.get_new_events(
+ user=user,
+ from_key=getattr(from_token, keyname),
+ limit=limit,
+ is_guest=is_guest,
+ room_ids=room_ids,
+ )
+
+ if name == "room":
+ room_member_handler = self.hs.get_handlers().room_member_handler
+ new_events = yield room_member_handler._filter_events_for_client(
+ user.to_string(),
+ new_events,
+ is_guest=is_guest,
+ require_all_visible_for_guests=False
+ )
+
+ events.extend(new_events)
+ end_token = end_token.copy_and_replace(keyname, new_key)
+
+ if events:
+ defer.returnValue((events, (from_token, end_token)))
+ else:
+ defer.returnValue(None)
+
+ result = yield self.wait_for_events(
+ user, timeout, check_for_updates, room_ids=room_ids, from_token=from_token
+ )
+
+ if result is None:
+ result = ([], (from_token, from_token))
+
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def _is_world_readable(self, room_id):
+ state = yield self.hs.get_state_handler().get_current_state(
+ room_id,
+ EventTypes.RoomHistoryVisibility
+ )
+ if state and "history_visibility" in state.content:
+ defer.returnValue(state.content["history_visibility"] == "world_readable")
+ else:
+ defer.returnValue(False)
+
+ @log_function
+ def remove_expired_streams(self):
+ time_now_ms = self.clock.time_msec()
+ expired_streams = []
+ expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS
+ for stream in self.user_to_user_stream.values():
+ if stream.count_listeners():
+ continue
+ if stream.last_notified_ms < expire_before_ts:
+ expired_streams.append(stream)
+
+ for expired_stream in expired_streams:
+ expired_stream.remove(self)
+
+ @log_function
+ def _register_with_keys(self, user_stream):
+ self.user_to_user_stream[user_stream.user] = user_stream
+
+ for room in user_stream.rooms:
+ s = self.room_to_user_streams.setdefault(room, set())
+ s.add(user_stream)
+
+ if user_stream.appservice:
+            self.appservice_to_user_streams.setdefault(
+ user_stream.appservice, set()
+ ).add(user_stream)
+
+ def _user_joined_room(self, user, room_id):
+ user = str(user)
+ new_user_stream = self.user_to_user_stream.get(user)
+ if new_user_stream is not None:
+ room_streams = self.room_to_user_streams.setdefault(room_id, set())
+ room_streams.add(new_user_stream)
+ new_user_stream.rooms.add(room_id)
diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py
new file mode 100644
index 00000000..0e0c61de
--- /dev/null
+++ b/synapse/push/__init__.py
@@ -0,0 +1,474 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.streams.config import PaginationConfig
+from synapse.types import StreamToken, UserID
+
+import synapse.util.async
+import baserules
+
+import logging
+import simplejson as json
+import re
+import random
+
+logger = logging.getLogger(__name__)
+
+
+class Pusher(object):
+ INITIAL_BACKOFF = 1000
+ MAX_BACKOFF = 60 * 60 * 1000
+ GIVE_UP_AFTER = 24 * 60 * 60 * 1000
+ DEFAULT_ACTIONS = ['dont_notify']
+
+ INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")
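+    # Parses the 'is' field of room_member_count conditions: ">=10" yields
+    # groups ('>=', '10'); a bare "2" yields ('', '2').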
+
+ def __init__(self, _hs, profile_tag, user_name, app_id,
+ app_display_name, device_display_name, pushkey, pushkey_ts,
+ data, last_token, last_success, failing_since):
+ self.hs = _hs
+ self.evStreamHandler = self.hs.get_handlers().event_stream_handler
+ self.store = self.hs.get_datastore()
+ self.clock = self.hs.get_clock()
+ self.profile_tag = profile_tag
+ self.user_name = user_name
+ self.app_id = app_id
+ self.app_display_name = app_display_name
+ self.device_display_name = device_display_name
+ self.pushkey = pushkey
+ self.pushkey_ts = pushkey_ts
+ self.data = data
+ self.last_token = last_token
+ self.last_success = last_success # not actually used
+ self.backoff_delay = Pusher.INITIAL_BACKOFF
+ self.failing_since = failing_since
+ self.alive = True
+
+ # The last value of last_active_time that we saw
+ self.last_last_active_time = 0
+ self.has_unread = True
+
+ @defer.inlineCallbacks
+ def _actions_for_event(self, ev):
+ """
+ This should take into account notification settings that the user
+ has configured both globally and per-room when we have the ability
+ to do such things.
+ """
+ if ev['user_id'] == self.user_name:
+ # let's assume you probably know about messages you sent yourself
+ defer.returnValue(['dont_notify'])
+
+ rawrules = yield self.store.get_push_rules_for_user(self.user_name)
+
+ rules = []
+ for rawrule in rawrules:
+ rule = dict(rawrule)
+ rule['conditions'] = json.loads(rawrule['conditions'])
+ rule['actions'] = json.loads(rawrule['actions'])
+ rules.append(rule)
+
+ enabled_map = yield self.store.get_push_rules_enabled_for_user(self.user_name)
+
+ user = UserID.from_string(self.user_name)
+
+ rules = baserules.list_with_base_rules(rules, user)
+
+ room_id = ev['room_id']
+
+ # get *our* member event for display name matching
+ my_display_name = None
+ our_member_event = yield self.store.get_current_state(
+ room_id=room_id,
+ event_type='m.room.member',
+ state_key=self.user_name,
+ )
+ if our_member_event:
+ my_display_name = our_member_event[0].content.get("displayname")
+
+ room_members = yield self.store.get_users_in_room(room_id)
+ room_member_count = len(room_members)
+
+ for r in rules:
+ if r['rule_id'] in enabled_map:
+ r['enabled'] = enabled_map[r['rule_id']]
+ elif 'enabled' not in r:
+ r['enabled'] = True
+ if not r['enabled']:
+ continue
+ matches = True
+
+ conditions = r['conditions']
+ actions = r['actions']
+
+ for c in conditions:
+ matches &= self._event_fulfills_condition(
+ ev, c, display_name=my_display_name,
+ room_member_count=room_member_count
+ )
+ logger.debug(
+ "Rule %s %s",
+ r['rule_id'], "matches" if matches else "doesn't match"
+ )
+            # ignore rules with no actions (we have an explicit 'dont_notify')
+ if len(actions) == 0:
+ logger.warn(
+ "Ignoring rule id %s with no actions for user %s",
+ r['rule_id'], self.user_name
+ )
+ continue
+ if matches:
+ logger.info(
+ "%s matches for user %s, event %s",
+ r['rule_id'], self.user_name, ev['event_id']
+ )
+ defer.returnValue(actions)
+
+ logger.info(
+ "No rules match for user %s, event %s",
+ self.user_name, ev['event_id']
+ )
+ defer.returnValue(Pusher.DEFAULT_ACTIONS)
+
+ @staticmethod
+ def _glob_to_regexp(glob):
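+        """Convert a pattern glob to a regexp source string, e.g. "foo*"
+        becomes "foo.*?", "a?c" becomes "a.c" and "[!a-z]" becomes "[^a-z]".
+        """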
+ r = re.escape(glob)
+ r = re.sub(r'\\\*', r'.*?', r)
+ r = re.sub(r'\\\?', r'.', r)
+
+ # handle [abc], [a-z] and [!a-z] style ranges.
+ r = re.sub(r'\\\[(\\\!|)(.*)\\\]',
+ lambda x: ('[%s%s]' % (x.group(1) and '^' or '',
+ re.sub(r'\\\-', '-', x.group(2)))), r)
+ return r
+
+ def _event_fulfills_condition(self, ev, condition, display_name, room_member_count):
+ if condition['kind'] == 'event_match':
+ if 'pattern' not in condition:
+ logger.warn("event_match condition with no pattern")
+ return False
+ # XXX: optimisation: cache our pattern regexps
+ if condition['key'] == 'content.body':
+ r = r'\b%s\b' % self._glob_to_regexp(condition['pattern'])
+ else:
+ r = r'^%s$' % self._glob_to_regexp(condition['pattern'])
+ val = _value_for_dotted_key(condition['key'], ev)
+ if val is None:
+ return False
+ return re.search(r, val, flags=re.IGNORECASE) is not None
+
+ elif condition['kind'] == 'device':
+ if 'profile_tag' not in condition:
+ return True
+ return condition['profile_tag'] == self.profile_tag
+
+ elif condition['kind'] == 'contains_display_name':
+ # This is special because display names can be different
+ # between rooms and so you can't really hard code it in a rule.
+ # Optimisation: we should cache these names and update them from
+ # the event stream.
+ if 'content' not in ev or 'body' not in ev['content']:
+ return False
+ if not display_name:
+ return False
+ return re.search(
+ r"\b%s\b" % re.escape(display_name), ev['content']['body'],
+ flags=re.IGNORECASE
+ ) is not None
+
+ elif condition['kind'] == 'room_member_count':
+ if 'is' not in condition:
+ return False
+ m = Pusher.INEQUALITY_EXPR.match(condition['is'])
+ if not m:
+ return False
+ ineq = m.group(1)
+ rhs = m.group(2)
+ if not rhs.isdigit():
+ return False
+ rhs = int(rhs)
+
+ if ineq == '' or ineq == '==':
+ return room_member_count == rhs
+ elif ineq == '<':
+ return room_member_count < rhs
+ elif ineq == '>':
+ return room_member_count > rhs
+ elif ineq == '>=':
+ return room_member_count >= rhs
+ elif ineq == '<=':
+ return room_member_count <= rhs
+ else:
+ return False
+ else:
+ return True
+
+ @defer.inlineCallbacks
+ def get_context_for_event(self, ev):
+ name_aliases = yield self.store.get_room_name_and_aliases(
+ ev['room_id']
+ )
+
+ ctx = {'aliases': name_aliases[1]}
+ if name_aliases[0] is not None:
+ ctx['name'] = name_aliases[0]
+
+ their_member_events_for_room = yield self.store.get_current_state(
+ room_id=ev['room_id'],
+ event_type='m.room.member',
+ state_key=ev['user_id']
+ )
+ for mev in their_member_events_for_room:
+ if mev.content['membership'] == 'join' and 'displayname' in mev.content:
+ dn = mev.content['displayname']
+ if dn is not None:
+ ctx['sender_display_name'] = dn
+
+ defer.returnValue(ctx)
+
+ @defer.inlineCallbacks
+ def start(self):
+ if not self.last_token:
+            # First-time setup: get a token to start from (we can't
+            # just start from no token, i.e. 'now', because we need
+            # the result to be reproducible in case we fail to
+            # dispatch the push)
+ config = PaginationConfig(from_token=None, limit='1')
+ chunk = yield self.evStreamHandler.get_stream(
+ self.user_name, config, timeout=0, affect_presence=False,
+ only_room_events=True
+ )
+ self.last_token = chunk['end']
+ self.store.update_pusher_last_token(
+ self.app_id, self.pushkey, self.user_name, self.last_token
+ )
+ logger.info("Pusher %s for user %s starting from token %s",
+ self.pushkey, self.user_name, self.last_token)
+
+ wait = 0
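+        # On failure, back off exponentially: 1s, 2s, 4s, ... capped at
+        # 1800s between retries of the pusher loop.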
+ while self.alive:
+ try:
+ if wait > 0:
+ yield synapse.util.async.sleep(wait)
+ yield self.get_and_dispatch()
+ wait = 0
+ except:
+ if wait == 0:
+ wait = 1
+ else:
+ wait = min(wait * 2, 1800)
+ logger.exception(
+ "Exception in pusher loop for pushkey %s. Pausing for %ds",
+ self.pushkey, wait
+ )
+
+ @defer.inlineCallbacks
+ def get_and_dispatch(self):
+ from_tok = StreamToken.from_string(self.last_token)
+ config = PaginationConfig(from_token=from_tok, limit='1')
+ timeout = (300 + random.randint(-60, 60)) * 1000
+ chunk = yield self.evStreamHandler.get_stream(
+ self.user_name, config, timeout=timeout, affect_presence=False,
+ only_room_events=True
+ )
+
+ # limiting to 1 may get 1 event plus 1 presence event, so
+ # pick out the actual event
+ single_event = None
+ for c in chunk['chunk']:
+ if 'event_id' in c: # Hmmm...
+ single_event = c
+ break
+ if not single_event:
+ self.last_token = chunk['end']
+ logger.debug("Event stream timeout for pushkey %s", self.pushkey)
+ yield self.store.update_pusher_last_token(
+ self.app_id,
+ self.pushkey,
+ self.user_name,
+ self.last_token
+ )
+ return
+
+ if not self.alive:
+ return
+
+ processed = False
+ actions = yield self._actions_for_event(single_event)
+ tweaks = _tweaks_for_actions(actions)
+
+ if len(actions) == 0:
+ logger.warn("Empty actions! Using default action.")
+ actions = Pusher.DEFAULT_ACTIONS
+
+ if 'notify' not in actions and 'dont_notify' not in actions:
+ logger.warn("Neither notify nor dont_notify in actions: adding default")
+ actions.extend(Pusher.DEFAULT_ACTIONS)
+
+ if 'dont_notify' in actions:
+ logger.debug(
+ "%s for %s: dont_notify",
+ single_event['event_id'], self.user_name
+ )
+ processed = True
+ else:
+ rejected = yield self.dispatch_push(single_event, tweaks)
+ self.has_unread = True
+ if isinstance(rejected, list) or isinstance(rejected, tuple):
+ processed = True
+ for pk in rejected:
+ if pk != self.pushkey:
+ # for sanity, we only remove the pushkey if it
+ # was the one we actually sent...
+ logger.warn(
+ ("Ignoring rejected pushkey %s because we"
+ " didn't send it"), pk
+ )
+ else:
+ logger.info(
+ "Pushkey %s was rejected: removing",
+ pk
+ )
+ yield self.hs.get_pusherpool().remove_pusher(
+ self.app_id, pk, self.user_name
+ )
+
+ if not self.alive:
+ return
+
+ if processed:
+ self.backoff_delay = Pusher.INITIAL_BACKOFF
+ self.last_token = chunk['end']
+ yield self.store.update_pusher_last_token_and_success(
+ self.app_id,
+ self.pushkey,
+ self.user_name,
+ self.last_token,
+ self.clock.time_msec()
+ )
+ if self.failing_since:
+ self.failing_since = None
+ yield self.store.update_pusher_failing_since(
+ self.app_id,
+ self.pushkey,
+ self.user_name,
+ self.failing_since)
+ else:
+ if not self.failing_since:
+ self.failing_since = self.clock.time_msec()
+ yield self.store.update_pusher_failing_since(
+ self.app_id,
+ self.pushkey,
+ self.user_name,
+ self.failing_since
+ )
+
+ if (self.failing_since and
+ self.failing_since <
+ self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
+ # we really only give up so that if the URL gets
+ # fixed, we don't suddenly deliver a load
+ # of old notifications.
+ logger.warn("Giving up on a notification to user %s, "
+ "pushkey %s",
+ self.user_name, self.pushkey)
+ self.backoff_delay = Pusher.INITIAL_BACKOFF
+ self.last_token = chunk['end']
+ yield self.store.update_pusher_last_token(
+ self.app_id,
+ self.pushkey,
+ self.user_name,
+ self.last_token
+ )
+
+ self.failing_since = None
+ yield self.store.update_pusher_failing_since(
+ self.app_id,
+ self.pushkey,
+ self.user_name,
+ self.failing_since
+ )
+ else:
+                    logger.warn("Failed to dispatch push for user %s "
+                                "(failing for %dms). "
+                                "Trying again in %dms",
+ self.user_name,
+ self.clock.time_msec() - self.failing_since,
+ self.backoff_delay)
+ yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
+ self.backoff_delay *= 2
+ if self.backoff_delay > Pusher.MAX_BACKOFF:
+ self.backoff_delay = Pusher.MAX_BACKOFF
+
+ def stop(self):
+ self.alive = False
+
+ def dispatch_push(self, p, tweaks):
+ """
+ Overridden by implementing classes to actually deliver the notification
+ Args:
+ p: The event to notify for as a single event from the event stream
+        Returns: If the notification was delivered, a list containing any
+            pushkeys that were rejected by the push gateway.
+            False if the notification could not be delivered (i.e.
+            should be retried).
+ """
+ pass
+
+ def reset_badge_count(self):
+ pass
+
+ def presence_changed(self, state):
+ """
+        We clear badge counts whenever a user's last_active time is bumped.
+ This is by no means perfect but I think it's the best we can do
+ without read receipts.
+ """
+ if 'last_active' in state.state:
+ last_active = state.state['last_active']
+ if last_active > self.last_last_active_time:
+ self.last_last_active_time = last_active
+ if self.has_unread:
+ logger.info("Resetting badge count for %s", self.user_name)
+ self.reset_badge_count()
+ self.has_unread = False
+
+
+def _value_for_dotted_key(dotted_key, event):
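+    """Walk the event dict along the dot-separated key: for example,
+    "content.body" returns event["content"]["body"], or None if any
+    component is missing."""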
+ parts = dotted_key.split(".")
+ val = event
+ while len(parts) > 0:
+ if parts[0] not in val:
+ return None
+ val = val[parts[0]]
+ parts = parts[1:]
+ return val
+
+
+def _tweaks_for_actions(actions):
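+    """Collect set_tweak actions into a dict: for example, the actions
+    ['notify', {'set_tweak': 'sound', 'value': 'default'}] yield
+    {'sound': 'default'}."""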
+ tweaks = {}
+ for a in actions:
+ if not isinstance(a, dict):
+ continue
+ if 'set_tweak' in a and 'value' in a:
+ tweaks[a['set_tweak']] = a['value']
+ return tweaks
+
+
+class PusherConfigException(Exception):
+ def __init__(self, msg):
+ super(PusherConfigException, self).__init__(msg)
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
new file mode 100644
index 00000000..1f015a7f
--- /dev/null
+++ b/synapse/push/baserules.py
@@ -0,0 +1,264 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.push.rulekinds import PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP
+
+
+def list_with_base_rules(rawrules, user_name):
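+    """Interleave the server-default rules with the user's rules so that,
+    for each priority class, the default 'prepend' rules come before the
+    user's rules of that class and the default 'append' rules come after.
+    Assumes rawrules is sorted by descending priority_class."""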
+ ruleslist = []
+
+ # shove the server default rules for each kind onto the end of each
+ current_prio_class = PRIORITY_CLASS_INVERSE_MAP.keys()[-1]
+
+ ruleslist.extend(make_base_prepend_rules(
+ user_name, PRIORITY_CLASS_INVERSE_MAP[current_prio_class]
+ ))
+
+ for r in rawrules:
+ if r['priority_class'] < current_prio_class:
+ while r['priority_class'] < current_prio_class:
+ ruleslist.extend(make_base_append_rules(
+ user_name,
+ PRIORITY_CLASS_INVERSE_MAP[current_prio_class]
+ ))
+ current_prio_class -= 1
+ if current_prio_class > 0:
+ ruleslist.extend(make_base_prepend_rules(
+ user_name,
+ PRIORITY_CLASS_INVERSE_MAP[current_prio_class]
+ ))
+
+ ruleslist.append(r)
+
+ while current_prio_class > 0:
+ ruleslist.extend(make_base_append_rules(
+ user_name,
+ PRIORITY_CLASS_INVERSE_MAP[current_prio_class]
+ ))
+ current_prio_class -= 1
+ if current_prio_class > 0:
+ ruleslist.extend(make_base_prepend_rules(
+ user_name,
+ PRIORITY_CLASS_INVERSE_MAP[current_prio_class]
+ ))
+
+ return ruleslist
+
+
+def make_base_append_rules(user, kind):
+ rules = []
+
+ if kind == 'override':
+ rules = make_base_append_override_rules()
+ elif kind == 'underride':
+ rules = make_base_append_underride_rules(user)
+ elif kind == 'content':
+ rules = make_base_append_content_rules(user)
+
+ for r in rules:
+ r['priority_class'] = PRIORITY_CLASS_MAP[kind]
+ r['default'] = True # Deprecated, left for backwards compat
+
+ return rules
+
+
+def make_base_prepend_rules(user, kind):
+ rules = []
+
+ if kind == 'override':
+ rules = make_base_prepend_override_rules()
+
+ for r in rules:
+ r['priority_class'] = PRIORITY_CLASS_MAP[kind]
+ r['default'] = True # Deprecated, left for backwards compat
+
+ return rules
+
+
+def make_base_append_content_rules(user):
+ return [
+ {
+ 'rule_id': 'global/content/.m.rule.contains_user_name',
+ 'conditions': [
+ {
+ 'kind': 'event_match',
+ 'key': 'content.body',
+ 'pattern': user.localpart, # Matrix ID match
+ }
+ ],
+ 'actions': [
+ 'notify',
+ {
+ 'set_tweak': 'sound',
+ 'value': 'default',
+ }, {
+ 'set_tweak': 'highlight'
+ }
+ ]
+ },
+ ]
+
+
+def make_base_prepend_override_rules():
+ return [
+ {
+ 'rule_id': 'global/override/.m.rule.master',
+ 'enabled': False,
+ 'conditions': [],
+ 'actions': [
+ "dont_notify"
+ ]
+ }
+ ]
+
+
+def make_base_append_override_rules():
+ return [
+ {
+ 'rule_id': 'global/override/.m.rule.suppress_notices',
+ 'conditions': [
+ {
+ 'kind': 'event_match',
+ 'key': 'content.msgtype',
+ 'pattern': 'm.notice',
+ }
+ ],
+ 'actions': [
+ 'dont_notify',
+ ]
+ }
+ ]
+
+
+def make_base_append_underride_rules(user):
+ return [
+ {
+ 'rule_id': 'global/underride/.m.rule.call',
+ 'conditions': [
+ {
+ 'kind': 'event_match',
+ 'key': 'type',
+ 'pattern': 'm.call.invite',
+ }
+ ],
+ 'actions': [
+ 'notify',
+ {
+ 'set_tweak': 'sound',
+ 'value': 'ring'
+ }, {
+ 'set_tweak': 'highlight',
+ 'value': False
+ }
+ ]
+ },
+ {
+ 'rule_id': 'global/underride/.m.rule.contains_display_name',
+ 'conditions': [
+ {
+ 'kind': 'contains_display_name'
+ }
+ ],
+ 'actions': [
+ 'notify',
+ {
+ 'set_tweak': 'sound',
+ 'value': 'default'
+ }, {
+ 'set_tweak': 'highlight'
+ }
+ ]
+ },
+ {
+ 'rule_id': 'global/underride/.m.rule.room_one_to_one',
+ 'conditions': [
+ {
+ 'kind': 'room_member_count',
+ 'is': '2'
+ }
+ ],
+ 'actions': [
+ 'notify',
+ {
+ 'set_tweak': 'sound',
+ 'value': 'default'
+ }, {
+ 'set_tweak': 'highlight',
+ 'value': False
+ }
+ ]
+ },
+ {
+ 'rule_id': 'global/underride/.m.rule.invite_for_me',
+ 'conditions': [
+ {
+ 'kind': 'event_match',
+ 'key': 'type',
+ 'pattern': 'm.room.member',
+ },
+ {
+ 'kind': 'event_match',
+ 'key': 'content.membership',
+ 'pattern': 'invite',
+ },
+ {
+ 'kind': 'event_match',
+ 'key': 'state_key',
+ 'pattern': user.to_string(),
+ },
+ ],
+ 'actions': [
+ 'notify',
+ {
+ 'set_tweak': 'sound',
+ 'value': 'default'
+ }, {
+ 'set_tweak': 'highlight',
+ 'value': False
+ }
+ ]
+ },
+ {
+ 'rule_id': 'global/underride/.m.rule.member_event',
+ 'conditions': [
+ {
+ 'kind': 'event_match',
+ 'key': 'type',
+ 'pattern': 'm.room.member',
+ }
+ ],
+ 'actions': [
+ 'notify', {
+ 'set_tweak': 'highlight',
+ 'value': False
+ }
+ ]
+ },
+ {
+ 'rule_id': 'global/underride/.m.rule.message',
+ 'conditions': [
+ {
+ 'kind': 'event_match',
+ 'key': 'type',
+ 'pattern': 'm.room.message',
+ }
+ ],
+ 'actions': [
+ 'notify', {
+ 'set_tweak': 'highlight',
+ 'value': False
+ }
+ ]
+ }
+ ]
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
new file mode 100644
index 00000000..a02fed57
--- /dev/null
+++ b/synapse/push/httppusher.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.push import Pusher, PusherConfigException
+from synapse.http.client import SimpleHttpClient
+
+from twisted.internet import defer
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class HttpPusher(Pusher):
+ def __init__(self, _hs, profile_tag, user_name, app_id,
+ app_display_name, device_display_name, pushkey, pushkey_ts,
+ data, last_token, last_success, failing_since):
+ super(HttpPusher, self).__init__(
+ _hs,
+ profile_tag,
+ user_name,
+ app_id,
+ app_display_name,
+ device_display_name,
+ pushkey,
+ pushkey_ts,
+ data,
+ last_token,
+ last_success,
+ failing_since
+ )
+ if 'url' not in data:
+ raise PusherConfigException(
+ "'url' required in data for HTTP pusher"
+ )
+ self.url = data['url']
+ self.httpCli = SimpleHttpClient(self.hs)
+ self.data_minus_url = {}
+ self.data_minus_url.update(self.data)
+ del self.data_minus_url['url']
+
+ @defer.inlineCallbacks
+ def _build_notification_dict(self, event, tweaks):
+ # we probably do not want to push for every presence update
+ # (we may want to be able to set up notifications when specific
+ # people sign in, but we'd want to only deliver the pertinent ones)
+        # Actually, presence events will not get this far now because they
+        # are filtered out in the main Pusher code.
+ if 'event_id' not in event:
+ defer.returnValue(None)
+
+ ctx = yield self.get_context_for_event(event)
+
+ d = {
+ 'notification': {
+ 'id': event['event_id'],
+ 'room_id': event['room_id'],
+ 'type': event['type'],
+ 'sender': event['user_id'],
+                'counts': {
+                    # We don't mark messages as read yet, so we have no way
+                    # of knowing the real unread count. Just set the badge
+                    # to 1 until we have read receipts.
+ 'unread': 1,
+ # 'missed_calls': 2
+ },
+ 'devices': [
+ {
+ 'app_id': self.app_id,
+ 'pushkey': self.pushkey,
+ 'pushkey_ts': long(self.pushkey_ts / 1000),
+ 'data': self.data_minus_url,
+ 'tweaks': tweaks
+ }
+ ]
+ }
+ }
+ if event['type'] == 'm.room.member':
+ d['notification']['membership'] = event['content']['membership']
+ d['notification']['user_is_target'] = event['state_key'] == self.user_name
+ if 'content' in event:
+ d['notification']['content'] = event['content']
+
+ if len(ctx['aliases']):
+ d['notification']['room_alias'] = ctx['aliases'][0]
+ if 'sender_display_name' in ctx and len(ctx['sender_display_name']) > 0:
+ d['notification']['sender_display_name'] = ctx['sender_display_name']
+ if 'name' in ctx and len(ctx['name']) > 0:
+ d['notification']['room_name'] = ctx['name']
+
+ defer.returnValue(d)
+
+ @defer.inlineCallbacks
+ def dispatch_push(self, event, tweaks):
+ notification_dict = yield self._build_notification_dict(event, tweaks)
+ if not notification_dict:
+ defer.returnValue([])
+ try:
+ resp = yield self.httpCli.post_json_get_json(self.url, notification_dict)
+ except:
+ logger.warn("Failed to push %s ", self.url)
+ defer.returnValue(False)
+ rejected = []
+ if 'rejected' in resp:
+ rejected = resp['rejected']
+ defer.returnValue(rejected)
+
+ @defer.inlineCallbacks
+ def reset_badge_count(self):
+ d = {
+ 'notification': {
+ 'id': '',
+ 'type': None,
+ 'sender': '',
+ 'counts': {
+ 'unread': 0,
+ 'missed_calls': 0
+ },
+ 'devices': [
+ {
+ 'app_id': self.app_id,
+ 'pushkey': self.pushkey,
+ 'pushkey_ts': long(self.pushkey_ts / 1000),
+ 'data': self.data_minus_url,
+ }
+ ]
+ }
+ }
+ try:
+ resp = yield self.httpCli.post_json_get_json(self.url, d)
+ except:
+ logger.exception("Failed to push %s ", self.url)
+ defer.returnValue(False)
+ rejected = []
+ if 'rejected' in resp:
+ rejected = resp['rejected']
+ defer.returnValue(rejected)
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
new file mode 100644
index 00000000..e012c565
--- /dev/null
+++ b/synapse/push/pusherpool.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from httppusher import HttpPusher
+from synapse.push import PusherConfigException
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class PusherPool:
+ def __init__(self, _hs):
+ self.hs = _hs
+ self.store = self.hs.get_datastore()
+ self.pushers = {}
+ self.last_pusher_started = -1
+
+ distributor = self.hs.get_distributor()
+ distributor.observe(
+ "user_presence_changed", self.user_presence_changed
+ )
+
+ @defer.inlineCallbacks
+ def user_presence_changed(self, user, state):
+ user_name = user.to_string()
+
+ # until we have read receipts, pushers use this to reset a user's
+ # badge counters to zero
+ for p in self.pushers.values():
+ if p.user_name == user_name:
+ yield p.presence_changed(state)
+
+ @defer.inlineCallbacks
+ def start(self):
+ pushers = yield self.store.get_all_pushers()
+ self._start_pushers(pushers)
+
+ @defer.inlineCallbacks
+ def add_pusher(self, user_name, access_token, profile_tag, kind, app_id,
+ app_display_name, device_display_name, pushkey, lang, data):
+        # We create the pusher here just to validate the config: it then
+        # gets pulled out of the database, recreated, added and started,
+        # so there is only one code path for adding pushers.
+ self._create_pusher({
+ "user_name": user_name,
+ "kind": kind,
+ "profile_tag": profile_tag,
+ "app_id": app_id,
+ "app_display_name": app_display_name,
+ "device_display_name": device_display_name,
+ "pushkey": pushkey,
+ "ts": self.hs.get_clock().time_msec(),
+ "lang": lang,
+ "data": data,
+ "last_token": None,
+ "last_success": None,
+ "failing_since": None
+ })
+ yield self._add_pusher_to_store(
+ user_name, access_token, profile_tag, kind, app_id,
+ app_display_name, device_display_name,
+ pushkey, lang, data
+ )
+
+ @defer.inlineCallbacks
+ def remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey,
+ not_user_id):
+ to_remove = yield self.store.get_pushers_by_app_id_and_pushkey(
+ app_id, pushkey
+ )
+ for p in to_remove:
+ if p['user_name'] != not_user_id:
+ logger.info(
+ "Removing pusher for app id %s, pushkey %s, user %s",
+ app_id, pushkey, p['user_name']
+ )
+ self.remove_pusher(p['app_id'], p['pushkey'], p['user_name'])
+
+ @defer.inlineCallbacks
+ def remove_pushers_by_user(self, user_id):
+        all_pushers = yield self.store.get_all_pushers()
+ logger.info(
+ "Removing all pushers for user %s",
+ user_id,
+ )
+        for p in all_pushers:
+ if p['user_name'] == user_id:
+ logger.info(
+ "Removing pusher for app id %s, pushkey %s, user %s",
+ p['app_id'], p['pushkey'], p['user_name']
+ )
+ self.remove_pusher(p['app_id'], p['pushkey'], p['user_name'])
+
+ @defer.inlineCallbacks
+ def _add_pusher_to_store(self, user_name, access_token, profile_tag, kind,
+ app_id, app_display_name, device_display_name,
+ pushkey, lang, data):
+ yield self.store.add_pusher(
+ user_name=user_name,
+ access_token=access_token,
+ profile_tag=profile_tag,
+ kind=kind,
+ app_id=app_id,
+ app_display_name=app_display_name,
+ device_display_name=device_display_name,
+ pushkey=pushkey,
+ pushkey_ts=self.hs.get_clock().time_msec(),
+ lang=lang,
+ data=data,
+ )
+ self._refresh_pusher(app_id, pushkey, user_name)
+
+ def _create_pusher(self, pusherdict):
+ if pusherdict['kind'] == 'http':
+ return HttpPusher(
+ self.hs,
+ profile_tag=pusherdict['profile_tag'],
+ user_name=pusherdict['user_name'],
+ app_id=pusherdict['app_id'],
+ app_display_name=pusherdict['app_display_name'],
+ device_display_name=pusherdict['device_display_name'],
+ pushkey=pusherdict['pushkey'],
+ pushkey_ts=pusherdict['ts'],
+ data=pusherdict['data'],
+ last_token=pusherdict['last_token'],
+ last_success=pusherdict['last_success'],
+ failing_since=pusherdict['failing_since']
+ )
+ else:
+ raise PusherConfigException(
+ "Unknown pusher type '%s' for user %s" %
+ (pusherdict['kind'], pusherdict['user_name'])
+ )
+
+ @defer.inlineCallbacks
+ def _refresh_pusher(self, app_id, pushkey, user_name):
+ resultlist = yield self.store.get_pushers_by_app_id_and_pushkey(
+ app_id, pushkey
+ )
+
+ p = None
+ for r in resultlist:
+ if r['user_name'] == user_name:
+ p = r
+
+        if p:
+            self._start_pushers([p])
+
+ def _start_pushers(self, pushers):
+ logger.info("Starting %d pushers", len(pushers))
+ for pusherdict in pushers:
+ try:
+ p = self._create_pusher(pusherdict)
+ except PusherConfigException:
+ logger.exception("Couldn't start a pusher: caught PusherConfigException")
+ continue
+ if p:
+ fullid = "%s:%s:%s" % (
+ pusherdict['app_id'],
+ pusherdict['pushkey'],
+ pusherdict['user_name']
+ )
+ if fullid in self.pushers:
+ self.pushers[fullid].stop()
+ self.pushers[fullid] = p
+ p.start()
+
+ logger.info("Started pushers")
+
+ @defer.inlineCallbacks
+ def remove_pusher(self, app_id, pushkey, user_name):
+ fullid = "%s:%s:%s" % (app_id, pushkey, user_name)
+ if fullid in self.pushers:
+ logger.info("Stopping pusher %s", fullid)
+ self.pushers[fullid].stop()
+ del self.pushers[fullid]
+ yield self.store.delete_pusher_by_app_id_pushkey_user_name(
+ app_id, pushkey, user_name
+ )
diff --git a/synapse/push/rulekinds.py b/synapse/push/rulekinds.py
new file mode 100644
index 00000000..4c591aa6
--- /dev/null
+++ b/synapse/push/rulekinds.py
@@ -0,0 +1,22 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
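+# Numeric priority classes for each push rule kind; higher numbers take
+# precedence when rules are evaluated (override outranks content, etc.).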
+PRIORITY_CLASS_MAP = {
+ 'underride': 1,
+ 'sender': 2,
+ 'room': 3,
+ 'content': 4,
+ 'override': 5,
+}
+PRIORITY_CLASS_INVERSE_MAP = {v: k for k, v in PRIORITY_CLASS_MAP.items()}
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
new file mode 100644
index 00000000..e9531672
--- /dev/null
+++ b/synapse/python_dependencies.py
@@ -0,0 +1,147 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from distutils.version import LooseVersion
+
+logger = logging.getLogger(__name__)
+
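+# Maps each pip requirement string to the importable module names (with
+# optional version constraints) that check_requirements() verifies at
+# runtime.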
+REQUIREMENTS = {
+ "frozendict>=0.4": ["frozendict"],
+ "unpaddedbase64>=1.0.1": ["unpaddedbase64>=1.0.1"],
+ "canonicaljson>=1.0.0": ["canonicaljson>=1.0.0"],
+ "signedjson>=1.0.0": ["signedjson>=1.0.0"],
+ "pynacl>=0.3.0": ["nacl>=0.3.0", "nacl.bindings"],
+ "service_identity>=1.0.0": ["service_identity>=1.0.0"],
+ "Twisted>=15.1.0": ["twisted>=15.1.0"],
+ "pyopenssl>=0.14": ["OpenSSL>=0.14"],
+ "pyyaml": ["yaml"],
+ "pyasn1": ["pyasn1"],
+ "daemonize": ["daemonize"],
+ "py-bcrypt": ["bcrypt"],
+ "pillow": ["PIL"],
+ "pydenticon": ["pydenticon"],
+ "ujson": ["ujson"],
+ "blist": ["blist"],
+ "pysaml2": ["saml2"],
+ "pymacaroons-pynacl": ["pymacaroons"],
+}
+CONDITIONAL_REQUIREMENTS = {
+ "web_client": {
+ "matrix_angular_sdk>=0.6.6": ["syweb>=0.6.6"],
+ }
+}
+
+
+def requirements(config=None, include_conditional=False):
+ reqs = REQUIREMENTS.copy()
+ if include_conditional:
+ for _, req in CONDITIONAL_REQUIREMENTS.items():
+ reqs.update(req)
+ return reqs
+
+
+def github_link(project, version, egg):
+ return "https://github.com/%s/tarball/%s/#egg=%s" % (project, version, egg)
+
+DEPENDENCY_LINKS = {}
+
+
+class MissingRequirementError(Exception):
+ def __init__(self, message, module_name, dependency):
+ super(MissingRequirementError, self).__init__(message)
+ self.module_name = module_name
+ self.dependency = dependency
+
+
+def check_requirements(config=None):
+ """Checks that all the modules needed by synapse have been correctly
+ installed and are at the correct version"""
+ for dependency, module_requirements in (
+ requirements(config, include_conditional=False).items()):
+ for module_requirement in module_requirements:
+ if ">=" in module_requirement:
+ module_name, required_version = module_requirement.split(">=")
+ version_test = ">="
+ elif "==" in module_requirement:
+ module_name, required_version = module_requirement.split("==")
+ version_test = "=="
+ else:
+ module_name = module_requirement
+ version_test = None
+
+ try:
+ module = __import__(module_name)
+ except ImportError:
+                logger.exception(
+ "Can't import %r which is part of %r",
+ module_name, dependency
+ )
+ raise MissingRequirementError(
+ "Can't import %r which is part of %r"
+ % (module_name, dependency), module_name, dependency
+ )
+ version = getattr(module, "__version__", None)
+ file_path = getattr(module, "__file__", None)
+ logger.info(
+ "Using %r version %r from %r to satisfy %r",
+ module_name, version, file_path, dependency
+ )
+
+ if version_test == ">=":
+ if version is None:
+ raise MissingRequirementError(
+ "Version of %r isn't set as __version__ of module %r"
+ % (dependency, module_name), module_name, dependency
+ )
+ if LooseVersion(version) < LooseVersion(required_version):
+ raise MissingRequirementError(
+ "Version of %r in %r is too old. %r < %r"
+ % (dependency, file_path, version, required_version),
+ module_name, dependency
+ )
+ elif version_test == "==":
+ if version is None:
+ raise MissingRequirementError(
+ "Version of %r isn't set as __version__ of module %r"
+ % (dependency, module_name), module_name, dependency
+ )
+ if LooseVersion(version) != LooseVersion(required_version):
+ raise MissingRequirementError(
+ "Unexpected version of %r in %r. %r != %r"
+ % (dependency, file_path, version, required_version),
+ module_name, dependency
+ )
+
+
+def list_requirements():
+ result = []
+ linked = []
+ for link in DEPENDENCY_LINKS.values():
+ egg = link.split("#egg=")[1]
+ linked.append(egg.split('-')[0])
+ result.append(link)
+ for requirement in requirements(include_conditional=True):
+ is_linked = False
+ for link in linked:
+ if requirement.replace('-', '_').startswith(link):
+ is_linked = True
+ if not is_linked:
+ result.append(requirement)
+ return result
+
+if __name__ == "__main__":
+ import sys
+ sys.stdout.writelines(req + "\n" for req in list_requirements())
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
new file mode 100644
index 00000000..1a84d94c
--- /dev/null
+++ b/synapse/rest/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/rest/client/__init__.py b/synapse/rest/client/__init__.py
new file mode 100644
index 00000000..1a84d94c
--- /dev/null
+++ b/synapse/rest/client/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/rest/client/v1/__init__.py b/synapse/rest/client/v1/__init__.py
new file mode 100644
index 00000000..cc9b49d5
--- /dev/null
+++ b/synapse/rest/client/v1/__init__.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import (
+ room, events, register, login, profile, presence, initial_sync, directory,
+ voip, admin, pusher, push_rule
+)
+
+from synapse.http.server import JsonResource
+
+
+class ClientV1RestResource(JsonResource):
+ """A resource for version 1 of the matrix client API."""
+
+ def __init__(self, hs):
+ JsonResource.__init__(self, hs, canonical_json=False)
+ self.register_servlets(self, hs)
+
+ @staticmethod
+ def register_servlets(client_resource, hs):
+ room.register_servlets(hs, client_resource)
+ events.register_servlets(hs, client_resource)
+ register.register_servlets(hs, client_resource)
+ login.register_servlets(hs, client_resource)
+ profile.register_servlets(hs, client_resource)
+ presence.register_servlets(hs, client_resource)
+ initial_sync.register_servlets(hs, client_resource)
+ directory.register_servlets(hs, client_resource)
+ voip.register_servlets(hs, client_resource)
+ admin.register_servlets(hs, client_resource)
+ pusher.register_servlets(hs, client_resource)
+ push_rule.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
new file mode 100644
index 00000000..bdde4386
--- /dev/null
+++ b/synapse/rest/client/v1/admin.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import AuthError, SynapseError
+from synapse.types import UserID
+
+ from .base import ClientV1RestServlet, client_path_pattern
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class WhoisRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/admin/whois/(?P<user_id>[^/]*)")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ target_user = UserID.from_string(user_id)
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ is_admin = yield self.auth.is_server_admin(auth_user)
+
+ if not is_admin and target_user != auth_user:
+ raise AuthError(403, "You are not a server admin")
+
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "Can only whois a local user")
+
+ ret = yield self.handlers.admin_handler.get_whois(target_user)
+
+ defer.returnValue((200, ret))
+
+
+def register_servlets(hs, http_server):
+ WhoisRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py
new file mode 100644
index 00000000..504a5e43
--- /dev/null
+++ b/synapse/rest/client/v1/base.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains base REST classes for constructing client v1 servlets.
+"""
+
+from synapse.http.servlet import RestServlet
+from synapse.api.urls import CLIENT_PREFIX
+from .transactions import HttpTransactionStore
+import re
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+def client_path_pattern(path_regex):
+ """Creates a regex compiled client path with the correct client path
+ prefix.
+
+ Args:
+ path_regex (str): The regex string to match. This should NOT have a ^
+ as this will be prefixed.
+ Returns:
+ SRE_Pattern
+ """
+ return re.compile("^" + CLIENT_PREFIX + path_regex)
+
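+ # Editor's sketch (not part of upstream): assuming CLIENT_PREFIX is
+ # "/_matrix/client/api/v1", a servlet pattern such as
+ #
+ #     pattern = client_path_pattern("/events$")
+ #
+ # compiles to a regex matching "/_matrix/client/api/v1/events" and
+ # nothing else, which JsonResource uses to route incoming requests.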
+
+class ClientV1RestServlet(RestServlet):
+ """A base Synapse REST Servlet for the client version 1 API.
+ """
+
+ def __init__(self, hs):
+ self.hs = hs
+ self.handlers = hs.get_handlers()
+ self.builder_factory = hs.get_event_builder_factory()
+ self.auth = hs.get_v1auth()
+ self.txns = HttpTransactionStore()
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
new file mode 100644
index 00000000..240eedac
--- /dev/null
+++ b/synapse/rest/client/v1/directory.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from synapse.api.errors import AuthError, SynapseError, Codes
+from synapse.types import RoomAlias
+from .base import ClientV1RestServlet, client_path_pattern
+
+import simplejson as json
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+def register_servlets(hs, http_server):
+ ClientDirectoryServer(hs).register(http_server)
+
+
+class ClientDirectoryServer(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/directory/room/(?P<room_alias>[^/]*)$")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_alias):
+ room_alias = RoomAlias.from_string(room_alias)
+
+ dir_handler = self.handlers.directory_handler
+ res = yield dir_handler.get_association(room_alias)
+
+ defer.returnValue((200, res))
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, room_alias):
+ content = _parse_json(request)
+ if "room_id" not in content:
+ raise SynapseError(400, "Missing room_id key",
+ errcode=Codes.BAD_JSON)
+
+ logger.debug("Got content: %s", content)
+
+ room_alias = RoomAlias.from_string(room_alias)
+
+ logger.debug("Got room name: %s", room_alias.to_string())
+
+ room_id = content["room_id"]
+ servers = content["servers"] if "servers" in content else None
+
+ logger.debug("Got room_id: %s", room_id)
+ logger.debug("Got servers: %s", servers)
+
+ # TODO(erikj): Check types.
+ # TODO(erikj): Check that room exists
+
+ dir_handler = self.handlers.directory_handler
+
+ try:
+ # try to auth as a user
+ user, _, _ = yield self.auth.get_user_by_req(request)
+ try:
+ user_id = user.to_string()
+ yield dir_handler.create_association(
+ user_id, room_alias, room_id, servers
+ )
+ yield dir_handler.send_room_alias_update_event(user_id, room_id)
+ except SynapseError as e:
+ raise e
+ except:
+ logger.exception("Failed to create association")
+ raise
+ except AuthError:
+ # try to auth as an application service
+ service = yield self.auth.get_appservice_by_req(request)
+ yield dir_handler.create_appservice_association(
+ service, room_alias, room_id, servers
+ )
+ logger.info(
+ "Application service at %s created alias %s pointing to %s",
+ service.url,
+ room_alias.to_string(),
+ room_id
+ )
+
+ defer.returnValue((200, {}))
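+
+ # Editor's note (not part of upstream): a valid PUT body for this
+ # endpoint looks like, e.g.,
+ #
+ #     {"room_id": "!abc123:example.com", "servers": ["example.com"]}
+ #
+ # where "servers" is optional and defaults to None.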
+
+ @defer.inlineCallbacks
+ def on_DELETE(self, request, room_alias):
+ dir_handler = self.handlers.directory_handler
+
+ try:
+ service = yield self.auth.get_appservice_by_req(request)
+ room_alias = RoomAlias.from_string(room_alias)
+ yield dir_handler.delete_appservice_association(
+ service, room_alias
+ )
+ logger.info(
+ "Application service at %s deleted alias %s",
+ service.url,
+ room_alias.to_string()
+ )
+ defer.returnValue((200, {}))
+ except AuthError:
+ # fallback to default user behaviour if they aren't an AS
+ pass
+
+ user, _, _ = yield self.auth.get_user_by_req(request)
+
+ is_admin = yield self.auth.is_server_admin(user)
+ if not is_admin:
+ raise AuthError(403, "You need to be a server admin")
+
+ room_alias = RoomAlias.from_string(room_alias)
+
+ yield dir_handler.delete_association(
+ user.to_string(), room_alias
+ )
+ logger.info(
+ "User %s deleted alias %s",
+ user.to_string(),
+ room_alias.to_string()
+ )
+
+ defer.returnValue((200, {}))
+
+
+def _parse_json(request):
+ try:
+ content = json.loads(request.content.read())
+ if type(content) != dict:
+ raise SynapseError(400, "Content must be a JSON object.",
+ errcode=Codes.NOT_JSON)
+ return content
+ except ValueError:
+ raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
new file mode 100644
index 00000000..3e1750d1
--- /dev/null
+++ b/synapse/rest/client/v1/events.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains REST servlets to do with event streaming, /events."""
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+from synapse.streams.config import PaginationConfig
+from .base import ClientV1RestServlet, client_path_pattern
+from synapse.events.utils import serialize_event
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class EventStreamRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/events$")
+
+ DEFAULT_LONGPOLL_TIME_MS = 30000
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ auth_user, _, is_guest = yield self.auth.get_user_by_req(
+ request,
+ allow_guest=True
+ )
+ room_id = None
+ if is_guest:
+ if "room_id" not in request.args:
+ raise SynapseError(400, "Guest users must specify room_id param")
+ room_id = request.args["room_id"][0]
+ try:
+ handler = self.handlers.event_stream_handler
+ pagin_config = PaginationConfig.from_request(request)
+ timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
+ if "timeout" in request.args:
+ try:
+ timeout = int(request.args["timeout"][0])
+ except ValueError:
+ raise SynapseError(400, "timeout must be in milliseconds.")
+
+ as_client_event = "raw" not in request.args
+
+ chunk = yield handler.get_stream(
+ auth_user.to_string(), pagin_config, timeout=timeout,
+ as_client_event=as_client_event, affect_presence=(not is_guest),
+ room_id=room_id, is_guest=is_guest
+ )
+ except:
+ logger.exception("Event stream failed")
+ raise
+
+ defer.returnValue((200, chunk))
+
+ def on_OPTIONS(self, request):
+ return (200, {})
+
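+ # Editor's sketch (not part of upstream): a long-poll against this
+ # servlet looks like, e.g.,
+ #
+ #     GET /_matrix/client/api/v1/events?from=s3456_2_0&timeout=30000
+ #
+ # and guest users must additionally pass a room_id query parameter.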
+
+# TODO: Unit test gets, with and without auth, with different kinds of events.
+class EventRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/events/(?P<event_id>[^/]*)$")
+
+ def __init__(self, hs):
+ super(EventRestServlet, self).__init__(hs)
+ self.clock = hs.get_clock()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, event_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ handler = self.handlers.event_handler
+ event = yield handler.get_event(auth_user, event_id)
+
+ time_now = self.clock.time_msec()
+ if event:
+ defer.returnValue((200, serialize_event(event, time_now)))
+ else:
+ defer.returnValue((404, "Event not found."))
+
+
+def register_servlets(hs, http_server):
+ EventStreamRestServlet(hs).register(http_server)
+ EventRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py
new file mode 100644
index 00000000..856a70f2
--- /dev/null
+++ b/synapse/rest/client/v1/initial_sync.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.streams.config import PaginationConfig
+ from .base import ClientV1RestServlet, client_path_pattern
+
+
+# TODO: Needs unit testing
+class InitialSyncRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/initialSync$")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ user, _, _ = yield self.auth.get_user_by_req(request)
+ as_client_event = "raw" not in request.args
+ pagination_config = PaginationConfig.from_request(request)
+ handler = self.handlers.message_handler
+ include_archived = request.args.get("archived", None) == ["true"]
+ content = yield handler.snapshot_all_rooms(
+ user_id=user.to_string(),
+ pagin_config=pagination_config,
+ as_client_event=as_client_event,
+ include_archived=include_archived,
+ )
+
+ defer.returnValue((200, content))
+
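+ # Editor's sketch (not part of upstream): e.g.
+ #
+ #     GET /_matrix/client/api/v1/initialSync?limit=10&archived=true
+ #
+ # snapshots all of the user's rooms, including archived ones when the
+ # "archived" flag is set to "true".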
+
+def register_servlets(hs, http_server):
+ InitialSyncRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
new file mode 100644
index 00000000..0171f6c0
--- /dev/null
+++ b/synapse/rest/client/v1/login.py
@@ -0,0 +1,413 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError, LoginError, Codes
+from synapse.http.client import SimpleHttpClient
+from synapse.types import UserID
+ from .base import ClientV1RestServlet, client_path_pattern
+
+import simplejson as json
+import urllib
+import urlparse
+
+import logging
+from saml2 import BINDING_HTTP_POST
+from saml2 import config
+from saml2.client import Saml2Client
+
+import xml.etree.ElementTree as ET
+
+
+logger = logging.getLogger(__name__)
+
+
+class LoginRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/login$")
+ PASS_TYPE = "m.login.password"
+ SAML2_TYPE = "m.login.saml2"
+ CAS_TYPE = "m.login.cas"
+ TOKEN_TYPE = "m.login.token"
+
+ def __init__(self, hs):
+ super(LoginRestServlet, self).__init__(hs)
+ self.idp_redirect_url = hs.config.saml2_idp_redirect_url
+ self.password_enabled = hs.config.password_enabled
+ self.saml2_enabled = hs.config.saml2_enabled
+ self.cas_enabled = hs.config.cas_enabled
+ self.cas_server_url = hs.config.cas_server_url
+ self.cas_required_attributes = hs.config.cas_required_attributes
+ self.servername = hs.config.server_name
+
+ def on_GET(self, request):
+ flows = []
+ if self.saml2_enabled:
+ flows.append({"type": LoginRestServlet.SAML2_TYPE})
+ if self.cas_enabled:
+ flows.append({"type": LoginRestServlet.CAS_TYPE})
+ if self.password_enabled:
+ flows.append({"type": LoginRestServlet.PASS_TYPE})
+ flows.append({"type": LoginRestServlet.TOKEN_TYPE})
+ return (200, {"flows": flows})
+
+ def on_OPTIONS(self, request):
+ return (200, {})
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ login_submission = _parse_json(request)
+ try:
+ if login_submission["type"] == LoginRestServlet.PASS_TYPE:
+ if not self.password_enabled:
+ raise SynapseError(400, "Password login has been disabled.")
+
+ result = yield self.do_password_login(login_submission)
+ defer.returnValue(result)
+ elif self.saml2_enabled and (login_submission["type"] ==
+ LoginRestServlet.SAML2_TYPE):
+ relay_state = ""
+ if "relay_state" in login_submission:
+ relay_state = "&RelayState="+urllib.quote(
+ login_submission["relay_state"])
+ result = {
+ "uri": "%s%s" % (self.idp_redirect_url, relay_state)
+ }
+ defer.returnValue((200, result))
+ # TODO Delete this after all CAS clients switch to token login instead
+ elif self.cas_enabled and (login_submission["type"] ==
+ LoginRestServlet.CAS_TYPE):
+ # TODO: get this from the homeserver rather than creating a new one for
+ # each request
+ http_client = SimpleHttpClient(self.hs)
+ uri = "%s/proxyValidate" % (self.cas_server_url,)
+ args = {
+ "ticket": login_submission["ticket"],
+ "service": login_submission["service"]
+ }
+ body = yield http_client.get_raw(uri, args)
+ result = yield self.do_cas_login(body)
+ defer.returnValue(result)
+ elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
+ result = yield self.do_token_login(login_submission)
+ defer.returnValue(result)
+ else:
+ raise SynapseError(400, "Bad login type.")
+ except KeyError:
+ raise SynapseError(400, "Missing JSON keys.")
+
+ @defer.inlineCallbacks
+ def do_password_login(self, login_submission):
+ if 'medium' in login_submission and 'address' in login_submission:
+ user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
+ login_submission['medium'], login_submission['address']
+ )
+ if not user_id:
+ raise LoginError(403, "", errcode=Codes.FORBIDDEN)
+ else:
+ user_id = login_submission['user']
+
+ if not user_id.startswith('@'):
+ user_id = UserID.create(
+ user_id, self.hs.hostname
+ ).to_string()
+
+ auth_handler = self.handlers.auth_handler
+ user_id, access_token, refresh_token = yield auth_handler.login_with_password(
+ user_id=user_id,
+ password=login_submission["password"])
+
+ result = {
+ "user_id": user_id, # may have changed
+ "access_token": access_token,
+ "refresh_token": refresh_token,
+ "home_server": self.hs.hostname,
+ }
+
+ defer.returnValue((200, result))
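+
+ # Editor's note (not part of upstream): a password login submission
+ # looks like, e.g.,
+ #
+ #     {"type": "m.login.password", "user": "alice", "password": "s3cret"}
+ #
+ # or, when logging in with a third-party identifier,
+ #
+ #     {"type": "m.login.password", "medium": "email",
+ #      "address": "alice@example.com", "password": "s3cret"}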
+
+ @defer.inlineCallbacks
+ def do_token_login(self, login_submission):
+ token = login_submission['token']
+ auth_handler = self.handlers.auth_handler
+ user_id = (
+ yield auth_handler.validate_short_term_login_token_and_get_user_id(token)
+ )
+ user_id, access_token, refresh_token = (
+ yield auth_handler.get_login_tuple_for_user_id(user_id)
+ )
+ result = {
+ "user_id": user_id, # may have changed
+ "access_token": access_token,
+ "refresh_token": refresh_token,
+ "home_server": self.hs.hostname,
+ }
+
+ defer.returnValue((200, result))
+
+ # TODO Delete this after all CAS clients switch to token login instead
+ @defer.inlineCallbacks
+ def do_cas_login(self, cas_response_body):
+ user, attributes = self.parse_cas_response(cas_response_body)
+
+ for required_attribute, required_value in self.cas_required_attributes.items():
+ # If required attribute was not in CAS Response - Forbidden
+ if required_attribute not in attributes:
+ raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
+
+ # Also need to check value
+ if required_value is not None:
+ actual_value = attributes[required_attribute]
+ # If required attribute value does not match expected - Forbidden
+ if required_value != actual_value:
+ raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
+
+ user_id = UserID.create(user, self.hs.hostname).to_string()
+ auth_handler = self.handlers.auth_handler
+ user_exists = yield auth_handler.does_user_exist(user_id)
+ if user_exists:
+ user_id, access_token, refresh_token = (
+ yield auth_handler.get_login_tuple_for_user_id(user_id)
+ )
+ result = {
+ "user_id": user_id, # may have changed
+ "access_token": access_token,
+ "refresh_token": refresh_token,
+ "home_server": self.hs.hostname,
+ }
+
+ else:
+ user_id, access_token = (
+ yield self.handlers.registration_handler.register(localpart=user)
+ )
+ result = {
+ "user_id": user_id, # may have changed
+ "access_token": access_token,
+ "home_server": self.hs.hostname,
+ }
+
+ defer.returnValue((200, result))
+
+ # TODO Delete this after all CAS clients switch to token login instead
+ def parse_cas_response(self, cas_response_body):
+ root = ET.fromstring(cas_response_body)
+ if not root.tag.endswith("serviceResponse"):
+ raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
+ if not root[0].tag.endswith("authenticationSuccess"):
+ raise LoginError(401, "Unsuccessful CAS response", errcode=Codes.UNAUTHORIZED)
+ # Default to None so that a response missing <user> or <attributes>
+ # fails the check below with a clean LoginError rather than a NameError.
+ user = None
+ attributes = None
+ for child in root[0]:
+ if child.tag.endswith("user"):
+ user = child.text
+ if child.tag.endswith("attributes"):
+ attributes = {}
+ for attribute in child:
+ # ElementTree library expands the namespace in attribute tags
+ # to the full URL of the namespace.
+ # See (https://docs.python.org/2/library/xml.etree.elementtree.html)
+ # We don't care about namespace here and it will always be encased in
+ # curly braces, so we remove them.
+ if "}" in attribute.tag:
+ attributes[attribute.tag.split("}")[1]] = attribute.text
+ else:
+ attributes[attribute.tag] = attribute.text
+ if user is None or attributes is None:
+ raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
+
+ return (user, attributes)
+
+
+class SAML2RestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/login/saml2")
+
+ def __init__(self, hs):
+ super(SAML2RestServlet, self).__init__(hs)
+ self.sp_config = hs.config.saml2_config_path
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ saml2_auth = None
+ try:
+ conf = config.SPConfig()
+ conf.load_file(self.sp_config)
+ SP = Saml2Client(conf)
+ saml2_auth = SP.parse_authn_request_response(
+ request.args['SAMLResponse'][0], BINDING_HTTP_POST)
+ except Exception as e: # Not authenticated
+ logger.exception(e)
+ if saml2_auth and saml2_auth.status_ok() and not saml2_auth.not_signed:
+ username = saml2_auth.name_id.text
+ handler = self.handlers.registration_handler
+ (user_id, token) = yield handler.register_saml2(username)
+ # Forward to the RelayState callback along with ava
+ if 'RelayState' in request.args:
+ request.redirect(urllib.unquote(
+ request.args['RelayState'][0]) +
+ '?status=authenticated&access_token=' +
+ token + '&user_id=' + user_id + '&ava=' +
+ urllib.quote(json.dumps(saml2_auth.ava)))
+ request.finish()
+ defer.returnValue(None)
+ defer.returnValue((200, {"status": "authenticated",
+ "user_id": user_id, "token": token,
+ "ava": saml2_auth.ava}))
+ elif 'RelayState' in request.args:
+ request.redirect(urllib.unquote(
+ request.args['RelayState'][0]) +
+ '?status=not_authenticated')
+ request.finish()
+ defer.returnValue(None)
+ defer.returnValue((200, {"status": "not_authenticated"}))
+
+
+# TODO Delete this after all CAS clients switch to token login instead
+class CasRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/login/cas")
+
+ def __init__(self, hs):
+ super(CasRestServlet, self).__init__(hs)
+ self.cas_server_url = hs.config.cas_server_url
+
+ def on_GET(self, request):
+ return (200, {"serverUrl": self.cas_server_url})
+
+
+class CasRedirectServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/login/cas/redirect")
+
+ def __init__(self, hs):
+ super(CasRedirectServlet, self).__init__(hs)
+ self.cas_server_url = hs.config.cas_server_url
+ self.cas_service_url = hs.config.cas_service_url
+
+ def on_GET(self, request):
+ args = request.args
+ if "redirectUrl" not in args:
+ return (400, "Redirect URL not specified for CAS auth")
+ client_redirect_url_param = urllib.urlencode({
+ "redirectUrl": args["redirectUrl"][0]
+ })
+ hs_redirect_url = self.cas_service_url + "/_matrix/client/api/v1/login/cas/ticket"
+ service_param = urllib.urlencode({
+ "service": "%s?%s" % (hs_redirect_url, client_redirect_url_param)
+ })
+ request.redirect("%s?%s" % (self.cas_server_url, service_param))
+ request.finish()
+
+
+class CasTicketServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/login/cas/ticket")
+
+ def __init__(self, hs):
+ super(CasTicketServlet, self).__init__(hs)
+ self.cas_server_url = hs.config.cas_server_url
+ self.cas_service_url = hs.config.cas_service_url
+ self.cas_required_attributes = hs.config.cas_required_attributes
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ client_redirect_url = request.args["redirectUrl"][0]
+ http_client = self.hs.get_simple_http_client()
+ uri = self.cas_server_url + "/proxyValidate"
+ args = {
+ "ticket": request.args["ticket"],
+ "service": self.cas_service_url
+ }
+ body = yield http_client.get_raw(uri, args)
+ result = yield self.handle_cas_response(request, body, client_redirect_url)
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def handle_cas_response(self, request, cas_response_body, client_redirect_url):
+ user, attributes = self.parse_cas_response(cas_response_body)
+
+ for required_attribute, required_value in self.cas_required_attributes.items():
+ # If required attribute was not in CAS Response - Forbidden
+ if required_attribute not in attributes:
+ raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
+
+ # Also need to check value
+ if required_value is not None:
+ actual_value = attributes[required_attribute]
+ # If required attribute value does not match expected - Forbidden
+ if required_value != actual_value:
+ raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
+
+ user_id = UserID.create(user, self.hs.hostname).to_string()
+ auth_handler = self.handlers.auth_handler
+ user_exists = yield auth_handler.does_user_exist(user_id)
+ if not user_exists:
+ user_id, _ = (
+ yield self.handlers.registration_handler.register(localpart=user)
+ )
+
+ login_token = auth_handler.generate_short_term_login_token(user_id)
+ redirect_url = self.add_login_token_to_redirect_url(client_redirect_url,
+ login_token)
+ request.redirect(redirect_url)
+ request.finish()
+
+ def add_login_token_to_redirect_url(self, url, token):
+ url_parts = list(urlparse.urlparse(url))
+ query = dict(urlparse.parse_qsl(url_parts[4]))
+ query.update({"loginToken": token})
+ url_parts[4] = urllib.urlencode(query)
+ return urlparse.urlunparse(url_parts)
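+
+ # Editor's note (not part of upstream): e.g.
+ #
+ #     add_login_token_to_redirect_url("https://client.example/cb?foo=1", "tok")
+ #
+ # returns "https://client.example/cb?foo=1&loginToken=tok" (parameter
+ # order may vary, since the query string is rebuilt from a dict).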
+
+ def parse_cas_response(self, cas_response_body):
+ root = ET.fromstring(cas_response_body)
+ if not root.tag.endswith("serviceResponse"):
+ raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
+ if not root[0].tag.endswith("authenticationSuccess"):
+ raise LoginError(401, "Unsuccessful CAS response", errcode=Codes.UNAUTHORIZED)
+ # Default to None so that a response missing <user> or <attributes>
+ # fails the check below with a clean LoginError rather than a NameError.
+ user = None
+ attributes = None
+ for child in root[0]:
+ if child.tag.endswith("user"):
+ user = child.text
+ if child.tag.endswith("attributes"):
+ attributes = {}
+ for attribute in child:
+ # ElementTree library expands the namespace in attribute tags
+ # to the full URL of the namespace.
+ # See (https://docs.python.org/2/library/xml.etree.elementtree.html)
+ # We don't care about namespace here and it will always be encased in
+ # curly braces, so we remove them.
+ if "}" in attribute.tag:
+ attributes[attribute.tag.split("}")[1]] = attribute.text
+ else:
+ attributes[attribute.tag] = attribute.text
+ if user is None or attributes is None:
+ raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
+
+ return (user, attributes)
+
+
+def _parse_json(request):
+ try:
+ content = json.loads(request.content.read())
+ if type(content) != dict:
+ raise SynapseError(400, "Content must be a JSON object.")
+ return content
+ except ValueError:
+ raise SynapseError(400, "Content not JSON.")
+
+
+def register_servlets(hs, http_server):
+ LoginRestServlet(hs).register(http_server)
+ if hs.config.saml2_enabled:
+ SAML2RestServlet(hs).register(http_server)
+ if hs.config.cas_enabled:
+ CasRedirectServlet(hs).register(http_server)
+ CasTicketServlet(hs).register(http_server)
+ CasRestServlet(hs).register(http_server)
+ # TODO PasswordResetRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
new file mode 100644
index 00000000..6fe5d19a
--- /dev/null
+++ b/synapse/rest/client/v1/presence.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" This module contains REST servlets to do with presence: /presence/<paths>
+"""
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+from synapse.types import UserID
+from .base import ClientV1RestServlet, client_path_pattern
+
+import simplejson as json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class PresenceStatusRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/presence/(?P<user_id>[^/]*)/status")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ user = UserID.from_string(user_id)
+
+ state = yield self.handlers.presence_handler.get_state(
+ target_user=user, auth_user=auth_user)
+
+ defer.returnValue((200, state))
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, user_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ user = UserID.from_string(user_id)
+
+ state = {}
+ try:
+ content = json.loads(request.content.read())
+
+ state["presence"] = content.pop("presence")
+
+ if "status_msg" in content:
+ state["status_msg"] = content.pop("status_msg")
+ if not isinstance(state["status_msg"], basestring):
+ raise SynapseError(400, "status_msg must be a string.")
+
+ if content:
+ raise KeyError()
+ except SynapseError as e:
+ raise e
+ except:
+ raise SynapseError(400, "Unable to parse state")
+
+ yield self.handlers.presence_handler.set_state(
+ target_user=user, auth_user=auth_user, state=state)
+
+ defer.returnValue((200, {}))
+
+ def on_OPTIONS(self, request):
+ return (200, {})
+
+
+class PresenceListRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/presence/list/(?P<user_id>[^/]*)")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ user = UserID.from_string(user_id)
+
+ if not self.hs.is_mine(user):
+ raise SynapseError(400, "User not hosted on this Home Server")
+
+ if auth_user != user:
+ raise SynapseError(400, "Cannot get another user's presence list")
+
+ presence = yield self.handlers.presence_handler.get_presence_list(
+ observer_user=user, accepted=True)
+
+ for p in presence:
+ observed_user = p.pop("observed_user")
+ p["user_id"] = observed_user.to_string()
+
+ defer.returnValue((200, presence))
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, user_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ user = UserID.from_string(user_id)
+
+ if not self.hs.is_mine(user):
+ raise SynapseError(400, "User not hosted on this Home Server")
+
+ if auth_user != user:
+ raise SynapseError(
+ 400, "Cannot modify another user's presence list")
+
+ try:
+ content = json.loads(request.content.read())
+ except:
+ logger.exception("JSON parse error")
+ raise SynapseError(400, "Unable to parse content")
+
+ if "invite" in content:
+ for u in content["invite"]:
+ if not isinstance(u, basestring):
+ raise SynapseError(400, "Bad invite value.")
+ if len(u) == 0:
+ continue
+ invited_user = UserID.from_string(u)
+ yield self.handlers.presence_handler.send_invite(
+ observer_user=user, observed_user=invited_user
+ )
+
+ if "drop" in content:
+ for u in content["drop"]:
+ if not isinstance(u, basestring):
+ raise SynapseError(400, "Bad drop value.")
+ if len(u) == 0:
+ continue
+ dropped_user = UserID.from_string(u)
+ yield self.handlers.presence_handler.drop(
+ observer_user=user, observed_user=dropped_user
+ )
+
+ defer.returnValue((200, {}))
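+
+ # Editor's note (not part of upstream): a typical POST body is, e.g.,
+ #
+ #     {"invite": ["@bob:example.com"], "drop": ["@carol:example.com"]}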
+
+ def on_OPTIONS(self, request):
+ return (200, {})
+
+
+def register_servlets(hs, http_server):
+ PresenceStatusRestServlet(hs).register(http_server)
+ PresenceListRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
new file mode 100644
index 00000000..3218e470
--- /dev/null
+++ b/synapse/rest/client/v1/profile.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" This module contains REST servlets to do with profile: /profile/<paths> """
+from twisted.internet import defer
+
+from .base import ClientV1RestServlet, client_path_pattern
+from synapse.types import UserID
+
+import simplejson as json
+
+
+class ProfileDisplaynameRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/profile/(?P<user_id>[^/]*)/displayname")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ user = UserID.from_string(user_id)
+
+ displayname = yield self.handlers.profile_handler.get_displayname(
+ user,
+ )
+
+ defer.returnValue((200, {"displayname": displayname}))
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, user_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request, allow_guest=True)
+ user = UserID.from_string(user_id)
+
+ try:
+ content = json.loads(request.content.read())
+ new_name = content["displayname"]
+ except:
+ defer.returnValue((400, "Unable to parse name"))
+
+ yield self.handlers.profile_handler.set_displayname(
+ user, auth_user, new_name)
+
+ defer.returnValue((200, {}))
+
+ def on_OPTIONS(self, request, user_id):
+ return (200, {})
+
+
+class ProfileAvatarURLRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/profile/(?P<user_id>[^/]*)/avatar_url")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ user = UserID.from_string(user_id)
+
+ avatar_url = yield self.handlers.profile_handler.get_avatar_url(
+ user,
+ )
+
+ defer.returnValue((200, {"avatar_url": avatar_url}))
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, user_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ user = UserID.from_string(user_id)
+
+ try:
+ content = json.loads(request.content.read())
+ new_name = content["avatar_url"]
+ except:
+ defer.returnValue((400, "Unable to parse name"))
+
+ yield self.handlers.profile_handler.set_avatar_url(
+ user, auth_user, new_name)
+
+ defer.returnValue((200, {}))
+
+ def on_OPTIONS(self, request, user_id):
+ return (200, {})
+
+
+class ProfileRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/profile/(?P<user_id>[^/]*)")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ user = UserID.from_string(user_id)
+
+ displayname = yield self.handlers.profile_handler.get_displayname(
+ user,
+ )
+ avatar_url = yield self.handlers.profile_handler.get_avatar_url(
+ user,
+ )
+
+ defer.returnValue((200, {
+ "displayname": displayname,
+ "avatar_url": avatar_url
+ }))
+
+
+def register_servlets(hs, http_server):
+ ProfileDisplaynameRestServlet(hs).register(http_server)
+ ProfileAvatarURLRestServlet(hs).register(http_server)
+ ProfileRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
new file mode 100644
index 00000000..b0870db1
--- /dev/null
+++ b/synapse/rest/client/v1/push_rule.py
@@ -0,0 +1,462 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import (
+ SynapseError, Codes, UnrecognizedRequestError, NotFoundError, StoreError
+)
+from .base import ClientV1RestServlet, client_path_pattern
+from synapse.storage.push_rule import (
+ InconsistentRuleException, RuleNotFoundException
+)
+import synapse.push.baserules as baserules
+from synapse.push.rulekinds import (
+ PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP
+)
+
+import simplejson as json
+
+
+class PushRuleRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/pushrules/.*$")
+ SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = (
+ "Unrecognised request: You probably wanted a trailing slash")
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request):
+ spec = _rule_spec_from_path(request.postpath)
+ try:
+ priority_class = _priority_class_from_spec(spec)
+ except InvalidRuleException as e:
+ raise SynapseError(400, e.message)
+
+ user, _, _ = yield self.auth.get_user_by_req(request)
+
+ if '/' in spec['rule_id'] or '\\' in spec['rule_id']:
+ raise SynapseError(400, "rule_id may not contain slashes")
+
+ content = _parse_json(request)
+
+ if 'attr' in spec:
+ yield self.set_rule_attr(user.to_string(), spec, content)
+ defer.returnValue((200, {}))
+
+ try:
+ (conditions, actions) = _rule_tuple_from_request_object(
+ spec['template'],
+ spec['rule_id'],
+ content,
+ # _rule_spec_from_path stores the device under 'profile_tag'
+ device=spec['profile_tag'] if 'profile_tag' in spec else None
+ )
+ except InvalidRuleException as e:
+ raise SynapseError(400, e.message)
+
+ before = request.args.get("before", None)
+ if before and len(before):
+ before = before[0]
+ after = request.args.get("after", None)
+ if after and len(after):
+ after = after[0]
+
+ try:
+ yield self.hs.get_datastore().add_push_rule(
+ user_name=user.to_string(),
+ rule_id=_namespaced_rule_id_from_spec(spec),
+ priority_class=priority_class,
+ conditions=conditions,
+ actions=actions,
+ before=before,
+ after=after
+ )
+ except InconsistentRuleException as e:
+ raise SynapseError(400, e.message)
+ except RuleNotFoundException as e:
+ raise SynapseError(400, e.message)
+
+ defer.returnValue((200, {}))
+
+ @defer.inlineCallbacks
+ def on_DELETE(self, request):
+ spec = _rule_spec_from_path(request.postpath)
+
+ user, _, _ = yield self.auth.get_user_by_req(request)
+
+ namespaced_rule_id = _namespaced_rule_id_from_spec(spec)
+
+ try:
+ yield self.hs.get_datastore().delete_push_rule(
+ user.to_string(), namespaced_rule_id
+ )
+ defer.returnValue((200, {}))
+ except StoreError as e:
+ if e.code == 404:
+ raise NotFoundError()
+ else:
+ raise
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ user, _, _ = yield self.auth.get_user_by_req(request)
+
+ # We build up the full structure and then decide which bits of it
+ # to send, which means doing unnecessary work sometimes, but is
+ # probably not going to make a whole lot of difference.
+ rawrules = yield self.hs.get_datastore().get_push_rules_for_user(
+ user.to_string()
+ )
+
+ ruleslist = []
+ for rawrule in rawrules:
+ rule = dict(rawrule)
+ rule["conditions"] = json.loads(rawrule["conditions"])
+ rule["actions"] = json.loads(rawrule["actions"])
+ ruleslist.append(rule)
+
+ ruleslist = baserules.list_with_base_rules(ruleslist, user)
+
+ rules = {'global': {}, 'device': {}}
+
+ rules['global'] = _add_empty_priority_class_arrays(rules['global'])
+
+ enabled_map = yield self.hs.get_datastore().get_push_rules_enabled_for_user(
+ user.to_string()
+ )
+
+ for r in ruleslist:
+ rulearray = None
+
+ template_name = _priority_class_to_template_name(r['priority_class'])
+
+ if r['priority_class'] > PRIORITY_CLASS_MAP['override']:
+ # per-device rule
+ profile_tag = _profile_tag_from_conditions(r["conditions"])
+ r = _strip_device_condition(r)
+ if not profile_tag:
+ continue
+ if profile_tag not in rules['device']:
+ rules['device'][profile_tag] = {}
+ rules['device'][profile_tag] = (
+ _add_empty_priority_class_arrays(
+ rules['device'][profile_tag]
+ )
+ )
+
+ rulearray = rules['device'][profile_tag][template_name]
+ else:
+ rulearray = rules['global'][template_name]
+
+ template_rule = _rule_to_template(r)
+ if template_rule:
+ if r['rule_id'] in enabled_map:
+ template_rule['enabled'] = enabled_map[r['rule_id']]
+ elif 'enabled' in r:
+ template_rule['enabled'] = r['enabled']
+ else:
+ template_rule['enabled'] = True
+ rulearray.append(template_rule)
+
+ path = request.postpath[1:]
+
+ if path == []:
+ # we're a reference impl: pedantry is our job.
+ raise UnrecognizedRequestError(
+ PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
+ )
+
+ if path[0] == '':
+ defer.returnValue((200, rules))
+ elif path[0] == 'global':
+ path = path[1:]
+ result = _filter_ruleset_with_path(rules['global'], path)
+ defer.returnValue((200, result))
+ elif path[0] == 'device':
+ path = path[1:]
+ if path == []:
+ raise UnrecognizedRequestError(
+ PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
+ )
+ if path[0] == '':
+ defer.returnValue((200, rules['device']))
+
+ profile_tag = path[0]
+ path = path[1:]
+ if profile_tag not in rules['device']:
+ ret = {}
+ ret = _add_empty_priority_class_arrays(ret)
+ defer.returnValue((200, ret))
+ ruleset = rules['device'][profile_tag]
+ result = _filter_ruleset_with_path(ruleset, path)
+ defer.returnValue((200, result))
+ else:
+ raise UnrecognizedRequestError()
+
+ def on_OPTIONS(self, _):
+ return 200, {}
+
+ def set_rule_attr(self, user_name, spec, val):
+ if spec['attr'] == 'enabled':
+ if not isinstance(val, bool):
+ raise SynapseError(400, "Value for 'enabled' must be boolean")
+ namespaced_rule_id = _namespaced_rule_id_from_spec(spec)
+ # Return the deferred so callers can wait for the write to land.
+ return self.hs.get_datastore().set_push_rule_enabled(
+ user_name, namespaced_rule_id, val
+ )
+ else:
+ raise UnrecognizedRequestError()
+
+ def get_rule_attr(self, user_name, namespaced_rule_id, attr):
+ if attr == 'enabled':
+ return self.hs.get_datastore().get_push_rule_enabled_by_user_rule_id(
+ user_name, namespaced_rule_id
+ )
+ else:
+ raise UnrecognizedRequestError()
+
+
+def _rule_spec_from_path(path):
+ if len(path) < 2:
+ raise UnrecognizedRequestError()
+ if path[0] != 'pushrules':
+ raise UnrecognizedRequestError()
+
+ scope = path[1]
+ path = path[2:]
+ if scope not in ['global', 'device']:
+ raise UnrecognizedRequestError()
+
+ device = None
+ if scope == 'device':
+ if len(path) == 0:
+ raise UnrecognizedRequestError()
+ device = path[0]
+ path = path[1:]
+
+ if len(path) == 0:
+ raise UnrecognizedRequestError()
+
+ template = path[0]
+ path = path[1:]
+
+ if len(path) == 0 or len(path[0]) == 0:
+ raise UnrecognizedRequestError()
+
+ rule_id = path[0]
+
+ spec = {
+ 'scope': scope,
+ 'template': template,
+ 'rule_id': rule_id
+ }
+ if device:
+ spec['profile_tag'] = device
+
+ path = path[1:]
+
+ if len(path) > 0 and len(path[0]) > 0:
+ spec['attr'] = path[0]
+
+ return spec
+
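+ # Editor's sketch (not part of upstream): for a request to
+ #
+ #     PUT /pushrules/global/content/my_rule/enabled
+ #
+ # request.postpath is ['pushrules', 'global', 'content', 'my_rule',
+ # 'enabled'] and the function above produces
+ #
+ #     {'scope': 'global', 'template': 'content', 'rule_id': 'my_rule',
+ #      'attr': 'enabled'}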
+
+def _rule_tuple_from_request_object(rule_template, rule_id, req_obj, device=None):
+ if rule_template in ['override', 'underride']:
+ if 'conditions' not in req_obj:
+ raise InvalidRuleException("Missing 'conditions'")
+ conditions = req_obj['conditions']
+ for c in conditions:
+ if 'kind' not in c:
+ raise InvalidRuleException("Condition without 'kind'")
+ elif rule_template == 'room':
+ conditions = [{
+ 'kind': 'event_match',
+ 'key': 'room_id',
+ 'pattern': rule_id
+ }]
+ elif rule_template == 'sender':
+ conditions = [{
+ 'kind': 'event_match',
+ 'key': 'user_id',
+ 'pattern': rule_id
+ }]
+ elif rule_template == 'content':
+ if 'pattern' not in req_obj:
+ raise InvalidRuleException("Content rule missing 'pattern'")
+ pat = req_obj['pattern']
+
+ conditions = [{
+ 'kind': 'event_match',
+ 'key': 'content.body',
+ 'pattern': pat
+ }]
+ else:
+ raise InvalidRuleException("Unknown rule template: %s" % (rule_template,))
+
+ if device:
+ conditions.append({
+ 'kind': 'device',
+ 'profile_tag': device
+ })
+
+ if 'actions' not in req_obj:
+ raise InvalidRuleException("No actions found")
+ actions = req_obj['actions']
+
+ for a in actions:
+ if a in ['notify', 'dont_notify', 'coalesce']:
+ pass
+ elif isinstance(a, dict) and 'set_tweak' in a:
+ pass
+ else:
+ raise InvalidRuleException("Unrecognised action")
+
+ return conditions, actions
+
+
+def _add_empty_priority_class_arrays(d):
+ for pc in PRIORITY_CLASS_MAP.keys():
+ d[pc] = []
+ return d
+
+
+def _profile_tag_from_conditions(conditions):
+ """
+ Given a list of conditions, return the profile tag of the
+ device rule if there is one
+ """
+ for c in conditions:
+ if c['kind'] == 'device':
+ return c['profile_tag']
+ return None
+
+
+def _filter_ruleset_with_path(ruleset, path):
+ if path == []:
+ raise UnrecognizedRequestError(
+ PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
+ )
+
+ if path[0] == '':
+ return ruleset
+ template_kind = path[0]
+ if template_kind not in ruleset:
+ raise UnrecognizedRequestError()
+ path = path[1:]
+ if path == []:
+ raise UnrecognizedRequestError(
+ PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
+ )
+ if path[0] == '':
+ return ruleset[template_kind]
+ rule_id = path[0]
+
+ the_rule = None
+ for r in ruleset[template_kind]:
+ if r['rule_id'] == rule_id:
+ the_rule = r
+ if the_rule is None:
+ raise NotFoundError
+
+ path = path[1:]
+ if len(path) == 0:
+ return the_rule
+
+ attr = path[0]
+ if attr in the_rule:
+ return the_rule[attr]
+ else:
+ raise UnrecognizedRequestError()
+
+
+def _priority_class_from_spec(spec):
+ if spec['template'] not in PRIORITY_CLASS_MAP.keys():
+ raise InvalidRuleException("Unknown template: %s" % (spec['kind']))
+ pc = PRIORITY_CLASS_MAP[spec['template']]
+
+ if spec['scope'] == 'device':
+ pc += len(PRIORITY_CLASS_MAP)
+
+ return pc
+
+
+def _priority_class_to_template_name(pc):
+ if pc > PRIORITY_CLASS_MAP['override']:
+ # per-device
+ prio_class_index = pc - len(PRIORITY_CLASS_MAP)
+ return PRIORITY_CLASS_INVERSE_MAP[prio_class_index]
+ else:
+ return PRIORITY_CLASS_INVERSE_MAP[pc]
+
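+ # Editor's sketch (not part of upstream): assuming PRIORITY_CLASS_MAP is
+ # {'underride': 1, 'sender': 2, 'room': 3, 'content': 4, 'override': 5},
+ # a device-scoped 'content' rule gets priority class 4 + 5 = 9, and
+ # _priority_class_to_template_name(9) maps it back to 'content'.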
+
+def _rule_to_template(rule):
+ unscoped_rule_id = None
+ if 'rule_id' in rule:
+ unscoped_rule_id = _rule_id_from_namespaced(rule['rule_id'])
+
+ template_name = _priority_class_to_template_name(rule['priority_class'])
+ if template_name in ['override', 'underride']:
+ templaterule = {k: rule[k] for k in ["conditions", "actions"]}
+ elif template_name in ["sender", "room"]:
+ templaterule = {'actions': rule['actions']}
+ unscoped_rule_id = rule['conditions'][0]['pattern']
+ elif template_name == 'content':
+ if len(rule["conditions"]) != 1:
+ return None
+ thecond = rule["conditions"][0]
+ if "pattern" not in thecond:
+ return None
+ templaterule = {'actions': rule['actions']}
+ templaterule["pattern"] = thecond["pattern"]
+
+ if unscoped_rule_id:
+ templaterule['rule_id'] = unscoped_rule_id
+ if 'default' in rule:
+ templaterule['default'] = rule['default']
+ return templaterule
+
+
+def _strip_device_condition(rule):
+ for i, c in enumerate(rule['conditions']):
+ if c['kind'] == 'device':
+ del rule['conditions'][i]
+ return rule
+
+
+def _namespaced_rule_id_from_spec(spec):
+ if spec['scope'] == 'global':
+ scope = 'global'
+ else:
+ scope = 'device/%s' % (spec['profile_tag'])
+ return "%s/%s/%s" % (scope, spec['template'], spec['rule_id'])
+
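+ # Editor's note (not part of upstream): e.g. a global 'room' rule for
+ # '!abc:example.com' is stored as 'global/room/!abc:example.com', and
+ # _rule_id_from_namespaced() below recovers the last segment.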
+
+def _rule_id_from_namespaced(in_rule_id):
+ return in_rule_id.split('/')[-1]
+
+
+class InvalidRuleException(Exception):
+ pass
+
+
+# XXX: C+ped from rest/room.py - surely this should be common?
+def _parse_json(request):
+ try:
+ content = json.loads(request.content.read())
+ return content
+ except ValueError:
+ raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
+
+
+def register_servlets(hs, http_server):
+ PushRuleRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
new file mode 100644
index 00000000..a110c0a4
--- /dev/null
+++ b/synapse/rest/client/v1/pusher.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError, Codes
+from synapse.push import PusherConfigException
+from .base import ClientV1RestServlet, client_path_pattern
+
+import simplejson as json
+
+
+class PusherRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/pushers/set$")
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ user, token_id, _ = yield self.auth.get_user_by_req(request)
+
+ content = _parse_json(request)
+
+ pusher_pool = self.hs.get_pusherpool()
+
+ if ('pushkey' in content and 'app_id' in content and
+ 'kind' in content and content['kind'] is None):
+ yield pusher_pool.remove_pusher(
+ content['app_id'], content['pushkey'], user_name=user.to_string()
+ )
+ defer.returnValue((200, {}))
+
+ reqd = ['profile_tag', 'kind', 'app_id', 'app_display_name',
+ 'device_display_name', 'pushkey', 'lang', 'data']
+ missing = []
+ for i in reqd:
+ if i not in content:
+ missing.append(i)
+ if len(missing):
+ raise SynapseError(400, "Missing parameters: "+','.join(missing),
+ errcode=Codes.MISSING_PARAM)
+
+ append = False
+ if 'append' in content:
+ append = content['append']
+
+ if not append:
+ yield pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
+ app_id=content['app_id'],
+ pushkey=content['pushkey'],
+ not_user_id=user.to_string()
+ )
+
+ try:
+ yield pusher_pool.add_pusher(
+ user_name=user.to_string(),
+ access_token=token_id,
+ profile_tag=content['profile_tag'],
+ kind=content['kind'],
+ app_id=content['app_id'],
+ app_display_name=content['app_display_name'],
+ device_display_name=content['device_display_name'],
+ pushkey=content['pushkey'],
+ lang=content['lang'],
+ data=content['data']
+ )
+ except PusherConfigException as pce:
+ raise SynapseError(400, "Config Error: "+pce.message,
+ errcode=Codes.MISSING_PARAM)
+
+ defer.returnValue((200, {}))
+
+ def on_OPTIONS(self, _):
+ return 200, {}
+
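+ # Editor's sketch (not part of upstream): a typical add-pusher body is
+ #
+ #     {"profile_tag": "mobile", "kind": "http", "app_id": "com.example.app",
+ #      "app_display_name": "Example", "device_display_name": "Alice's phone",
+ #      "pushkey": "abc123", "lang": "en",
+ #      "data": {"url": "https://push.example.com/notify"}}
+ #
+ # while sending "kind": null together with app_id and pushkey removes
+ # the pusher instead.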
+
+# XXX: C+ped from rest/room.py - surely this should be common?
+def _parse_json(request):
+ try:
+ content = json.loads(request.content.read())
+ if type(content) != dict:
+ raise SynapseError(400, "Content must be a JSON object.",
+ errcode=Codes.NOT_JSON)
+ return content
+ except ValueError:
+ raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
+
+
+def register_servlets(hs, http_server):
+ PusherRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
new file mode 100644
index 00000000..a56834e3
--- /dev/null
+++ b/synapse/rest/client/v1/register.py
@@ -0,0 +1,368 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains REST servlets to do with registration: /register"""
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError, Codes
+from synapse.api.constants import LoginType
+ from .base import ClientV1RestServlet, client_path_pattern
+import synapse.util.stringutils as stringutils
+
+from synapse.util.async import run_on_reactor
+
+from hashlib import sha1
+import hmac
+import simplejson as json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+ # We ought to be using hmac.compare_digest(), but it doesn't exist on
+ # older Pythons. Using plain string comparison is only a _really minor_
+ # security flaw, because the timing attack is so obscured by all the
+ # other code here that it's unlikely to make much difference.
+if hasattr(hmac, "compare_digest"):
+ compare_digest = hmac.compare_digest
+else:
+ compare_digest = lambda a, b: a == b
+
+
+class RegisterRestServlet(ClientV1RestServlet):
+ """Handles registration with the home server.
+
+ This servlet is in control of the registration flow; the registration
+ handler doesn't have a concept of multi-stages or sessions.
+ """
+
+ PATTERN = client_path_pattern("/register$")
+
+ def __init__(self, hs):
+ super(RegisterRestServlet, self).__init__(hs)
+ # sessions are stored as:
+ # self.sessions = {
+ # "session_id" : { __session_dict__ }
+ # }
+ # TODO: persistent storage
+ self.sessions = {}
+ self.disable_registration = hs.config.disable_registration
+
+ def on_GET(self, request):
+ if self.hs.config.enable_registration_captcha:
+ return (
+ 200,
+ {"flows": [
+ {
+ "type": LoginType.RECAPTCHA,
+ "stages": [
+ LoginType.RECAPTCHA,
+ LoginType.EMAIL_IDENTITY,
+ LoginType.PASSWORD
+ ]
+ },
+ {
+ "type": LoginType.RECAPTCHA,
+ "stages": [LoginType.RECAPTCHA, LoginType.PASSWORD]
+ }
+ ]}
+ )
+ else:
+ return (
+ 200,
+ {"flows": [
+ {
+ "type": LoginType.EMAIL_IDENTITY,
+ "stages": [
+ LoginType.EMAIL_IDENTITY, LoginType.PASSWORD
+ ]
+ },
+ {
+ "type": LoginType.PASSWORD
+ }
+ ]}
+ )
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ register_json = _parse_json(request)
+
+ session = (register_json["session"]
+ if "session" in register_json else None)
+ login_type = None
+ if "type" not in register_json:
+ raise SynapseError(400, "Missing 'type' key.")
+
+ try:
+ login_type = register_json["type"]
+
+ is_application_service = login_type == LoginType.APPLICATION_SERVICE
+ is_using_shared_secret = login_type == LoginType.SHARED_SECRET
+
+ can_register = (
+ not self.disable_registration
+ or is_application_service
+ or is_using_shared_secret
+ )
+ if not can_register:
+ raise SynapseError(403, "Registration has been disabled")
+
+ stages = {
+ LoginType.RECAPTCHA: self._do_recaptcha,
+ LoginType.PASSWORD: self._do_password,
+ LoginType.EMAIL_IDENTITY: self._do_email_identity,
+ LoginType.APPLICATION_SERVICE: self._do_app_service,
+ LoginType.SHARED_SECRET: self._do_shared_secret,
+ }
+
+ session_info = self._get_session_info(request, session)
+ logger.debug("%s : session info %s request info %s",
+ login_type, session_info, register_json)
+ response = yield stages[login_type](
+ request,
+ register_json,
+ session_info
+ )
+
+ if "access_token" not in response:
+ # isn't a final response
+ response["session"] = session_info["id"]
+
+ defer.returnValue((200, response))
+ except KeyError as e:
+ logger.exception(e)
+ raise SynapseError(400, "Missing JSON keys for login type %s." % (
+ login_type,
+ ))
+
+ def on_OPTIONS(self, request):
+ return (200, {})
+
+ def _get_session_info(self, request, session_id):
+ if not session_id:
+ # create a new session
+ while session_id is None or session_id in self.sessions:
+ session_id = stringutils.random_string(24)
+ self.sessions[session_id] = {
+ "id": session_id,
+ LoginType.EMAIL_IDENTITY: False,
+ LoginType.RECAPTCHA: False
+ }
+
+ return self.sessions[session_id]
+
+ def _save_session(self, session):
+ # TODO: Persistent storage
+ logger.debug("Saving session %s", session)
+ self.sessions[session["id"]] = session
+
+ def _remove_session(self, session):
+ logger.debug("Removing session %s", session)
+ self.sessions.pop(session["id"])
+
+ @defer.inlineCallbacks
+ def _do_recaptcha(self, request, register_json, session):
+ if not self.hs.config.enable_registration_captcha:
+ raise SynapseError(400, "Captcha not required.")
+
+ yield self._check_recaptcha(request, register_json, session)
+
+ session[LoginType.RECAPTCHA] = True # mark captcha as done
+ self._save_session(session)
+ defer.returnValue({
+ "next": [LoginType.PASSWORD, LoginType.EMAIL_IDENTITY]
+ })
+
+ @defer.inlineCallbacks
+ def _check_recaptcha(self, request, register_json, session):
+ if ("captcha_bypass_hmac" in register_json and
+ self.hs.config.captcha_bypass_secret):
+ if "user" not in register_json:
+ raise SynapseError(400, "Captcha bypass needs 'user'")
+
+ want = hmac.new(
+ key=self.hs.config.captcha_bypass_secret,
+ msg=register_json["user"],
+ digestmod=sha1,
+ ).hexdigest()
+
+ # str() because otherwise hmac complains that 'unicode' does not
+ # have the buffer interface
+ got = str(register_json["captcha_bypass_hmac"])
+
+ if compare_digest(want, got):
+ session["user"] = register_json["user"]
+ defer.returnValue(None)
+ else:
+ raise SynapseError(
+ 400, "Captcha bypass HMAC incorrect",
+ errcode=Codes.CAPTCHA_NEEDED
+ )
+
+ challenge = None
+ user_response = None
+ try:
+ challenge = register_json["challenge"]
+ user_response = register_json["response"]
+ except KeyError:
+ raise SynapseError(400, "Captcha response is required",
+ errcode=Codes.CAPTCHA_NEEDED)
+
+ ip_addr = self.hs.get_ip_from_request(request)
+
+ handler = self.handlers.registration_handler
+ yield handler.check_recaptcha(
+ ip_addr,
+ self.hs.config.recaptcha_private_key,
+ challenge,
+ user_response
+ )
+
+ @defer.inlineCallbacks
+ def _do_email_identity(self, request, register_json, session):
+ if (self.hs.config.enable_registration_captcha and
+ not session[LoginType.RECAPTCHA]):
+ raise SynapseError(400, "Captcha is required.")
+
+ threepidCreds = register_json['threepidCreds']
+ handler = self.handlers.registration_handler
+ logger.debug("Registering email. threepidcreds: %s" % (threepidCreds))
+ yield handler.register_email(threepidCreds)
+ session["threepidCreds"] = threepidCreds # store creds for next stage
+ session[LoginType.EMAIL_IDENTITY] = True # mark email as done
+ self._save_session(session)
+ defer.returnValue({
+ "next": LoginType.PASSWORD
+ })
+
+ @defer.inlineCallbacks
+ def _do_password(self, request, register_json, session):
+ yield run_on_reactor()
+ if (self.hs.config.enable_registration_captcha and
+ not session[LoginType.RECAPTCHA]):
+ # captcha should've been done by this stage!
+ raise SynapseError(400, "Captcha is required.")
+
+ if ("user" in session and "user" in register_json and
+ session["user"] != register_json["user"]):
+ raise SynapseError(
+ 400, "Cannot change user ID during registration"
+ )
+
+ password = register_json["password"].encode("utf-8")
+ desired_user_id = (
+ register_json["user"].encode("utf-8")
+ if "user" in register_json else None
+ )
+
+ handler = self.handlers.registration_handler
+ (user_id, token) = yield handler.register(
+ localpart=desired_user_id,
+ password=password
+ )
+
+ if session[LoginType.EMAIL_IDENTITY]:
+ logger.debug("Binding emails %s to %s" % (
+ session["threepidCreds"], user_id)
+ )
+ yield handler.bind_emails(user_id, session["threepidCreds"])
+
+ result = {
+ "user_id": user_id,
+ "access_token": token,
+ "home_server": self.hs.hostname,
+ }
+ self._remove_session(session)
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def _do_app_service(self, request, register_json, session):
+ if "access_token" not in request.args:
+ raise SynapseError(400, "Expected application service token.")
+ if "user" not in register_json:
+ raise SynapseError(400, "Expected 'user' key.")
+
+ as_token = request.args["access_token"][0]
+ user_localpart = register_json["user"].encode("utf-8")
+
+ handler = self.handlers.registration_handler
+ (user_id, token) = yield handler.appservice_register(
+ user_localpart, as_token
+ )
+ self._remove_session(session)
+ defer.returnValue({
+ "user_id": user_id,
+ "access_token": token,
+ "home_server": self.hs.hostname,
+ })
+
+ @defer.inlineCallbacks
+ def _do_shared_secret(self, request, register_json, session):
+ yield run_on_reactor()
+
+ if not isinstance(register_json.get("mac", None), basestring):
+ raise SynapseError(400, "Expected mac.")
+ if not isinstance(register_json.get("user", None), basestring):
+ raise SynapseError(400, "Expected 'user' key.")
+ if not isinstance(register_json.get("password", None), basestring):
+ raise SynapseError(400, "Expected 'password' key.")
+
+ if not self.hs.config.registration_shared_secret:
+ raise SynapseError(400, "Shared secret registration is not enabled")
+
+ user = register_json["user"].encode("utf-8")
+
+ # str() because otherwise hmac complains that 'unicode' does not
+ # have the buffer interface
+ got_mac = str(register_json["mac"])
+
+ want_mac = hmac.new(
+ key=self.hs.config.registration_shared_secret,
+ msg=user,
+ digestmod=sha1,
+ ).hexdigest()
+
+ password = register_json["password"].encode("utf-8")
+
+ if compare_digest(want_mac, got_mac):
+ handler = self.handlers.registration_handler
+ user_id, token = yield handler.register(
+ localpart=user,
+ password=password,
+ )
+ self._remove_session(session)
+ defer.returnValue({
+ "user_id": user_id,
+ "access_token": token,
+ "home_server": self.hs.hostname,
+ })
+ else:
+ raise SynapseError(
+ 403, "HMAC incorrect",
+ )
+
+
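+# Illustrative sketch (not part of the upstream API): how a trusted client
+# that knows the server's registration_shared_secret would compute the "mac"
+# field checked by _do_shared_secret above.
+def _example_shared_secret_mac(shared_secret, user):
+ """Return the hex HMAC-SHA1 a client sends as register_json["mac"]."""
+ return hmac.new(
+ key=shared_secret,
+ msg=user,
+ digestmod=sha1,
+ ).hexdigest()
+
+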
+def _parse_json(request):
+ try:
+ content = json.loads(request.content.read())
+ if type(content) != dict:
+ raise SynapseError(400, "Content must be a JSON object.")
+ return content
+ except ValueError:
+ raise SynapseError(400, "Content not JSON.")
+
+
+def register_servlets(hs, http_server):
+ RegisterRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
new file mode 100644
index 00000000..139dac1c
--- /dev/null
+++ b/synapse/rest/client/v1/room.py
@@ -0,0 +1,684 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" This module contains REST servlets to do with rooms: /rooms/<paths> """
+from twisted.internet import defer
+
+from base import ClientV1RestServlet, client_path_pattern
+from synapse.api.errors import SynapseError, Codes, AuthError
+from synapse.streams.config import PaginationConfig
+from synapse.api.constants import EventTypes, Membership
+from synapse.types import UserID, RoomID, RoomAlias
+from synapse.events.utils import serialize_event
+
+import simplejson as json
+import logging
+import urllib
+
+logger = logging.getLogger(__name__)
+
+
+class RoomCreateRestServlet(ClientV1RestServlet):
+ # No PATTERN; we have custom dispatch rules here
+
+ def register(self, http_server):
+ PATTERN = "/createRoom"
+ register_txn_path(self, PATTERN, http_server)
+ # define CORS for all of /rooms in RoomCreateRestServlet for simplicity
+ http_server.register_path("OPTIONS",
+ client_path_pattern("/rooms(?:/.*)?$"),
+ self.on_OPTIONS)
+ # define CORS for /createRoom[/txnid]
+ http_server.register_path("OPTIONS",
+ client_path_pattern("/createRoom(?:/.*)?$"),
+ self.on_OPTIONS)
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, txn_id):
+ try:
+ defer.returnValue(
+ self.txns.get_client_transaction(request, txn_id)
+ )
+ except KeyError:
+ pass
+
+ response = yield self.on_POST(request)
+
+ self.txns.store_client_transaction(request, txn_id, response)
+ defer.returnValue(response)
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+
+ room_config = self.get_room_config(request)
+ info = yield self.make_room(room_config, auth_user, None)
+ room_config.update(info)
+ defer.returnValue((200, info))
+
+ @defer.inlineCallbacks
+ def make_room(self, room_config, auth_user, room_id):
+ handler = self.handlers.room_creation_handler
+ info = yield handler.create_room(
+ user_id=auth_user.to_string(),
+ room_id=room_id,
+ config=room_config
+ )
+ defer.returnValue(info)
+
+ def get_room_config(self, request):
+ try:
+ user_supplied_config = json.loads(request.content.read())
+ if "visibility" not in user_supplied_config:
+ # default visibility
+ user_supplied_config["visibility"] = "public"
+ return user_supplied_config
+ except (ValueError, TypeError):
+ raise SynapseError(400, "Body must be JSON.",
+ errcode=Codes.BAD_JSON)
+
+ def on_OPTIONS(self, request):
+ return (200, {})
+
+
+# TODO: Needs unit testing for generic events
+class RoomStateEventRestServlet(ClientV1RestServlet):
+ def register(self, http_server):
+ # /room/$roomid/state/$eventtype
+ no_state_key = "/rooms/(?P<room_id>[^/]*)/state/(?P<event_type>[^/]*)$"
+
+ # /room/$roomid/state/$eventtype/$statekey
+ state_key = ("/rooms/(?P<room_id>[^/]*)/state/"
+ "(?P<event_type>[^/]*)/(?P<state_key>[^/]*)$")
+
+ http_server.register_path("GET",
+ client_path_pattern(state_key),
+ self.on_GET)
+ http_server.register_path("PUT",
+ client_path_pattern(state_key),
+ self.on_PUT)
+ http_server.register_path("GET",
+ client_path_pattern(no_state_key),
+ self.on_GET_no_state_key)
+ http_server.register_path("PUT",
+ client_path_pattern(no_state_key),
+ self.on_PUT_no_state_key)
+
+ def on_GET_no_state_key(self, request, room_id, event_type):
+ return self.on_GET(request, room_id, event_type, "")
+
+ def on_PUT_no_state_key(self, request, room_id, event_type):
+ return self.on_PUT(request, room_id, event_type, "")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_id, event_type, state_key):
+ user, _, is_guest = yield self.auth.get_user_by_req(request, allow_guest=True)
+
+ msg_handler = self.handlers.message_handler
+ data = yield msg_handler.get_room_data(
+ user_id=user.to_string(),
+ room_id=room_id,
+ event_type=event_type,
+ state_key=state_key,
+ is_guest=is_guest,
+ )
+
+ if not data:
+ raise SynapseError(
+ 404, "Event not found.", errcode=Codes.NOT_FOUND
+ )
+ defer.returnValue((200, data.get_dict()["content"]))
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
+ user, token_id, _ = yield self.auth.get_user_by_req(request)
+
+ content = _parse_json(request)
+
+ event_dict = {
+ "type": event_type,
+ "content": content,
+ "room_id": room_id,
+ "sender": user.to_string(),
+ }
+
+ if state_key is not None:
+ event_dict["state_key"] = state_key
+
+ msg_handler = self.handlers.message_handler
+ yield msg_handler.create_and_send_event(
+ event_dict, token_id=token_id, txn_id=txn_id,
+ )
+
+ defer.returnValue((200, {}))
+
+
+# TODO: Needs unit testing for generic events + feedback
+class RoomSendEventRestServlet(ClientV1RestServlet):
+
+ def register(self, http_server):
+ # /rooms/$roomid/send/$event_type[/$txn_id]
+ PATTERN = ("/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)")
+ register_txn_path(self, PATTERN, http_server, with_get=True)
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, room_id, event_type, txn_id=None):
+ user, token_id, _ = yield self.auth.get_user_by_req(request, allow_guest=True)
+ content = _parse_json(request)
+
+ msg_handler = self.handlers.message_handler
+ event = yield msg_handler.create_and_send_event(
+ {
+ "type": event_type,
+ "content": content,
+ "room_id": room_id,
+ "sender": user.to_string(),
+ },
+ token_id=token_id,
+ txn_id=txn_id,
+ )
+
+ defer.returnValue((200, {"event_id": event.event_id}))
+
+ def on_GET(self, request, room_id, event_type, txn_id):
+ return (200, "Not implemented")
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, room_id, event_type, txn_id):
+ try:
+ defer.returnValue(
+ self.txns.get_client_transaction(request, txn_id)
+ )
+ except KeyError:
+ pass
+
+ response = yield self.on_POST(request, room_id, event_type, txn_id)
+
+ self.txns.store_client_transaction(request, txn_id, response)
+ defer.returnValue(response)
+
+
+# TODO: Needs unit testing for room ID + alias joins
+class JoinRoomAliasServlet(ClientV1RestServlet):
+
+ def register(self, http_server):
+ # /join/$room_identifier[/$txn_id]
+ PATTERN = ("/join/(?P<room_identifier>[^/]*)")
+ register_txn_path(self, PATTERN, http_server)
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, room_identifier, txn_id=None):
+ user, token_id, is_guest = yield self.auth.get_user_by_req(
+ request,
+ allow_guest=True
+ )
+
+ # the identifier could be a room alias or a room id. Try one then the
+ # other if it fails to parse, without swallowing other valid
+ # SynapseErrors.
+
+ identifier = None
+ is_room_alias = False
+ try:
+ identifier = RoomAlias.from_string(room_identifier)
+ is_room_alias = True
+ except SynapseError:
+ identifier = RoomID.from_string(room_identifier)
+
+ # TODO: Support for specifying the home server to join with?
+
+ if is_room_alias:
+ handler = self.handlers.room_member_handler
+ ret_dict = yield handler.join_room_alias(user, identifier)
+ defer.returnValue((200, ret_dict))
+ else: # room id
+ msg_handler = self.handlers.message_handler
+ content = {"membership": Membership.JOIN}
+ if is_guest:
+ content["kind"] = "guest"
+ yield msg_handler.create_and_send_event(
+ {
+ "type": EventTypes.Member,
+ "content": content,
+ "room_id": identifier.to_string(),
+ "sender": user.to_string(),
+ "state_key": user.to_string(),
+ },
+ token_id=token_id,
+ txn_id=txn_id,
+ is_guest=is_guest,
+ )
+
+ defer.returnValue((200, {"room_id": identifier.to_string()}))
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, room_identifier, txn_id):
+ try:
+ defer.returnValue(
+ self.txns.get_client_transaction(request, txn_id)
+ )
+ except KeyError:
+ pass
+
+ response = yield self.on_POST(request, room_identifier, txn_id)
+
+ self.txns.store_client_transaction(request, txn_id, response)
+ defer.returnValue(response)
+
+
+# TODO: Needs unit testing
+class PublicRoomListRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/publicRooms$")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ handler = self.handlers.room_list_handler
+ data = yield handler.get_public_room_list()
+ defer.returnValue((200, data))
+
+
+# TODO: Needs unit testing
+class RoomMemberListRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/rooms/(?P<room_id>[^/]*)/members$")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_id):
+ # TODO support Pagination stream API (limit/tokens)
+ user, _, _ = yield self.auth.get_user_by_req(request)
+ handler = self.handlers.message_handler
+ events = yield handler.get_state_events(
+ room_id=room_id,
+ user_id=user.to_string(),
+ )
+
+ chunk = []
+
+ for event in events:
+ if event["type"] != EventTypes.Member:
+ continue
+ chunk.append(event)
+ # FIXME: should probably be state_key here, not user_id
+ target_user = UserID.from_string(event["user_id"])
+ # Presence is an optional cache; don't fail if we can't fetch it
+ try:
+ presence_handler = self.handlers.presence_handler
+ presence_state = yield presence_handler.get_state(
+ target_user=target_user, auth_user=user
+ )
+ event["content"].update(presence_state)
+ except Exception:
+ pass
+
+ defer.returnValue((200, {
+ "chunk": chunk
+ }))
+
+
+# TODO: Needs better unit testing
+class RoomMessageListRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/rooms/(?P<room_id>[^/]*)/messages$")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_id):
+ user, _, is_guest = yield self.auth.get_user_by_req(request, allow_guest=True)
+ pagination_config = PaginationConfig.from_request(
+ request, default_limit=10,
+ )
+ as_client_event = "raw" not in request.args
+ handler = self.handlers.message_handler
+ msgs = yield handler.get_messages(
+ room_id=room_id,
+ user_id=user.to_string(),
+ is_guest=is_guest,
+ pagin_config=pagination_config,
+ as_client_event=as_client_event
+ )
+
+ defer.returnValue((200, msgs))
+
+
+# TODO: Needs unit testing
+class RoomStateRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/rooms/(?P<room_id>[^/]*)/state$")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_id):
+ user, _, is_guest = yield self.auth.get_user_by_req(request, allow_guest=True)
+ handler = self.handlers.message_handler
+ # Get all the current state for this room
+ events = yield handler.get_state_events(
+ room_id=room_id,
+ user_id=user.to_string(),
+ is_guest=is_guest,
+ )
+ defer.returnValue((200, events))
+
+
+# TODO: Needs unit testing
+class RoomInitialSyncRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/rooms/(?P<room_id>[^/]*)/initialSync$")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_id):
+ user, _, is_guest = yield self.auth.get_user_by_req(request, allow_guest=True)
+ pagination_config = PaginationConfig.from_request(request)
+ content = yield self.handlers.message_handler.room_initial_sync(
+ room_id=room_id,
+ user_id=user.to_string(),
+ pagin_config=pagination_config,
+ is_guest=is_guest,
+ )
+ defer.returnValue((200, content))
+
+
+class RoomTriggerBackfill(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/rooms/(?P<room_id>[^/]*)/backfill$")
+
+ def __init__(self, hs):
+ super(RoomTriggerBackfill, self).__init__(hs)
+ self.clock = hs.get_clock()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_id):
+ remote_server = urllib.unquote(
+ request.args["remote"][0]
+ ).decode("UTF-8")
+
+ limit = int(request.args["limit"][0])
+
+ handler = self.handlers.federation_handler
+ events = yield handler.backfill(remote_server, room_id, limit)
+
+ time_now = self.clock.time_msec()
+
+ res = [serialize_event(event, time_now) for event in events]
+ defer.returnValue((200, res))
+
+
+class RoomEventContext(ClientV1RestServlet):
+ PATTERN = client_path_pattern(
+ "/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$"
+ )
+
+ def __init__(self, hs):
+ super(RoomEventContext, self).__init__(hs)
+ self.clock = hs.get_clock()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_id, event_id):
+ user, _, is_guest = yield self.auth.get_user_by_req(request, allow_guest=True)
+
+ limit = int(request.args.get("limit", [10])[0])
+
+ results = yield self.handlers.room_context_handler.get_event_context(
+ user, room_id, event_id, limit, is_guest
+ )
+
+ time_now = self.clock.time_msec()
+ results["events_before"] = [
+ serialize_event(event, time_now) for event in results["events_before"]
+ ]
+ results["events_after"] = [
+ serialize_event(event, time_now) for event in results["events_after"]
+ ]
+ results["state"] = [
+ serialize_event(event, time_now) for event in results["state"]
+ ]
+
+ logger.info("Responding with %r", results)
+
+ defer.returnValue((200, results))
+
+
+# TODO: Needs unit testing
+class RoomMembershipRestServlet(ClientV1RestServlet):
+
+ def register(self, http_server):
+ # /rooms/$roomid/[invite|join|leave]
+ PATTERN = ("/rooms/(?P<room_id>[^/]*)/"
+ "(?P<membership_action>join|invite|leave|ban|kick)")
+ register_txn_path(self, PATTERN, http_server)
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, room_id, membership_action, txn_id=None):
+ user, token_id, is_guest = yield self.auth.get_user_by_req(
+ request,
+ allow_guest=True
+ )
+
+ if is_guest and membership_action not in {Membership.JOIN, Membership.LEAVE}:
+ raise AuthError(403, "Guest access not allowed")
+
+ content = _parse_json(request)
+
+ # target user is you unless it is an invite
+ state_key = user.to_string()
+
+ if membership_action == "invite" and self._has_3pid_invite_keys(content):
+ yield self.handlers.room_member_handler.do_3pid_invite(
+ room_id,
+ user,
+ content["medium"],
+ content["address"],
+ content["id_server"],
+ token_id,
+ txn_id
+ )
+ defer.returnValue((200, {}))
+ return
+ elif membership_action in ["invite", "ban", "kick"]:
+ if "user_id" in content:
+ state_key = content["user_id"]
+ else:
+ raise SynapseError(400, "Missing user_id key.")
+
+ # make sure it looks like a user ID; it'll throw if it's invalid.
+ UserID.from_string(state_key)
+
+ if membership_action == "kick":
+ membership_action = "leave"
+
+ msg_handler = self.handlers.message_handler
+
+ content = {"membership": unicode(membership_action)}
+ if is_guest:
+ content["kind"] = "guest"
+
+ yield msg_handler.create_and_send_event(
+ {
+ "type": EventTypes.Member,
+ "content": content,
+ "room_id": room_id,
+ "sender": user.to_string(),
+ "state_key": state_key,
+ },
+ token_id=token_id,
+ txn_id=txn_id,
+ is_guest=is_guest,
+ )
+
+ defer.returnValue((200, {}))
+
+ def _has_3pid_invite_keys(self, content):
+ for key in {"id_server", "medium", "address"}:
+ if key not in content:
+ return False
+ return True
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, room_id, membership_action, txn_id):
+ try:
+ defer.returnValue(
+ self.txns.get_client_transaction(request, txn_id)
+ )
+ except KeyError:
+ pass
+
+ response = yield self.on_POST(
+ request, room_id, membership_action, txn_id
+ )
+
+ self.txns.store_client_transaction(request, txn_id, response)
+ defer.returnValue(response)
+
+
+class RoomRedactEventRestServlet(ClientV1RestServlet):
+ def register(self, http_server):
+ PATTERN = ("/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)")
+ register_txn_path(self, PATTERN, http_server)
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, room_id, event_id, txn_id=None):
+ user, token_id, _ = yield self.auth.get_user_by_req(request)
+ content = _parse_json(request)
+
+ msg_handler = self.handlers.message_handler
+ event = yield msg_handler.create_and_send_event(
+ {
+ "type": EventTypes.Redaction,
+ "content": content,
+ "room_id": room_id,
+ "sender": user.to_string(),
+ "redacts": event_id,
+ },
+ token_id=token_id,
+ txn_id=txn_id,
+ )
+
+ defer.returnValue((200, {"event_id": event.event_id}))
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, room_id, event_id, txn_id):
+ try:
+ defer.returnValue(
+ self.txns.get_client_transaction(request, txn_id)
+ )
+ except KeyError:
+ pass
+
+ response = yield self.on_POST(request, room_id, event_id, txn_id)
+
+ self.txns.store_client_transaction(request, txn_id, response)
+ defer.returnValue(response)
+
+
+class RoomTypingRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern(
+ "/rooms/(?P<room_id>[^/]*)/typing/(?P<user_id>[^/]*)$"
+ )
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, room_id, user_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+
+ room_id = urllib.unquote(room_id)
+ target_user = UserID.from_string(urllib.unquote(user_id))
+
+ content = _parse_json(request)
+
+ typing_handler = self.handlers.typing_notification_handler
+
+ if content["typing"]:
+ yield typing_handler.started_typing(
+ target_user=target_user,
+ auth_user=auth_user,
+ room_id=room_id,
+ timeout=content.get("timeout", 30000),
+ )
+ else:
+ yield typing_handler.stopped_typing(
+ target_user=target_user,
+ auth_user=auth_user,
+ room_id=room_id,
+ )
+
+ defer.returnValue((200, {}))
+
+
+class SearchRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern(
+ "/search$"
+ )
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+
+ content = _parse_json(request)
+
+ batch = request.args.get("next_batch", [None])[0]
+ results = yield self.handlers.search_handler.search(auth_user, content, batch)
+
+ defer.returnValue((200, results))
+
+
+def _parse_json(request):
+ try:
+ content = json.loads(request.content.read())
+ if type(content) != dict:
+ raise SynapseError(400, "Content must be a JSON object.",
+ errcode=Codes.NOT_JSON)
+ return content
+ except ValueError:
+ raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
+
+
+def register_txn_path(servlet, regex_string, http_server, with_get=False):
+ """Registers a transaction-based path.
+
+ This registers two paths:
+ PUT regex_string/$txnid
+ POST regex_string
+
+ Args:
+ regex_string (str): The regex string to register. Must NOT have a
+ trailing $ as this string will be appended to.
+ http_server : The http_server to register paths with.
+ with_get: True to also register respective GET paths for the PUTs.
+ """
+ http_server.register_path(
+ "POST",
+ client_path_pattern(regex_string + "$"),
+ servlet.on_POST
+ )
+ http_server.register_path(
+ "PUT",
+ client_path_pattern(regex_string + "/(?P<txn_id>[^/]*)$"),
+ servlet.on_PUT
+ )
+ if with_get:
+ http_server.register_path(
+ "GET",
+ client_path_pattern(regex_string + "/(?P<txn_id>[^/]*)$"),
+ servlet.on_GET
+ )
+
+
+def register_servlets(hs, http_server):
+ RoomStateEventRestServlet(hs).register(http_server)
+ RoomCreateRestServlet(hs).register(http_server)
+ RoomMemberListRestServlet(hs).register(http_server)
+ RoomMessageListRestServlet(hs).register(http_server)
+ JoinRoomAliasServlet(hs).register(http_server)
+ RoomTriggerBackfill(hs).register(http_server)
+ RoomMembershipRestServlet(hs).register(http_server)
+ RoomSendEventRestServlet(hs).register(http_server)
+ PublicRoomListRestServlet(hs).register(http_server)
+ RoomStateRestServlet(hs).register(http_server)
+ RoomInitialSyncRestServlet(hs).register(http_server)
+ RoomRedactEventRestServlet(hs).register(http_server)
+ RoomTypingRestServlet(hs).register(http_server)
+ SearchRestServlet(hs).register(http_server)
+ RoomEventContext(hs).register(http_server)
diff --git a/synapse/rest/client/v1/transactions.py b/synapse/rest/client/v1/transactions.py
new file mode 100644
index 00000000..b861069b
--- /dev/null
+++ b/synapse/rest/client/v1/transactions.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains logic for storing HTTP PUT transactions. This is used
+to ensure idempotency when performing PUTs using the REST API."""
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+# FIXME: elsewhere we use FooStore to indicate something in the storage layer...
+class HttpTransactionStore(object):
+
+ def __init__(self):
+ # { key : (txn_id, response) }
+ self.transactions = {}
+
+ def get_response(self, key, txn_id):
+ """Retrieve a response for this request.
+
+ Args:
+ key (str): A transaction-independent key for this request. Usually
+ this is a combination of the path (without the transaction id)
+ and the user's access token.
+ txn_id (str): The transaction ID for this request
+ Returns:
+ A tuple of (HTTP response code, response content) or None.
+ """
+ try:
+ logger.debug("get_response TxnId: %s", txn_id)
+ (last_txn_id, response) = self.transactions[key]
+ if txn_id == last_txn_id:
+ logger.info("get_response: Returning a response for %s", txn_id)
+ return response
+ except KeyError:
+ pass
+ return None
+
+ def store_response(self, key, txn_id, response):
+ """Stores an HTTP response tuple.
+
+ Args:
+ key (str): A transaction-independent key for this request. Usually
+ this is a combination of the path (without the transaction id)
+ and the user's access token.
+ txn_id (str): The transaction ID for this request.
+ response (tuple): A tuple of (HTTP response code, response content)
+ """
+ logger.debug("store_response TxnId: %s", txn_id)
+ self.transactions[key] = (txn_id, response)
+
+ def store_client_transaction(self, request, txn_id, response):
+ """Stores the request/response pair of an HTTP transaction.
+
+ Args:
+ request (twisted.web.http.Request): The twisted HTTP request. This
+ request must have the transaction ID as the last path segment.
+ txn_id (str): The transaction ID for this request.
+ response (tuple): A tuple of (response code, response dict)
+ """
+ self.store_response(self._get_key(request), txn_id, response)
+
+ def get_client_transaction(self, request, txn_id):
+ """Retrieves a stored response if there was one.
+
+ Args:
+ request (twisted.web.http.Request): The twisted HTTP request. This
+ request must have the transaction ID as the last path segment.
+ txn_id (str): The transaction ID for this request.
+ Returns:
+ The response tuple.
+ Raises:
+ KeyError if the transaction was not found.
+ """
+ response = self.get_response(self._get_key(request), txn_id)
+ if response is None:
+ raise KeyError("Transaction not found.")
+ return response
+
+ def _get_key(self, request):
+ token = request.args["access_token"][0]
+ path_without_txn_id = request.path.rsplit("/", 1)[0]
+ return path_without_txn_id + "/" + token
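+
+
+# Usage sketch (illustrative values): responses are keyed on path + access
+# token, so replaying a PUT with the same txn_id returns the stored response
+# instead of re-executing the request.
+#
+#   store = HttpTransactionStore()
+#   store.store_response("/send/atoken", "txn1", (200, {"event_id": "$e"}))
+#   store.get_response("/send/atoken", "txn1")  # -> (200, {"event_id": "$e"})
+#   store.get_response("/send/atoken", "txn2")  # -> None (new transaction)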
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py
new file mode 100644
index 00000000..eb7c57ca
--- /dev/null
+++ b/synapse/rest/client/v1/voip.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from base import ClientV1RestServlet, client_path_pattern
+
+
+import hmac
+import hashlib
+import base64
+
+
+class VoipRestServlet(ClientV1RestServlet):
+ PATTERN = client_path_pattern("/voip/turnServer$")
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+
+ turnUris = self.hs.config.turn_uris
+ turnSecret = self.hs.config.turn_shared_secret
+ userLifetime = self.hs.config.turn_user_lifetime
+ if not turnUris or not turnSecret or not userLifetime:
+ defer.returnValue((200, {}))
+
+ expiry = (self.hs.get_clock().time_msec() + userLifetime) / 1000
+ username = "%d:%s" % (expiry, auth_user.to_string())
+
+ mac = hmac.new(turnSecret, msg=username, digestmod=hashlib.sha1)
+ # We need standard padded base64 encoding here, rather than the
+ # unpadded encode_base64 used elsewhere, because the TURN server
+ # expects standard padding when it recomputes the password.
+ password = base64.b64encode(mac.digest())
+
+ defer.returnValue((200, {
+ 'username': username,
+ 'password': password,
+ 'ttl': userLifetime / 1000,
+ 'uris': turnUris,
+ }))
+
+ def on_OPTIONS(self, request):
+ return (200, {})
+
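+# Illustrative check (assuming a TURN server configured with the same shared
+# secret): the credentials above follow the common time-limited TURN
+# credential convention, so the TURN server can recompute the password from
+# the username alone:
+#
+#   mac = hmac.new(turn_secret, msg=username, digestmod=hashlib.sha1)
+#   assert base64.b64encode(mac.digest()) == password
+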
+
+def register_servlets(hs, http_server):
+ VoipRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/__init__.py b/synapse/rest/client/v2_alpha/__init__.py
new file mode 100644
index 00000000..a1081323
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/__init__.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import (
+ sync,
+ filter,
+ account,
+ register,
+ auth,
+ receipts,
+ keys,
+ tokenrefresh,
+ tags,
+)
+
+from synapse.http.server import JsonResource
+
+
+class ClientV2AlphaRestResource(JsonResource):
+ """A resource for version 2 alpha of the matrix client API."""
+
+ def __init__(self, hs):
+ JsonResource.__init__(self, hs, canonical_json=False)
+ self.register_servlets(self, hs)
+
+ @staticmethod
+ def register_servlets(client_resource, hs):
+ sync.register_servlets(hs, client_resource)
+ filter.register_servlets(hs, client_resource)
+ account.register_servlets(hs, client_resource)
+ register.register_servlets(hs, client_resource)
+ auth.register_servlets(hs, client_resource)
+ receipts.register_servlets(hs, client_resource)
+ keys.register_servlets(hs, client_resource)
+ tokenrefresh.register_servlets(hs, client_resource)
+ tags.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py
new file mode 100644
index 00000000..4540e8dc
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/_base.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains base REST classes for constructing client v1 servlets.
+"""
+
+from synapse.api.urls import CLIENT_V2_ALPHA_PREFIX
+from synapse.api.errors import SynapseError
+import re
+
+import logging
+import simplejson
+
+
+logger = logging.getLogger(__name__)
+
+
+def client_v2_pattern(path_regex):
+ """Creates a regex compiled client path with the correct client path
+ prefix.
+
+ Args:
+ path_regex (str): The regex string to match. This should NOT have a ^
+ as this will be prefixed.
+ Returns:
+ SRE_Pattern
+ """
+ return re.compile("^" + CLIENT_V2_ALPHA_PREFIX + path_regex)
+
+
+def parse_request_allow_empty(request):
+ content = request.content.read()
+ if content is None or content == '':
+ return None
+ try:
+ return simplejson.loads(content)
+ except simplejson.JSONDecodeError:
+ raise SynapseError(400, "Content not JSON.")
+
+
+def parse_json_dict_from_request(request):
+ try:
+ content = simplejson.loads(request.content.read())
+ if type(content) != dict:
+ raise SynapseError(400, "Content must be a JSON object.")
+ return content
+ except simplejson.JSONDecodeError:
+ raise SynapseError(400, "Content not JSON.")
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
new file mode 100644
index 00000000..1970ad34
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.constants import LoginType
+from synapse.api.errors import LoginError, SynapseError, Codes
+from synapse.http.servlet import RestServlet
+from synapse.util.async import run_on_reactor
+
+from ._base import client_v2_pattern, parse_json_dict_from_request
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class PasswordRestServlet(RestServlet):
+ PATTERN = client_v2_pattern("/account/password")
+
+ def __init__(self, hs):
+ super(PasswordRestServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.auth_handler = hs.get_handlers().auth_handler
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ yield run_on_reactor()
+
+ body = parse_json_dict_from_request(request)
+
+ authed, result, params = yield self.auth_handler.check_auth([
+ [LoginType.PASSWORD],
+ [LoginType.EMAIL_IDENTITY]
+ ], body, self.hs.get_ip_from_request(request))
+
+ if not authed:
+ defer.returnValue((401, result))
+
+ user_id = None
+
+ if LoginType.PASSWORD in result:
+ # if using password, they should also be logged in
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ if auth_user.to_string() != result[LoginType.PASSWORD]:
+ raise LoginError(400, "", Codes.UNKNOWN)
+ user_id = auth_user.to_string()
+ elif LoginType.EMAIL_IDENTITY in result:
+ threepid = result[LoginType.EMAIL_IDENTITY]
+ if 'medium' not in threepid or 'address' not in threepid:
+ raise SynapseError(500, "Malformed threepid")
+ # if using email, we must know about the email they're authing with!
+ threepid_user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
+ threepid['medium'], threepid['address']
+ )
+ if not threepid_user_id:
+ raise SynapseError(404, "Email address not found", Codes.NOT_FOUND)
+ user_id = threepid_user_id
+ else:
+ logger.error("Auth succeeded but no known type!", result.keys())
+ raise SynapseError(500, "", Codes.UNKNOWN)
+
+ if 'new_password' not in params:
+ raise SynapseError(400, "", Codes.MISSING_PARAM)
+ new_password = params['new_password']
+
+ yield self.auth_handler.set_password(
+ user_id, new_password
+ )
+
+ defer.returnValue((200, {}))
+
+ def on_OPTIONS(self, _):
+ return 200, {}
+
+
+class ThreepidRestServlet(RestServlet):
+ PATTERN = client_v2_pattern("/account/3pid")
+
+ def __init__(self, hs):
+ super(ThreepidRestServlet, self).__init__()
+ self.hs = hs
+ self.identity_handler = hs.get_handlers().identity_handler
+ self.auth = hs.get_auth()
+ self.auth_handler = hs.get_handlers().auth_handler
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ yield run_on_reactor()
+
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+
+ threepids = yield self.hs.get_datastore().user_get_threepids(
+ auth_user.to_string()
+ )
+
+ defer.returnValue((200, {'threepids': threepids}))
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ yield run_on_reactor()
+
+ body = parse_json_dict_from_request(request)
+
+ if 'threePidCreds' not in body:
+ raise SynapseError(400, "Missing param", Codes.MISSING_PARAM)
+ threePidCreds = body['threePidCreds']
+
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+
+ threepid = yield self.identity_handler.threepid_from_creds(threePidCreds)
+
+ if not threepid:
+ raise SynapseError(
+ 400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED
+ )
+
+ for reqd in ['medium', 'address', 'validated_at']:
+ if reqd not in threepid:
+ logger.warn("Couldn't add 3pid: invalid response from ID sevrer")
+ raise SynapseError(500, "Invalid response from ID Server")
+
+ yield self.auth_handler.add_threepid(
+ auth_user.to_string(),
+ threepid['medium'],
+ threepid['address'],
+ threepid['validated_at'],
+ )
+
+ if 'bind' in body and body['bind']:
+ logger.debug(
+ "Binding emails %s to %s",
+ threepid, auth_user.to_string()
+ )
+ yield self.identity_handler.bind_threepid(
+ threePidCreds, auth_user.to_string()
+ )
+
+ defer.returnValue((200, {}))
+
+
+def register_servlets(hs, http_server):
+ PasswordRestServlet(hs).register(http_server)
+ ThreepidRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
new file mode 100644
index 00000000..4c726f05
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.constants import LoginType
+from synapse.api.errors import SynapseError
+from synapse.api.urls import CLIENT_V2_ALPHA_PREFIX
+from synapse.http.servlet import RestServlet
+
+from ._base import client_v2_pattern
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+RECAPTCHA_TEMPLATE = """
+<html>
+<head>
+<title>Authentication</title>
+<meta name='viewport' content='width=device-width, initial-scale=1,
+ user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+<script src="https://www.google.com/recaptcha/api.js"
+ async defer></script>
+<script src="//code.jquery.com/jquery-1.11.2.min.js"></script>
+<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
+<script>
+function captchaDone() {
+ $('#registrationForm').submit();
+}
+</script>
+</head>
+<body>
+<form id="registrationForm" method="post" action="%(myurl)s">
+ <div>
+ <p>
+ Hello! We need to prevent computer programs and other automated
+ things from creating accounts on this server.
+ </p>
+ <p>
+ Please verify that you're not a robot.
+ </p>
+ <input type="hidden" name="session" value="%(session)s" />
+ <div class="g-recaptcha"
+ data-sitekey="%(sitekey)s"
+ data-callback="captchaDone">
+ </div>
+ <noscript>
+ <input type="submit" value="All Done" />
+ </noscript>
+ </div>
+</form>
+</body>
+</html>
+"""
+
+SUCCESS_TEMPLATE = """
+<html>
+<head>
+<title>Success!</title>
+<meta name='viewport' content='width=device-width, initial-scale=1,
+ user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
+<script>
+if (window.onAuthDone != undefined) {
+ window.onAuthDone();
+}
+</script>
+</head>
+<body>
+ <div>
+ <p>Thank you</p>
+ <p>You may now close this window and return to the application</p>
+ </div>
+</body>
+</html>
+"""
+
+
+class AuthRestServlet(RestServlet):
+ """
+ Handles Client / Server API authentication in any situations where it
+ cannot be handled in the normal flow (with requests to the same endpoint).
+ Current use is for web fallback auth.
+ """
+ PATTERN = client_v2_pattern("/auth/(?P<stagetype>[\w\.]*)/fallback/web")
+
+ def __init__(self, hs):
+ super(AuthRestServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.auth_handler = hs.get_handlers().auth_handler
+ self.registration_handler = hs.get_handlers().registration_handler
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, stagetype):
+ yield
+ if stagetype == LoginType.RECAPTCHA:
+ if ('session' not in request.args or
+ len(request.args['session']) == 0):
+ raise SynapseError(400, "No session supplied")
+
+ session = request.args["session"][0]
+
+ html = RECAPTCHA_TEMPLATE % {
+ 'session': session,
+ 'myurl': "%s/auth/%s/fallback/web" % (
+ CLIENT_V2_ALPHA_PREFIX, LoginType.RECAPTCHA
+ ),
+ 'sitekey': self.hs.config.recaptcha_public_key,
+ }
+ html_bytes = html.encode("utf8")
+ request.setResponseCode(200)
+ request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+ request.setHeader(b"Server", self.hs.version_string)
+ request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
+
+ request.write(html_bytes)
+ request.finish()
+ defer.returnValue(None)
+ else:
+ raise SynapseError(404, "Unknown auth stage type")
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, stagetype):
+ yield
+ if stagetype == "m.login.recaptcha":
+ if ('g-recaptcha-response' not in request.args or
+ len(request.args['g-recaptcha-response']) == 0):
+ raise SynapseError(400, "No captcha response supplied")
+ if ('session' not in request.args or
+ len(request.args['session']) == 0):
+ raise SynapseError(400, "No session supplied")
+
+ session = request.args['session'][0]
+
+ authdict = {
+ 'response': request.args['g-recaptcha-response'][0],
+ 'session': session,
+ }
+
+ success = yield self.auth_handler.add_oob_auth(
+ LoginType.RECAPTCHA,
+ authdict,
+ self.hs.get_ip_from_request(request)
+ )
+
+ if success:
+ html = SUCCESS_TEMPLATE
+ else:
+ html = RECAPTCHA_TEMPLATE % {
+ 'session': session,
+ 'myurl': "%s/auth/%s/fallback/web" % (
+ CLIENT_V2_ALPHA_PREFIX, LoginType.RECAPTCHA
+ ),
+ 'sitekey': self.hs.config.recaptcha_public_key,
+ }
+ html_bytes = html.encode("utf8")
+ request.setResponseCode(200)
+ request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+ request.setHeader(b"Server", self.hs.version_string)
+ request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
+
+ request.write(html_bytes)
+ request.finish()
+
+ defer.returnValue(None)
+ else:
+ raise SynapseError(404, "Unknown auth stage type")
+
+ def on_OPTIONS(self, _):
+ return 200, {}
+
+
+def register_servlets(hs, http_server):
+ AuthRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py
new file mode 100644
index 00000000..97956a4b
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/filter.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import AuthError, SynapseError
+from synapse.http.servlet import RestServlet
+from synapse.types import UserID
+
+from ._base import client_v2_pattern
+
+import simplejson as json
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class GetFilterRestServlet(RestServlet):
+ PATTERN = client_v2_pattern("/user/(?P<user_id>[^/]*)/filter/(?P<filter_id>[^/]*)")
+
+ def __init__(self, hs):
+ super(GetFilterRestServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.filtering = hs.get_filtering()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id, filter_id):
+ target_user = UserID.from_string(user_id)
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+
+ if target_user != auth_user:
+ raise AuthError(403, "Cannot get filters for other users")
+
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "Can only get filters for local users")
+
+ try:
+ filter_id = int(filter_id)
+ except ValueError:
+ raise SynapseError(400, "Invalid filter_id")
+
+ try:
+ user_filter = yield self.filtering.get_user_filter(
+ user_localpart=target_user.localpart,
+ filter_id=filter_id,
+ )
+
+ defer.returnValue((200, user_filter.filter_json))
+ except KeyError:
+ raise SynapseError(400, "No such filter")
+
+
+class CreateFilterRestServlet(RestServlet):
+ PATTERN = client_v2_pattern("/user/(?P<user_id>[^/]*)/filter")
+
+ def __init__(self, hs):
+ super(CreateFilterRestServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.filtering = hs.get_filtering()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, user_id):
+ target_user = UserID.from_string(user_id)
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+
+ if target_user != auth_user:
+ raise AuthError(403, "Cannot create filters for other users")
+
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "Can only create filters for local users")
+
+ try:
+ content = json.loads(request.content.read())
+
+ # TODO(paul): check for required keys and invalid keys
+ except (ValueError, TypeError):
+ raise SynapseError(400, "Invalid filter definition")
+
+ filter_id = yield self.filtering.add_user_filter(
+ user_localpart=target_user.localpart,
+ user_filter=content,
+ )
+
+ defer.returnValue((200, {"filter_id": str(filter_id)}))
+
+
+def register_servlets(hs, http_server):
+ GetFilterRestServlet(hs).register(http_server)
+ CreateFilterRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py
new file mode 100644
index 00000000..820d3333
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/keys.py
@@ -0,0 +1,317 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import RestServlet
+from synapse.types import UserID
+
+from canonicaljson import encode_canonical_json
+
+from ._base import client_v2_pattern
+
+import simplejson as json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class KeyUploadServlet(RestServlet):
+ """
+ POST /keys/upload/<device_id> HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "device_keys": {
+ "user_id": "<user_id>",
+ "device_id": "<device_id>",
+ "valid_until_ts": <millisecond_timestamp>,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha256",
+ ],
+ "keys": {
+ "<algorithm>:<device_id>": "<key_base64>",
+ },
+ "signatures": {
+ "<user_id>": {
+ "<algorithm>:<device_id>": "<signature_base64>"
+ } } },
+ "one_time_keys": {
+ "<algorithm>:<key_id>": "<key_base64>"
+ },
+ }
+ """
+ PATTERN = client_v2_pattern("/keys/upload/(?P<device_id>[^/]*)")
+
+ def __init__(self, hs):
+ super(KeyUploadServlet, self).__init__()
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+ self.auth = hs.get_auth()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, device_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ user_id = auth_user.to_string()
+ # TODO: Check that the device_id matches that in the authentication
+ # or derive the device_id from the authentication instead.
+ try:
+ body = json.loads(request.content.read())
+ except ValueError:
+ raise SynapseError(400, "Invalid key JSON")
+ time_now = self.clock.time_msec()
+
+ # TODO: Validate the JSON to make sure it has the right keys.
+ device_keys = body.get("device_keys", None)
+ if device_keys:
+ logger.info(
+ "Updating device_keys for device %r for user %r at %d",
+ device_id, auth_user, time_now
+ )
+ # TODO: Sign the JSON with the server key
+ yield self.store.set_e2e_device_keys(
+ user_id, device_id, time_now,
+ encode_canonical_json(device_keys)
+ )
+
+ one_time_keys = body.get("one_time_keys", None)
+ if one_time_keys:
+ logger.info(
+ "Adding %d one_time_keys for device %r for user %r at %d",
+ len(one_time_keys), device_id, user_id, time_now
+ )
+ key_list = []
+ for key_id, key_json in one_time_keys.items():
+ algorithm, key_id = key_id.split(":")
+ key_list.append((
+ algorithm, key_id, encode_canonical_json(key_json)
+ ))
+
+ yield self.store.add_e2e_one_time_keys(
+ user_id, device_id, time_now, key_list
+ )
+
+ result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
+ defer.returnValue((200, {"one_time_key_counts": result}))
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, device_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ user_id = auth_user.to_string()
+
+ result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
+ defer.returnValue((200, {"one_time_key_counts": result}))
+
+
+class KeyQueryServlet(RestServlet):
+ """
+ GET /keys/query/<user_id> HTTP/1.1
+
+ GET /keys/query/<user_id>/<device_id> HTTP/1.1
+
+ POST /keys/query HTTP/1.1
+ Content-Type: application/json
+ {
+ "device_keys": {
+ "<user_id>": ["<device_id>"]
+ } }
+
+ HTTP/1.1 200 OK
+ {
+ "device_keys": {
+ "<user_id>": {
+ "<device_id>": {
+ "user_id": "<user_id>", // Duplicated to be signed
+ "device_id": "<device_id>", // Duplicated to be signed
+ "valid_until_ts": <millisecond_timestamp>,
+ "algorithms": [ // List of supported algorithms
+ "m.olm.curve25519-aes-sha256",
+ ],
+ "keys": { // Must include a ed25519 signing key
+ "<algorithm>:<key_id>": "<key_base64>",
+ },
+ "signatures:" {
+ // Must be signed with device's ed25519 key
+ "<user_id>/<device_id>": {
+ "<algorithm>:<key_id>": "<signature_base64>"
+ }
+ // Must be signed by this server.
+ "<server_name>": {
+ "<algorithm>:<key_id>": "<signature_base64>"
+ } } } } } }
+ """
+
+ PATTERN = client_v2_pattern(
+ "/keys/query(?:"
+ "/(?P<user_id>[^/]*)(?:"
+ "/(?P<device_id>[^/]*)"
+ ")?"
+ ")?"
+ )
+
+ def __init__(self, hs):
+ super(KeyQueryServlet, self).__init__()
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+ self.federation = hs.get_replication_layer()
+ self.is_mine = hs.is_mine
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, user_id, device_id):
+ yield self.auth.get_user_by_req(request)
+ try:
+ body = json.loads(request.content.read())
+ except ValueError:
+ raise SynapseError(400, "Invalid key JSON")
+ result = yield self.handle_request(body)
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id, device_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ auth_user_id = auth_user.to_string()
+ user_id = user_id if user_id else auth_user_id
+ device_ids = [device_id] if device_id else []
+ result = yield self.handle_request(
+ {"device_keys": {user_id: device_ids}}
+ )
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def handle_request(self, body):
+ local_query = []
+ remote_queries = {}
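+ # Partition the query: devices owned by this homeserver are looked up
+ # in the local store; the rest are batched into one federation query
+ # per remote server.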
+ for user_id, device_ids in body.get("device_keys", {}).items():
+ user = UserID.from_string(user_id)
+ if self.is_mine(user):
+ if not device_ids:
+ local_query.append((user_id, None))
+ else:
+ for device_id in device_ids:
+ local_query.append((user_id, device_id))
+ else:
+ remote_queries.setdefault(user.domain, {})[user_id] = list(
+ device_ids
+ )
+ results = yield self.store.get_e2e_device_keys(local_query)
+
+ json_result = {}
+ for user_id, device_keys in results.items():
+ for device_id, json_bytes in device_keys.items():
+ json_result.setdefault(user_id, {})[device_id] = json.loads(
+ json_bytes
+ )
+
+ for destination, device_keys in remote_queries.items():
+ remote_result = yield self.federation.query_client_keys(
+ destination, {"device_keys": device_keys}
+ )
+ for user_id, keys in remote_result["device_keys"].items():
+ if user_id in device_keys:
+ json_result[user_id] = keys
+ defer.returnValue((200, {"device_keys": json_result}))
+
+
+class OneTimeKeyServlet(RestServlet):
+ """
+ GET /keys/claim/<user_id>/<device_id>/<algorithm> HTTP/1.1
+
+ POST /keys/claim HTTP/1.1
+ {
+ "one_time_keys": {
+ "<user_id>": {
+ "<device_id>": "<algorithm>"
+ } } }
+
+ HTTP/1.1 200 OK
+ {
+ "one_time_keys": {
+ "<user_id>": {
+ "<device_id>": {
+ "<algorithm>:<key_id>": "<key_base64>"
+ } } } }
+
+ """
+ PATTERN = client_v2_pattern(
+ "/keys/claim(?:/?|(?:/"
+ "(?P<user_id>[^/]*)/(?P<device_id>[^/]*)/(?P<algorithm>[^/]*)"
+ ")?)"
+ )
+
+ def __init__(self, hs):
+ super(OneTimeKeyServlet, self).__init__()
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+ self.clock = hs.get_clock()
+ self.federation = hs.get_replication_layer()
+ self.is_mine = hs.is_mine
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id, device_id, algorithm):
+ yield self.auth.get_user_by_req(request)
+ result = yield self.handle_request(
+ {"one_time_keys": {user_id: {device_id: algorithm}}}
+ )
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, user_id, device_id, algorithm):
+ yield self.auth.get_user_by_req(request)
+ try:
+ body = json.loads(request.content.read())
+ except Exception:
+ raise SynapseError(400, "Invalid key JSON")
+ result = yield self.handle_request(body)
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def handle_request(self, body):
+ local_query = []
+ remote_queries = {}
+ for user_id, device_keys in body.get("one_time_keys", {}).items():
+ user = UserID.from_string(user_id)
+ if self.is_mine(user):
+ for device_id, algorithm in device_keys.items():
+ local_query.append((user_id, device_id, algorithm))
+ else:
+ remote_queries.setdefault(user.domain, {})[user_id] = (
+ device_keys
+ )
+ results = yield self.store.claim_e2e_one_time_keys(local_query)
+
+ json_result = {}
+ for user_id, device_keys in results.items():
+ for device_id, keys in device_keys.items():
+ for key_id, json_bytes in keys.items():
+ # Merge into any existing dict for this device so that multiple
+ # claimed keys are not silently overwritten.
+ json_result.setdefault(user_id, {}).setdefault(
+ device_id, {}
+ )[key_id] = json.loads(json_bytes)
+
+ for destination, device_keys in remote_queries.items():
+ remote_result = yield self.federation.claim_client_keys(
+ destination, {"one_time_keys": device_keys}
+ )
+ for user_id, keys in remote_result["one_time_keys"].items():
+ if user_id in device_keys:
+ json_result[user_id] = keys
+
+ defer.returnValue((200, {"one_time_keys": json_result}))
+
+
+def register_servlets(hs, http_server):
+ KeyUploadServlet(hs).register(http_server)
+ KeyQueryServlet(hs).register(http_server)
+ OneTimeKeyServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py
new file mode 100644
index 00000000..788acd4a
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/receipts.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import RestServlet
+from ._base import client_v2_pattern
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class ReceiptRestServlet(RestServlet):
+ PATTERN = client_v2_pattern(
+ "/rooms/(?P<room_id>[^/]*)"
+ "/receipt/(?P<receipt_type>[^/]*)"
+ "/(?P<event_id>[^/]*)$"
+ )
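+ # Illustrative request, assuming the v2_alpha client prefix (a sketch,
+ # not from the original source):
+ # POST /_matrix/client/v2_alpha/rooms/{roomId}/receipt/m.read/{eventId}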
+
+ def __init__(self, hs):
+ super(ReceiptRestServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.receipts_handler = hs.get_handlers().receipts_handler
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, room_id, receipt_type, event_id):
+ user, _, _ = yield self.auth.get_user_by_req(request)
+
+ if receipt_type != "m.read":
+ raise SynapseError(400, "Receipt type must be 'm.read'")
+
+ yield self.receipts_handler.received_client_receipt(
+ room_id,
+ receipt_type,
+ user_id=user.to_string(),
+ event_id=event_id
+ )
+
+ defer.returnValue((200, {}))
+
+
+def register_servlets(hs, http_server):
+ ReceiptRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
new file mode 100644
index 00000000..f8993763
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -0,0 +1,266 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.constants import LoginType
+from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError
+from synapse.http.servlet import RestServlet
+
+from ._base import client_v2_pattern, parse_json_dict_from_request
+
+import logging
+import hmac
+from hashlib import sha1
+from synapse.util.async import run_on_reactor
+
+
+# We ought to be using hmac.compare_digest() but on older pythons it doesn't
+# exist. It's a _really minor_ security flaw to use plain string comparison
+# because the timing attack is so obscured by all the other code here it's
+# unlikely to make much difference
+if hasattr(hmac, "compare_digest"):
+ compare_digest = hmac.compare_digest
+else:
+ compare_digest = lambda a, b: a == b
+
+
+logger = logging.getLogger(__name__)
+
+
+class RegisterRestServlet(RestServlet):
+ PATTERN = client_v2_pattern("/register")
+
+ def __init__(self, hs):
+ super(RegisterRestServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.auth_handler = hs.get_handlers().auth_handler
+ self.registration_handler = hs.get_handlers().registration_handler
+ self.identity_handler = hs.get_handlers().identity_handler
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ yield run_on_reactor()
+
+ kind = "user"
+ if "kind" in request.args:
+ kind = request.args["kind"][0]
+
+ if kind == "guest":
+ ret = yield self._do_guest_registration()
+ defer.returnValue(ret)
+ return
+ elif kind != "user":
+ raise UnrecognizedRequestError(
+ "Do not understand membership kind: %s" % (kind,)
+ )
+
+ if '/register/email/requestToken' in request.path:
+ ret = yield self.onEmailTokenRequest(request)
+ defer.returnValue(ret)
+
+ body = parse_json_dict_from_request(request)
+
+ # we do basic sanity checks here because the auth layer will store these
+ # in sessions. Pull out the username/password provided to us.
+ desired_password = None
+ if 'password' in body:
+ if (not isinstance(body['password'], basestring) or
+ len(body['password']) > 512):
+ raise SynapseError(400, "Invalid password")
+ desired_password = body["password"]
+
+ desired_username = None
+ if 'username' in body:
+ if (not isinstance(body['username'], basestring) or
+ len(body['username']) > 512):
+ raise SynapseError(400, "Invalid username")
+ desired_username = body['username']
+
+ appservice = None
+ if 'access_token' in request.args:
+ appservice = yield self.auth.get_appservice_by_req(request)
+
+ # fork off as soon as possible for ASes and shared secret auth which
+ # have completely different registration flows to normal users
+
+ # == Application Service Registration ==
+ if appservice:
+ result = yield self._do_appservice_registration(
+ desired_username, request.args["access_token"][0]
+ )
+ defer.returnValue((200, result)) # we throw for non 200 responses
+ return
+
+ # == Shared Secret Registration == (e.g. create new user scripts)
+ if 'mac' in body:
+ # FIXME: Should we really be determining if this is shared secret
+ # auth based purely on the 'mac' key?
+ result = yield self._do_shared_secret_registration(
+ desired_username, desired_password, body["mac"]
+ )
+ defer.returnValue((200, result)) # we throw for non 200 responses
+ return
+
+ # == Normal User Registration == (everyone else)
+ if self.hs.config.disable_registration:
+ raise SynapseError(403, "Registration has been disabled")
+
+ if desired_username is not None:
+ yield self.registration_handler.check_username(desired_username)
+
+ if self.hs.config.enable_registration_captcha:
+ flows = [
+ [LoginType.RECAPTCHA],
+ [LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA]
+ ]
+ else:
+ flows = [
+ [LoginType.DUMMY],
+ [LoginType.EMAIL_IDENTITY]
+ ]
+
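+ # Illustrative body for a client completing the dummy flow above
+ # (assumed shape, not from the original source):
+ # {"username": "alice", "password": "s3cret",
+ # "auth": {"type": "m.login.dummy", "session": "<session_id>"}}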
+ authed, result, params = yield self.auth_handler.check_auth(
+ flows, body, self.hs.get_ip_from_request(request)
+ )
+
+ if not authed:
+ defer.returnValue((401, result))
+ return
+
+ # NB: This may be from the auth handler and NOT from the POST
+ if 'password' not in params:
+ raise SynapseError(400, "Missing password.", Codes.MISSING_PARAM)
+
+ desired_username = params.get("username", None)
+ new_password = params.get("password", None)
+
+ (user_id, token) = yield self.registration_handler.register(
+ localpart=desired_username,
+ password=new_password
+ )
+
+ if result and LoginType.EMAIL_IDENTITY in result:
+ threepid = result[LoginType.EMAIL_IDENTITY]
+
+ for reqd in ['medium', 'address', 'validated_at']:
+ if reqd not in threepid:
+ logger.info("Can't add incomplete 3pid")
+ else:
+ yield self.auth_handler.add_threepid(
+ user_id,
+ threepid['medium'],
+ threepid['address'],
+ threepid['validated_at'],
+ )
+
+ if 'bind_email' in params and params['bind_email']:
+ logger.info("bind_email specified: binding")
+
+ emailThreepid = result[LoginType.EMAIL_IDENTITY]
+ threepid_creds = emailThreepid['threepid_creds']
+ logger.debug("Binding emails %s to %s" % (
+ emailThreepid, user_id
+ ))
+ yield self.identity_handler.bind_threepid(threepid_creds, user_id)
+ else:
+ logger.info("bind_email not specified: not binding email")
+
+ result = self._create_registration_details(user_id, token)
+ defer.returnValue((200, result))
+
+ def on_OPTIONS(self, _):
+ return 200, {}
+
+ @defer.inlineCallbacks
+ def _do_appservice_registration(self, username, as_token):
+ (user_id, token) = yield self.registration_handler.appservice_register(
+ username, as_token
+ )
+ defer.returnValue(self._create_registration_details(user_id, token))
+
+ @defer.inlineCallbacks
+ def _do_shared_secret_registration(self, username, password, mac):
+ if not self.hs.config.registration_shared_secret:
+ raise SynapseError(400, "Shared secret registration is not enabled")
+
+ user = username.encode("utf-8")
+
+ # str() because otherwise hmac complains that 'unicode' does not
+ # have the buffer interface
+ got_mac = str(mac)
+
+ want_mac = hmac.new(
+ key=self.hs.config.registration_shared_secret,
+ msg=user,
+ digestmod=sha1,
+ ).hexdigest()
+
+ if not compare_digest(want_mac, got_mac):
+ raise SynapseError(
+ 403, "HMAC incorrect",
+ )
+
+ (user_id, token) = yield self.registration_handler.register(
+ localpart=username, password=password
+ )
+ defer.returnValue(self._create_registration_details(user_id, token))
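+
+ # A minimal client-side sketch (illustrative, not part of this module)
+ # of producing the MAC this endpoint verifies, assuming the caller
+ # knows the server's registration_shared_secret:
+ #
+ # import hmac
+ # from hashlib import sha1
+ # mac = hmac.new(key=shared_secret, msg=username,
+ # digestmod=sha1).hexdigest()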
+
+ def _create_registration_details(self, user_id, token):
+ return {
+ "user_id": user_id,
+ "access_token": token,
+ "home_server": self.hs.hostname,
+ }
+
+ @defer.inlineCallbacks
+ def onEmailTokenRequest(self, request):
+ body = parse_json_dict_from_request(request)
+
+ required = ['id_server', 'client_secret', 'email', 'send_attempt']
+ absent = []
+ for k in required:
+ if k not in body:
+ absent.append(k)
+
+ if len(absent) > 0:
+ raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+
+ existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ 'email', body['email']
+ )
+
+ if existingUid is not None:
+ raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
+
+ ret = yield self.identity_handler.requestEmailToken(**body)
+ defer.returnValue((200, ret))
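+
+ # Illustrative request body, derived from the required keys above
+ # (placeholder values): {"id_server": "id.example.com",
+ # "client_secret": "<opaque>", "email": "alice@example.com",
+ # "send_attempt": 1}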
+
+ @defer.inlineCallbacks
+ def _do_guest_registration(self):
+ if not self.hs.config.allow_guest_access:
+ defer.returnValue((403, "Guest access is disabled"))
+ user_id, _ = yield self.registration_handler.register(generate_token=False)
+ access_token = self.auth_handler.generate_access_token(user_id, ["guest = true"])
+ defer.returnValue((200, {
+ "user_id": user_id,
+ "access_token": access_token,
+ "home_server": self.hs.hostname,
+ }))
+
+
+def register_servlets(hs, http_server):
+ RegisterRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
new file mode 100644
index 00000000..efd82815
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -0,0 +1,365 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.http.servlet import (
+ RestServlet, parse_string, parse_integer, parse_boolean
+)
+from synapse.handlers.sync import SyncConfig
+from synapse.types import StreamToken
+from synapse.events import FrozenEvent
+from synapse.events.utils import (
+ serialize_event, format_event_for_client_v2_without_event_id,
+)
+from synapse.api.filtering import FilterCollection
+from ._base import client_v2_pattern
+
+import copy
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class SyncRestServlet(RestServlet):
+ """
+
+ GET parameters::
+ timeout(int): How long to wait for new events in milliseconds.
+ since(batch_token): Batch token when asking for incremental deltas.
+ set_presence(str): What state the device presence should be set to.
+ default is "online".
+ filter(filter_id): A filter to apply to the events returned.
+
+ Response JSON::
+ {
+ "next_batch": // batch token for the next /sync
+ "presence": // presence data for the user.
+ "rooms": {
+ "joined": { // Joined rooms being updated.
+ "${room_id}": { // Id of the room being updated
+ "event_map": // Map of EventID -> event JSON.
+ "timeline": { // The recent events in the room if gap is "true"
+ "limited": // Was the per-room event limit exceeded?
+ // otherwise the next events in the room.
+ "events": [] // list of EventIDs in the "event_map".
+ "prev_batch": // back token for getting previous events.
+ }
+ "state": {"events": []} // list of EventIDs updating the
+ // current state to be what it should
+ // be at the end of the batch.
+ "ephemeral": {"events": []} // list of event objects
+ }
+ },
+ "invited": {}, // Invited rooms being updated.
+ "archived": {} // Archived rooms being updated.
+ }
+ }
+ """
+
+ PATTERN = client_v2_pattern("/sync$")
+ ALLOWED_PRESENCE = set(["online", "offline"])
+
+ def __init__(self, hs):
+ super(SyncRestServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.event_stream_handler = hs.get_handlers().event_stream_handler
+ self.sync_handler = hs.get_handlers().sync_handler
+ self.clock = hs.get_clock()
+ self.filtering = hs.get_filtering()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ user, token_id, _ = yield self.auth.get_user_by_req(request)
+
+ timeout = parse_integer(request, "timeout", default=0)
+ since = parse_string(request, "since")
+ set_presence = parse_string(
+ request, "set_presence", default="online",
+ allowed_values=self.ALLOWED_PRESENCE
+ )
+ filter_id = parse_string(request, "filter", default=None)
+ full_state = parse_boolean(request, "full_state", default=False)
+
+ logger.info(
+ "/sync: user=%r, timeout=%r, since=%r,"
+ " set_presence=%r, filter_id=%r" % (
+ user, timeout, since, set_presence, filter_id
+ )
+ )
+
+ try:
+ filter = yield self.filtering.get_user_filter(
+ user.localpart, filter_id
+ )
+ except Exception:
+ filter = FilterCollection({})
+
+ sync_config = SyncConfig(
+ user=user,
+ filter=filter,
+ )
+
+ if since is not None:
+ since_token = StreamToken.from_string(since)
+ else:
+ since_token = None
+
+ if set_presence == "online":
+ yield self.event_stream_handler.started_stream(user)
+
+ try:
+ sync_result = yield self.sync_handler.wait_for_sync_for_user(
+ sync_config, since_token=since_token, timeout=timeout,
+ full_state=full_state
+ )
+ finally:
+ if set_presence == "online":
+ self.event_stream_handler.stopped_stream(user)
+
+ time_now = self.clock.time_msec()
+
+ joined = self.encode_joined(
+ sync_result.joined, filter, time_now, token_id
+ )
+
+ invited = self.encode_invited(
+ sync_result.invited, filter, time_now, token_id
+ )
+
+ archived = self.encode_archived(
+ sync_result.archived, filter, time_now, token_id
+ )
+
+ response_content = {
+ "presence": self.encode_presence(
+ sync_result.presence, filter, time_now
+ ),
+ "rooms": {
+ "joined": joined,
+ "invited": invited,
+ "archived": archived,
+ },
+ "next_batch": sync_result.next_batch.to_string(),
+ }
+
+ defer.returnValue((200, response_content))
+
+ def encode_presence(self, events, filter, time_now):
+ formatted = []
+ for event in events:
+ event = copy.deepcopy(event)
+ event['sender'] = event['content'].pop('user_id')
+ formatted.append(event)
+ return {"events": filter.filter_presence(formatted)}
+
+ def encode_joined(self, rooms, filter, time_now, token_id):
+ """
+ Encode the joined rooms in a sync result
+
+ :param list[synapse.handlers.sync.JoinedSyncResult] rooms: list of sync
+ results for rooms this user is joined to
+ :param FilterCollection filter: filters to apply to the results
+ :param int time_now: current time - used as a baseline for age
+ calculations
+ :param int token_id: ID of the user's auth token - used for namespacing
+ of transaction IDs
+
+ :return: the joined rooms list, in our response format
+ :rtype: dict[str, dict[str, object]]
+ """
+ joined = {}
+ for room in rooms:
+ joined[room.room_id] = self.encode_room(
+ room, filter, time_now, token_id
+ )
+
+ return joined
+
+ def encode_invited(self, rooms, filter, time_now, token_id):
+ """
+ Encode the invited rooms in a sync result
+
+ :param list[synapse.handlers.sync.InvitedSyncResult] rooms: list of
+ sync results for rooms this user is invited to
+ :param FilterCollection filter: filters to apply to the results
+ :param int time_now: current time - used as a baseline for age
+ calculations
+ :param int token_id: ID of the user's auth token - used for namespacing
+ of transaction IDs
+
+ :return: the invited rooms list, in our response format
+ :rtype: dict[str, dict[str, object]]
+ """
+ invited = {}
+ for room in rooms:
+ invite = serialize_event(
+ room.invite, time_now, token_id=token_id,
+ event_format=format_event_for_client_v2_without_event_id,
+ )
+ invited_state = invite.get("unsigned", {}).pop("invite_room_state", [])
+ invited_state.append(invite)
+ invited[room.room_id] = {
+ "invite_state": {"events": invited_state}
+ }
+
+ return invited
+
+ def encode_archived(self, rooms, filter, time_now, token_id):
+ """
+ Encode the archived rooms in a sync result
+
+ :param list[synapse.handlers.sync.ArchivedSyncResult] rooms: list of
+ sync results for rooms this user is no longer joined to
+ :param FilterCollection filter: filters to apply to the results
+ :param int time_now: current time - used as a baseline for age
+ calculations
+ :param int token_id: ID of the user's auth token - used for namespacing
+ of transaction IDs
+
+ :return: the archived rooms list, in our response format
+ :rtype: dict[str, dict[str, object]]
+ """
+ joined = {}
+ for room in rooms:
+ joined[room.room_id] = self.encode_room(
+ room, filter, time_now, token_id, joined=False
+ )
+
+ return joined
+
+ @staticmethod
+ def encode_room(room, filter, time_now, token_id, joined=True):
+ """
+ :param JoinedSyncResult|ArchivedSyncResult room: sync result for a
+ single room
+ :param FilterCollection filter: filters to apply to the results
+ :param int time_now: current time - used as a baseline for age
+ calculations
+ :param int token_id: ID of the user's auth token - used for namespacing
+ of transaction IDs
+ :param joined: True if the user is joined to this room - will mean
+ we handle ephemeral events
+
+ :return: the room, encoded in our response format
+ :rtype: dict[str, object]
+ """
+ event_map = {}
+ state_dict = room.state
+ timeline_events = filter.filter_room_timeline(room.timeline.events)
+
+ state_dict = SyncRestServlet._rollback_state_for_timeline(
+ state_dict, timeline_events)
+
+ state_events = filter.filter_room_state(state_dict.values())
+ state_event_ids = []
+ for event in state_events:
+ # TODO(mjark): Respect formatting requirements in the filter.
+ event_map[event.event_id] = serialize_event(
+ event, time_now, token_id=token_id,
+ event_format=format_event_for_client_v2_without_event_id,
+ )
+ state_event_ids.append(event.event_id)
+
+ timeline_event_ids = []
+ for event in timeline_events:
+ # TODO(mjark): Respect formatting requirements in the filter.
+ event_map[event.event_id] = serialize_event(
+ event, time_now, token_id=token_id,
+ event_format=format_event_for_client_v2_without_event_id,
+ )
+ timeline_event_ids.append(event.event_id)
+
+ private_user_data = filter.filter_room_private_user_data(
+ room.private_user_data
+ )
+
+ result = {
+ "event_map": event_map,
+ "timeline": {
+ "events": timeline_event_ids,
+ "prev_batch": room.timeline.prev_batch.to_string(),
+ "limited": room.timeline.limited,
+ },
+ "state": {"events": state_event_ids},
+ "private_user_data": {"events": private_user_data},
+ }
+
+ if joined:
+ ephemeral_events = filter.filter_room_ephemeral(room.ephemeral)
+ result["ephemeral"] = {"events": ephemeral_events}
+
+ return result
+
+ @staticmethod
+ def _rollback_state_for_timeline(state, timeline):
+ """
+ Wind the state dictionary backwards, so that it represents the
+ state at the start of the timeline, rather than at the end.
+
+ :param dict[(str, str), synapse.events.EventBase] state: the
+ state dictionary. Will be updated to the state before the timeline.
+ :param list[synapse.events.EventBase] timeline: the event timeline
+ :return: updated state dictionary
+ """
+ logger.debug("Processing state dict %r; timeline %r", state,
+ [e.get_dict() for e in timeline])
+
+ result = state.copy()
+
+ for timeline_event in reversed(timeline):
+ if not timeline_event.is_state():
+ continue
+
+ event_key = (timeline_event.type, timeline_event.state_key)
+
+ logger.debug("Considering %s for removal", event_key)
+
+ state_event = result.get(event_key)
+ if (state_event is None or
+ state_event.event_id != timeline_event.event_id):
+ # the event in the timeline isn't present in the state
+ # dictionary.
+ #
+ # the most likely cause for this is that there was a fork in
+ # the event graph, and the state is no longer valid. Really,
+ # the event shouldn't be in the timeline. We're going to ignore
+ # it for now, however.
+ logger.warn("Found state event %r in timeline which doesn't "
+ "match state dictionary", timeline_event)
+ continue
+
+ prev_event_id = timeline_event.unsigned.get("replaces_state", None)
+ logger.debug("Replacing %s with %s in state dict",
+ timeline_event.event_id, prev_event_id)
+
+ if prev_event_id is None:
+ del result[event_key]
+ else:
+ result[event_key] = FrozenEvent({
+ "type": timeline_event.type,
+ "state_key": timeline_event.state_key,
+ "content": timeline_event.unsigned['prev_content'],
+ "sender": timeline_event.unsigned['prev_sender'],
+ "event_id": prev_event_id,
+ "room_id": timeline_event.room_id,
+ })
+ logger.debug("New value: %r", result.get(event_key))
+
+ return result
+
+
+def register_servlets(hs, http_server):
+ SyncRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py
new file mode 100644
index 00000000..35482ae6
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/tags.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import client_v2_pattern
+
+from synapse.http.servlet import RestServlet
+from synapse.api.errors import AuthError, SynapseError
+
+from twisted.internet import defer
+
+import logging
+
+import simplejson as json
+
+logger = logging.getLogger(__name__)
+
+
+class TagListServlet(RestServlet):
+ """
+ GET /user/{user_id}/rooms/{room_id}/tags HTTP/1.1
+ """
+ PATTERN = client_v2_pattern(
+ "/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags"
+ )
+
+ def __init__(self, hs):
+ super(TagListServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id, room_id):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ if user_id != auth_user.to_string():
+ raise AuthError(403, "Cannot get tags for other users.")
+
+ tags = yield self.store.get_tags_for_room(user_id, room_id)
+
+ defer.returnValue((200, {"tags": tags}))
+
+
+class TagServlet(RestServlet):
+ """
+ PUT /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1
+ DELETE /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1
+ """
+ PATTERN = client_v2_pattern(
+ "/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags/(?P<tag>[^/]*)"
+ )
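+ # Illustrative PUT body (assumed; tag content is free-form JSON, e.g.
+ # an ordering hint): {"order": 0.25}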
+
+ def __init__(self, hs):
+ super(TagServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
+ self.notifier = hs.get_notifier()
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, user_id, room_id, tag):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ if user_id != auth_user.to_string():
+ raise AuthError(403, "Cannot add tags for other users.")
+
+ try:
+ content_bytes = request.content.read()
+ body = json.loads(content_bytes)
+ except Exception:
+ raise SynapseError(400, "Invalid tag JSON")
+
+ max_id = yield self.store.add_tag_to_room(user_id, room_id, tag, body)
+
+ yield self.notifier.on_new_event(
+ "private_user_data_key", max_id, users=[user_id]
+ )
+
+ defer.returnValue((200, {}))
+
+ @defer.inlineCallbacks
+ def on_DELETE(self, request, user_id, room_id, tag):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ if user_id != auth_user.to_string():
+ raise AuthError(403, "Cannot add tags for other users.")
+
+ max_id = yield self.store.remove_tag_from_room(user_id, room_id, tag)
+
+ yield self.notifier.on_new_event(
+ "private_user_data_key", max_id, users=[user_id]
+ )
+
+ defer.returnValue((200, {}))
+
+
+def register_servlets(hs, http_server):
+ TagListServlet(hs).register(http_server)
+ TagServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py
new file mode 100644
index 00000000..901e7779
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/tokenrefresh.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import AuthError, StoreError, SynapseError
+from synapse.http.servlet import RestServlet
+
+from ._base import client_v2_pattern, parse_json_dict_from_request
+
+
+class TokenRefreshRestServlet(RestServlet):
+ """
+ Exchanges a refresh token for a new access token and a new refresh
+ token.
+ """
+ PATTERN = client_v2_pattern("/tokenrefresh")
+
+ def __init__(self, hs):
+ super(TokenRefreshRestServlet, self).__init__()
+ self.hs = hs
+ self.store = hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ body = parse_json_dict_from_request(request)
+ try:
+ old_refresh_token = body["refresh_token"]
+ auth_handler = self.hs.get_handlers().auth_handler
+ (user_id, new_refresh_token) = yield self.store.exchange_refresh_token(
+ old_refresh_token, auth_handler.generate_refresh_token)
+ new_access_token = yield auth_handler.issue_access_token(user_id)
+ defer.returnValue((200, {
+ "access_token": new_access_token,
+ "refresh_token": new_refresh_token,
+ }))
+ except KeyError:
+ raise SynapseError(400, "Missing required key 'refresh_token'.")
+ except StoreError:
+ raise AuthError(403, "Did not recognize refresh token")
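+
+ # Illustrative exchange (a sketch, not from the original source):
+ # POST {"refresh_token": "<old>"} ->
+ # 200 {"access_token": "<new>", "refresh_token": "<new>"}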
+
+
+def register_servlets(hs, http_server):
+ TokenRefreshRestServlet(hs).register(http_server)
diff --git a/synapse/rest/key/__init__.py b/synapse/rest/key/__init__.py
new file mode 100644
index 00000000..1a84d94c
--- /dev/null
+++ b/synapse/rest/key/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/rest/key/v1/__init__.py b/synapse/rest/key/v1/__init__.py
new file mode 100644
index 00000000..1a84d94c
--- /dev/null
+++ b/synapse/rest/key/v1/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py
new file mode 100644
index 00000000..6df46969
--- /dev/null
+++ b/synapse/rest/key/v1/server_key_resource.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.web.resource import Resource
+from synapse.http.server import respond_with_json_bytes
+from signedjson.sign import sign_json
+from unpaddedbase64 import encode_base64
+from canonicaljson import encode_canonical_json
+from OpenSSL import crypto
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class LocalKey(Resource):
+ """HTTP resource containing encoding the TLS X.509 certificate and NACL
+ signature verification keys for this server::
+
+ GET /key HTTP/1.1
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+ {
+ "server_name": "this.server.example.com"
+ "verify_keys": {
+ "algorithm:version": # base64 encoded NACL verification key.
+ },
+ "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert.
+ "signatures": {
+ "this.server.example.com": {
+ "algorithm:version": # NACL signature for this server.
+ }
+ }
+ }
+ """
+
+ def __init__(self, hs):
+ self.hs = hs
+ self.version_string = hs.version_string
+ self.response_body = encode_canonical_json(
+ self.response_json_object(hs.config)
+ )
+ Resource.__init__(self)
+
+ @staticmethod
+ def response_json_object(server_config):
+ verify_keys = {}
+ for key in server_config.signing_key:
+ verify_key_bytes = key.verify_key.encode()
+ key_id = "%s:%s" % (key.alg, key.version)
+ verify_keys[key_id] = encode_base64(verify_key_bytes)
+
+ x509_certificate_bytes = crypto.dump_certificate(
+ crypto.FILETYPE_ASN1,
+ server_config.tls_certificate
+ )
+ json_object = {
+ u"server_name": server_config.server_name,
+ u"verify_keys": verify_keys,
+ u"tls_certificate": encode_base64(x509_certificate_bytes)
+ }
+ for key in server_config.signing_key:
+ json_object = sign_json(
+ json_object,
+ server_config.server_name,
+ key,
+ )
+
+ return json_object
+
+ def render_GET(self, request):
+ return respond_with_json_bytes(
+ request, 200, self.response_body,
+ version_string=self.version_string
+ )
+
+ def getChild(self, name, request):
+ if name == '':
+ return self
diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py
new file mode 100644
index 00000000..1c14791b
--- /dev/null
+++ b/synapse/rest/key/v2/__init__.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.web.resource import Resource
+from .local_key_resource import LocalKey
+from .remote_key_resource import RemoteKey
+
+
+class KeyApiV2Resource(Resource):
+ def __init__(self, hs):
+ Resource.__init__(self)
+ self.putChild("server", LocalKey(hs))
+ self.putChild("query", RemoteKey(hs))
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
new file mode 100644
index 00000000..ef7699d5
--- /dev/null
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.web.resource import Resource
+from synapse.http.server import respond_with_json_bytes
+from signedjson.sign import sign_json
+from unpaddedbase64 import encode_base64
+from canonicaljson import encode_canonical_json
+from hashlib import sha256
+from OpenSSL import crypto
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class LocalKey(Resource):
+ """HTTP resource containing encoding the TLS X.509 certificate and NACL
+ signature verification keys for this server::
+
+ GET /_matrix/key/v2/server/a.key.id HTTP/1.1
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+ {
+ "valid_until_ts": # integer posix timestamp when this result expires.
+ "server_name": "this.server.example.com"
+ "verify_keys": {
+ "algorithm:version": {
+ "key": # base64 encoded NACL verification key.
+ }
+ },
+ "old_verify_keys": {
+ "algorithm:version": {
+ "expired_ts": # integer posix timestamp when the key expired.
+ "key": # base64 encoded NACL verification key.
+ }
+ }
+ "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert.
+ "signatures": {
+ "this.server.example.com": {
+ "algorithm:version": # NACL signature for this server
+ }
+ }
+ }
+ """
+
+ isLeaf = True
+
+ def __init__(self, hs):
+ self.version_string = hs.version_string
+ self.config = hs.config
+ self.clock = hs.clock
+ self.update_response_body(self.clock.time_msec())
+ Resource.__init__(self)
+
+ def update_response_body(self, time_now_msec):
+ refresh_interval = self.config.key_refresh_interval
+ self.valid_until_ts = int(time_now_msec + refresh_interval)
+ self.response_body = encode_canonical_json(self.response_json_object())
+
+ def response_json_object(self):
+ verify_keys = {}
+ for key in self.config.signing_key:
+ verify_key_bytes = key.verify_key.encode()
+ key_id = "%s:%s" % (key.alg, key.version)
+ verify_keys[key_id] = {
+ u"key": encode_base64(verify_key_bytes)
+ }
+
+ old_verify_keys = {}
+ for key in self.config.old_signing_keys:
+ key_id = "%s:%s" % (key.alg, key.version)
+ verify_key_bytes = key.encode()
+ old_verify_keys[key_id] = {
+ u"key": encode_base64(verify_key_bytes),
+ u"expired_ts": key.expired,
+ }
+
+ x509_certificate_bytes = crypto.dump_certificate(
+ crypto.FILETYPE_ASN1,
+ self.config.tls_certificate
+ )
+
+ sha256_fingerprint = sha256(x509_certificate_bytes).digest()
+
+ json_object = {
+ u"valid_until_ts": self.valid_until_ts,
+ u"server_name": self.config.server_name,
+ u"verify_keys": verify_keys,
+ u"old_verify_keys": old_verify_keys,
+ u"tls_fingerprints": [{
+ u"sha256": encode_base64(sha256_fingerprint),
+ }]
+ }
+ for key in self.config.signing_key:
+ json_object = sign_json(
+ json_object,
+ self.config.server_name,
+ key,
+ )
+ return json_object
+
+ def render_GET(self, request):
+ time_now = self.clock.time_msec()
+ # Update the expiry time if less than half the interval remains.
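+ # e.g. (illustrative) with a key_refresh_interval of one day, a body
+ # minted at time T is regenerated on the first request after T + 12h.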
+ if time_now + self.config.key_refresh_interval / 2 > self.valid_until_ts:
+ self.update_response_body(time_now)
+ return respond_with_json_bytes(
+ request, 200, self.response_body,
+ version_string=self.version_string
+ )
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
new file mode 100644
index 00000000..e434847b
--- /dev/null
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -0,0 +1,242 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.http.server import request_handler, respond_with_json_bytes
+from synapse.http.servlet import parse_integer
+from synapse.api.errors import SynapseError, Codes
+
+from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
+from twisted.internet import defer
+
+
+from io import BytesIO
+import json
+import logging
+logger = logging.getLogger(__name__)
+
+
+class RemoteKey(Resource):
+ """HTTP resource for retreiving the TLS certificate and NACL signature
+ verification keys for a collection of servers. Checks that the reported
+ X.509 TLS certificate matches the one used in the HTTPS connection. Checks
+ that the NACL signature for the remote server is valid. Returns a dict of
+ JSON signed by both the remote server and by this server.
+
+ Supports individual GET APIs and a bulk query POST API.
+
+ Requests:
+
+ GET /_matrix/key/v2/query/remote.server.example.com HTTP/1.1
+
+ GET /_matrix/key/v2/query/remote.server.example.com/a.key.id HTTP/1.1
+
+ POST /_matrix/key/v2/query HTTP/1.1
+ Content-Type: application/json
+ {
+ "server_keys": {
+ "remote.server.example.com": {
+ "a.key.id": {
+ "minimum_valid_until_ts": 1234567890123
+ }
+ }
+ }
+ }
+
+ Response:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+ {
+ "server_keys": [
+ {
+ "server_name": "remote.server.example.com"
+ "valid_until_ts": # posix timestamp
+ "verify_keys": {
+ "a.key.id": { # The identifier for a key.
+ key: "" # base64 encoded verification key.
+ }
+ }
+ "old_verify_keys": {
+ "an.old.key.id": { # The identifier for an old key.
+ key: "", # base64 encoded key
+ "expired_ts": 0, # when the key stop being used.
+ }
+ }
+ "tls_fingerprints": [
+ { "sha256": # fingerprint }
+ ]
+ "signatures": {
+ "remote.server.example.com": {...}
+ "this.server.example.com": {...}
+ }
+ }
+ ]
+ }
+ """
+
+ isLeaf = True
+
+ def __init__(self, hs):
+ self.keyring = hs.get_keyring()
+ self.store = hs.get_datastore()
+ self.version_string = hs.version_string
+ self.clock = hs.get_clock()
+
+ def render_GET(self, request):
+ self.async_render_GET(request)
+ return NOT_DONE_YET
+
+ @request_handler
+ @defer.inlineCallbacks
+ def async_render_GET(self, request):
+ if len(request.postpath) == 1:
+ server, = request.postpath
+ query = {server: {}}
+ elif len(request.postpath) == 2:
+ server, key_id = request.postpath
+ minimum_valid_until_ts = parse_integer(
+ request, "minimum_valid_until_ts"
+ )
+ arguments = {}
+ if minimum_valid_until_ts is not None:
+ arguments["minimum_valid_until_ts"] = minimum_valid_until_ts
+ query = {server: {key_id: arguments}}
+ else:
+ raise SynapseError(
+ 404, "Not found %r" % request.postpath, Codes.NOT_FOUND
+ )
+ yield self.query_keys(request, query, query_remote_on_cache_miss=True)
+
+ def render_POST(self, request):
+ self.async_render_POST(request)
+ return NOT_DONE_YET
+
+ @request_handler
+ @defer.inlineCallbacks
+ def async_render_POST(self, request):
+ try:
+ content = json.loads(request.content.read())
+ if not isinstance(content, dict):
+ raise ValueError()
+ except ValueError:
+ raise SynapseError(
+ 400, "Content must be JSON object.", errcode=Codes.NOT_JSON
+ )
+
+ query = content["server_keys"]
+
+ yield self.query_keys(request, query, query_remote_on_cache_miss=True)
+
+ @defer.inlineCallbacks
+ def query_keys(self, request, query, query_remote_on_cache_miss=False):
+ logger.info("Handling query for keys %r", query)
+ store_queries = []
+ for server_name, key_ids in query.items():
+ if not key_ids:
+ key_ids = (None,)
+ for key_id in key_ids:
+ store_queries.append((server_name, key_id, None))
+
+ cached = yield self.store.get_server_keys_json(store_queries)
+
+ json_results = set()
+
+ time_now_ms = self.clock.time_msec()
+
+ cache_misses = dict()
+ for (server_name, key_id, from_server), results in cached.items():
+ results = [
+ (result["ts_added_ms"], result) for result in results
+ ]
+
+ if not results and key_id is not None:
+ cache_misses.setdefault(server_name, set()).add(key_id)
+ continue
+
+ if key_id is not None:
+ ts_added_ms, most_recent_result = max(results)
+ ts_valid_until_ms = most_recent_result["ts_valid_until_ms"]
+ req_key = query.get(server_name, {}).get(key_id, {})
+ req_valid_until = req_key.get("minimum_valid_until_ts")
+ miss = False
+ if req_valid_until is not None:
+ if ts_valid_until_ms < req_valid_until:
+ logger.debug(
+ "Cached response for %r/%r is older than requested"
+ ": valid_until (%r) < minimum_valid_until (%r)",
+ server_name, key_id,
+ ts_valid_until_ms, req_valid_until
+ )
+ miss = True
+ else:
+ logger.debug(
+ "Cached response for %r/%r is newer than requested"
+ ": valid_until (%r) >= minimum_valid_until (%r)",
+ server_name, key_id,
+ ts_valid_until_ms, req_valid_until
+ )
+ elif (ts_added_ms + ts_valid_until_ms) / 2 < time_now_ms:
+ logger.debug(
+ "Cached response for %r/%r is too old"
+ ": (added (%r) + valid_until (%r)) / 2 < now (%r)",
+ server_name, key_id,
+ ts_added_ms, ts_valid_until_ms, time_now_ms
+ )
+ # We are more than half way through the lifetime of the
+ # response. We should fetch a fresh copy.
+ miss = True
+ else:
+ logger.debug(
+ "Cached response for %r/%r is still valid"
+ ": (added (%r) + valid_until (%r)) / 2 < now (%r)",
+ server_name, key_id,
+ ts_added_ms, ts_valid_until_ms, time_now_ms
+ )
+
+ if miss:
+ cache_misses.setdefault(server_name, set()).add(key_id)
+ json_results.add(bytes(most_recent_result["key_json"]))
+ else:
+ for ts_added, result in results:
+ json_results.add(bytes(result["key_json"]))
+
+ if cache_misses and query_remote_on_cache_miss:
+ for server_name, key_ids in cache_misses.items():
+ try:
+ yield self.keyring.get_server_verify_key_v2_direct(
+ server_name, key_ids
+ )
+ except Exception:
+ logger.exception("Failed to get key for %r", server_name)
+ yield self.query_keys(
+ request, query, query_remote_on_cache_miss=False
+ )
+ else:
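+ # The cached key_json blobs are already-signed JSON, so the response
+ # is assembled byte-for-byte rather than re-serialised (re-encoding
+ # could perturb the signed bytes).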
+ result_io = BytesIO()
+ result_io.write(b"{\"server_keys\":")
+ sep = b"["
+ for json_bytes in json_results:
+ result_io.write(sep)
+ result_io.write(json_bytes)
+ sep = b","
+ if sep == b"[":
+ result_io.write(sep)
+ result_io.write(b"]}")
+
+ respond_with_json_bytes(
+ request, 200, result_io.getvalue(),
+ version_string=self.version_string
+ )
diff --git a/synapse/rest/media/__init__.py b/synapse/rest/media/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/synapse/rest/media/__init__.py
diff --git a/synapse/rest/media/v0/__init__.py b/synapse/rest/media/v0/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/synapse/rest/media/v0/__init__.py
diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py
new file mode 100644
index 00000000..e4fa8c46
--- /dev/null
+++ b/synapse/rest/media/v0/content_repository.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.http.server import respond_with_json_bytes
+
+from synapse.util.stringutils import random_string
+from synapse.api.errors import (
+ cs_exception, SynapseError, CodeMessageException, Codes, cs_error
+)
+
+from twisted.protocols.basic import FileSender
+from twisted.web import server, resource
+from twisted.internet import defer
+
+import base64
+import simplejson as json
+import logging
+import os
+import re
+
+logger = logging.getLogger(__name__)
+
+
+class ContentRepoResource(resource.Resource):
+ """Provides file uploading and downloading.
+
+ Uploads are POSTed to wherever this Resource is linked to. This resource
+ returns a "content token" which can be used to GET this content again. The
+ token is typically a path, but it may not be. Tokens can expire, be
+ one-time uses, etc.
+
+ In this case, the token is a path to the file and contains 3 interesting
+ sections:
+ - User ID base64d (for namespacing content to each user)
+ - random 24 char string
+ - Content type base64d (so we can return it when clients GET it)
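+
+ Illustrative token shape (schematic, not from the original source):
+ <base64(user_id)><24 random chars>.<base64(content_type)>[.<ext>]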
+
+ """
+ isLeaf = True
+
+ def __init__(self, hs, directory, auth, external_addr):
+ resource.Resource.__init__(self)
+ self.hs = hs
+ self.directory = directory
+ self.auth = auth
+ self.external_addr = external_addr.rstrip('/')
+ self.max_upload_size = hs.config.max_upload_size
+
+ if not os.path.isdir(self.directory):
+ os.mkdir(self.directory)
+ logger.info("ContentRepoResource : Created %s directory.",
+ self.directory)
+
+ @defer.inlineCallbacks
+ def map_request_to_name(self, request):
+ # auth the user
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+
+ # namespace all file uploads on the user
+ prefix = base64.urlsafe_b64encode(
+ auth_user.to_string()
+ ).replace('=', '')
+
+ # use a random string for the main portion
+ main_part = random_string(24)
+
+ # suffix with a file extension if we can make one. This is nice to
+ # provide a hint to clients on the file information. We will also reuse
+ # this info to spit back the content type to the client.
+ suffix = ""
+ if request.requestHeaders.hasHeader("Content-Type"):
+ content_type = request.requestHeaders.getRawHeaders(
+ "Content-Type")[0]
+ suffix = "." + base64.urlsafe_b64encode(content_type)
+ if (content_type.split("/")[0].lower() in
+ ["image", "video", "audio"]):
+ file_ext = content_type.split("/")[-1]
+ # be a little paranoid and only allow a-z
+ file_ext = re.sub("[^a-z]", "", file_ext)
+ suffix += "." + file_ext
+
+ file_name = prefix + main_part + suffix
+ file_path = os.path.join(self.directory, file_name)
+ logger.info("User %s is uploading a file to path %s",
+ auth_user.to_string(),
+ file_path)
+
+ # keep trying to make a non-clashing file, with a sensible max attempts
+ attempts = 0
+ while os.path.exists(file_path):
+ main_part = random_string(24)
+ file_name = prefix + main_part + suffix
+ file_path = os.path.join(self.directory, file_name)
+ attempts += 1
+ if attempts > 25: # really? Really?
+ raise SynapseError(500, "Unable to create file.")
+
+ defer.returnValue(file_path)
+
+ def render_GET(self, request):
+ # no auth here on purpose, to allow anyone to view, even across home
+ # servers.
+
+ # TODO: A little crude here, we could do this better.
+ filename = request.path.split('/')[-1]
+ # be paranoid
+ filename = re.sub(r"[^0-9A-Za-z._-]", "", filename)
+
+ file_path = self.directory + "/" + filename
+
+ logger.debug("Searching for %s", file_path)
+
+ if os.path.isfile(file_path):
+ # filename has the content type
+ base64_contentype = filename.split(".")[1]
+ content_type = base64.urlsafe_b64decode(base64_contentype)
+ logger.info("Sending file %s", file_path)
+ f = open(file_path, 'rb')
+ request.setHeader('Content-Type', content_type)
+
+ # cache for at least a day.
+ # XXX: we might want to turn this off for data we don't want to
+ # recommend caching as it's sensitive or private - or at least
+ # select private. don't bother setting Expires as all our matrix
+ # clients are smart enough to be happy with Cache-Control (right?)
+ request.setHeader(
+ "Cache-Control", "public,max-age=86400,s-maxage=86400"
+ )
+
+ d = FileSender().beginFileTransfer(f, request)
+
+ # after the file has been sent, clean up and finish the request
+ def cbFinished(ignored):
+ f.close()
+ request.finish()
+ d.addCallback(cbFinished)
+ else:
+ respond_with_json_bytes(
+ request,
+ 404,
+ json.dumps(cs_error("Not found", code=Codes.NOT_FOUND)),
+ send_cors=True)
+
+ return server.NOT_DONE_YET
+
+ def render_POST(self, request):
+ self._async_render(request)
+ return server.NOT_DONE_YET
+
+ def render_OPTIONS(self, request):
+ respond_with_json_bytes(request, 200, {}, send_cors=True)
+ return server.NOT_DONE_YET
+
+ @defer.inlineCallbacks
+ def _async_render(self, request):
+ try:
+ # TODO: The checks here are a bit late. The content will have
+ # already been uploaded to a tmp file at this point
+ content_length = request.getHeader("Content-Length")
+ if content_length is None:
+ raise SynapseError(
+ msg="Request must specify a Content-Length", code=400
+ )
+ if int(content_length) > self.max_upload_size:
+ raise SynapseError(
+ msg="Upload request body is too large",
+ code=413,
+ )
+
+ fname = yield self.map_request_to_name(request)
+
+ # TODO I have a suspicious feeling this is just going to block
+ with open(fname, "wb") as f:
+ f.write(request.content.read())
+
+ # FIXME (erikj): These should use constants.
+ file_name = os.path.basename(fname)
+ # FIXME: we can't assume what the repo's public mounted path is
+ # ...plus self-signed SSL won't work to remote clients anyway
+ # ...and we can't assume that it's SSL anyway, as we might want to
+ # serve it via the non-SSL listener...
+ url = "%s/_matrix/content/%s" % (
+ self.external_addr, file_name
+ )
+
+ respond_with_json_bytes(request, 200,
+ json.dumps({"content_token": url}),
+ send_cors=True)
+
+ except CodeMessageException as e:
+ logger.exception(e)
+ respond_with_json_bytes(request, e.code,
+ json.dumps(cs_exception(e)))
+ except Exception as e:
+ logger.error("Failed to store file: %s" % e)
+ respond_with_json_bytes(
+ request,
+ 500,
+ json.dumps({"error": "Internal server error"}),
+ send_cors=True)
diff --git a/synapse/rest/media/v1/__init__.py b/synapse/rest/media/v1/__init__.py
new file mode 100644
index 00000000..d6c66905
--- /dev/null
+++ b/synapse/rest/media/v1/__init__.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import PIL.Image
+
+# check for JPEG support.
+try:
+ PIL.Image._getdecoder("rgb", "jpeg", None)
+except IOError as e:
+ if str(e).startswith("decoder jpeg not available"):
+ raise Exception(
+ "FATAL: jpeg codec not supported. Install pillow correctly! "
+ " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&"
+ " pip install pillow --user'"
+ )
+except Exception:
+ # any other exception is fine
+ pass
+
+
+# check for PNG support.
+try:
+ PIL.Image._getdecoder("rgb", "zip", None)
+except IOError as e:
+ if str(e).startswith("decoder zip not available"):
+ raise Exception(
+ "FATAL: zip codec not supported. Install pillow correctly! "
+ " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&"
+ " pip install pillow --user'"
+ )
+except Exception:
+ # any other exception is fine
+ pass
diff --git a/synapse/rest/media/v1/base_resource.py b/synapse/rest/media/v1/base_resource.py
new file mode 100644
index 00000000..b2aeb8c9
--- /dev/null
+++ b/synapse/rest/media/v1/base_resource.py
@@ -0,0 +1,456 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .thumbnailer import Thumbnailer
+
+from synapse.http.matrixfederationclient import MatrixFederationHttpClient
+from synapse.http.server import respond_with_json
+from synapse.util.stringutils import random_string
+from synapse.api.errors import (
+ cs_error, Codes, SynapseError
+)
+
+from twisted.internet import defer, threads
+from twisted.web.resource import Resource
+from twisted.protocols.basic import FileSender
+
+from synapse.util.async import ObservableDeferred
+from synapse.util.stringutils import is_ascii
+
+import os
+
+import cgi
+import logging
+import urllib
+import urlparse
+
+logger = logging.getLogger(__name__)
+
+
+def parse_media_id(request):
+ try:
+ # This allows users to append e.g. /test.png to the URL. Useful for
+ # clients that parse the URL to see content type.
+ server_name, media_id = request.postpath[:2]
+ file_name = None
+ if len(request.postpath) > 2:
+ try:
+ file_name = urlparse.unquote(request.postpath[-1]).decode("utf-8")
+ except UnicodeDecodeError:
+ pass
+ return server_name, media_id, file_name
+ except Exception:
+ raise SynapseError(
+ 404,
+ "Invalid media id token %r" % (request.postpath,),
+ Codes.UNKNOWN,
+ )
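+ # For example (hypothetical request): GET
+ # /_matrix/media/v1/download/example.org/abcdefgh/pic.png reaches the
+ # download resource with postpath ["example.org", "abcdefgh", "pic.png"],
+ # so parse_media_id returns ("example.org", "abcdefgh", u"pic.png").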
+
+
+class BaseMediaResource(Resource):
+ isLeaf = True
+
+ def __init__(self, hs, filepaths):
+ Resource.__init__(self)
+ self.auth = hs.get_auth()
+ self.client = MatrixFederationHttpClient(hs)
+ self.clock = hs.get_clock()
+ self.server_name = hs.hostname
+ self.store = hs.get_datastore()
+ self.max_upload_size = hs.config.max_upload_size
+ self.max_image_pixels = hs.config.max_image_pixels
+ self.filepaths = filepaths
+ self.version_string = hs.version_string
+ self.downloads = {}
+ self.dynamic_thumbnails = hs.config.dynamic_thumbnails
+ self.thumbnail_requirements = hs.config.thumbnail_requirements
+
+ def _respond_404(self, request):
+ respond_with_json(
+ request, 404,
+ cs_error(
+ "Not found %r" % (request.postpath,),
+ code=Codes.NOT_FOUND,
+ ),
+ send_cors=True
+ )
+
+ @staticmethod
+ def _makedirs(filepath):
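+ # N.B. the exists()/makedirs() pair below is racy under concurrent
+ # requests: another request may create the directory in between,
+ # making makedirs() raise OSError(EEXIST).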
+ dirname = os.path.dirname(filepath)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+ def _get_remote_media(self, server_name, media_id):
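+ # Concurrent requests for the same remote media are coalesced: the
+ # first caller starts the download, later callers observe the same
+ # ObservableDeferred, and the entry is dropped once it resolves.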
+ key = (server_name, media_id)
+ download = self.downloads.get(key)
+ if download is None:
+ download = self._get_remote_media_impl(server_name, media_id)
+ download = ObservableDeferred(
+ download,
+ consumeErrors=True
+ )
+ self.downloads[key] = download
+
+ @download.addBoth
+ def callback(media_info):
+ del self.downloads[key]
+ return media_info
+ return download.observe()
+
+ @defer.inlineCallbacks
+ def _get_remote_media_impl(self, server_name, media_id):
+ media_info = yield self.store.get_cached_remote_media(
+ server_name, media_id
+ )
+ if not media_info:
+ media_info = yield self._download_remote_file(
+ server_name, media_id
+ )
+ defer.returnValue(media_info)
+
+ @defer.inlineCallbacks
+ def _download_remote_file(self, server_name, media_id):
+ file_id = random_string(24)
+
+ fname = self.filepaths.remote_media_filepath(
+ server_name, file_id
+ )
+ self._makedirs(fname)
+
+ try:
+ with open(fname, "wb") as f:
+ request_path = "/".join((
+ "/_matrix/media/v1/download", server_name, media_id,
+ ))
+ length, headers = yield self.client.get_file(
+ server_name, request_path, output_stream=f,
+ max_size=self.max_upload_size,
+ )
+ media_type = headers["Content-Type"][0]
+ time_now_ms = self.clock.time_msec()
+
+ content_disposition = headers.get("Content-Disposition", None)
+ if content_disposition:
+ _, params = cgi.parse_header(content_disposition[0],)
+ upload_name = None
+
+ # First check if there is a valid UTF-8 filename
+ upload_name_utf8 = params.get("filename*", None)
+ if upload_name_utf8:
+ if upload_name_utf8.lower().startswith("utf-8''"):
+ upload_name = upload_name_utf8[7:]
+
+ # If there isn't check for an ascii name.
+ if not upload_name:
+ upload_name_ascii = params.get("filename", None)
+ if upload_name_ascii and is_ascii(upload_name_ascii):
+ upload_name = upload_name_ascii
+
+ if upload_name:
+ upload_name = urlparse.unquote(upload_name)
+ try:
+ upload_name = upload_name.decode("utf-8")
+ except UnicodeDecodeError:
+ upload_name = None
+ else:
+ upload_name = None
+
+ yield self.store.store_cached_remote_media(
+ origin=server_name,
+ media_id=media_id,
+ media_type=media_type,
+ time_now_ms=self.clock.time_msec(),
+ upload_name=upload_name,
+ media_length=length,
+ filesystem_id=file_id,
+ )
+ except:
+ os.remove(fname)
+ raise
+
+ media_info = {
+ "media_type": media_type,
+ "media_length": length,
+ "upload_name": upload_name,
+ "created_ts": time_now_ms,
+ "filesystem_id": file_id,
+ }
+
+ yield self._generate_remote_thumbnails(
+ server_name, media_id, media_info
+ )
+
+ defer.returnValue(media_info)
+
+ @defer.inlineCallbacks
+ def _respond_with_file(self, request, media_type, file_path,
+ file_size=None, upload_name=None):
+ logger.debug("Responding with %r", file_path)
+
+ if os.path.isfile(file_path):
+ request.setHeader(b"Content-Type", media_type.encode("UTF-8"))
+ if upload_name:
+ if is_ascii(upload_name):
+ request.setHeader(
+ b"Content-Disposition",
+ b"inline; filename=%s" % (
+ urllib.quote(upload_name.encode("utf-8")),
+ ),
+ )
+ else:
+ request.setHeader(
+ b"Content-Disposition",
+ b"inline; filename*=utf-8''%s" % (
+ urllib.quote(upload_name.encode("utf-8")),
+ ),
+ )
+
+ # cache for at least a day.
+ # XXX: we might want to turn this off for data we don't want to
+ # recommend caching as it's sensitive or private - or at least
+ # switch to Cache-Control: private. Don't bother setting Expires
+ # as all our clients are smart enough to be happy with Cache-Control.
+ request.setHeader(
+ b"Cache-Control", b"public,max-age=86400,s-maxage=86400"
+ )
+ if file_size is None:
+ stat = os.stat(file_path)
+ file_size = stat.st_size
+
+ request.setHeader(
+ b"Content-Length", b"%d" % (file_size,)
+ )
+
+ with open(file_path, "rb") as f:
+ yield FileSender().beginFileTransfer(f, request)
+
+ request.finish()
+ else:
+ self._respond_404(request)
+
+ def _get_thumbnail_requirements(self, media_type):
+ return self.thumbnail_requirements.get(media_type, ())
+
+ def _generate_thumbnail(self, input_path, t_path, t_width, t_height,
+ t_method, t_type):
+ thumbnailer = Thumbnailer(input_path)
+ m_width = thumbnailer.width
+ m_height = thumbnailer.height
+
+ if m_width * m_height >= self.max_image_pixels:
+ logger.info(
+ "Image too large to thumbnail %r x %r > %r",
+ m_width, m_height, self.max_image_pixels
+ )
+ return
+
+ if t_method == "crop":
+ t_len = thumbnailer.crop(t_path, t_width, t_height, t_type)
+ elif t_method == "scale":
+ t_len = thumbnailer.scale(t_path, t_width, t_height, t_type)
+ else:
+ t_len = None
+
+ return t_len
+
+ @defer.inlineCallbacks
+ def _generate_local_exact_thumbnail(self, media_id, t_width, t_height,
+ t_method, t_type):
+ input_path = self.filepaths.local_media_filepath(media_id)
+
+ t_path = self.filepaths.local_media_thumbnail(
+ media_id, t_width, t_height, t_type, t_method
+ )
+ self._makedirs(t_path)
+
+ t_len = yield threads.deferToThread(
+ self._generate_thumbnail,
+ input_path, t_path, t_width, t_height, t_method, t_type
+ )
+
+ if t_len:
+ yield self.store.store_local_thumbnail(
+ media_id, t_width, t_height, t_type, t_method, t_len
+ )
+
+ defer.returnValue(t_path)
+
+ @defer.inlineCallbacks
+ def _generate_remote_exact_thumbnail(self, server_name, file_id, media_id,
+ t_width, t_height, t_method, t_type):
+ input_path = self.filepaths.remote_media_filepath(server_name, file_id)
+
+ t_path = self.filepaths.remote_media_thumbnail(
+ server_name, file_id, t_width, t_height, t_type, t_method
+ )
+ self._makedirs(t_path)
+
+ t_len = yield threads.deferToThread(
+ self._generate_thumbnail,
+ input_path, t_path, t_width, t_height, t_method, t_type
+ )
+
+ if t_len:
+ yield self.store.store_remote_media_thumbnail(
+ server_name, media_id, file_id,
+ t_width, t_height, t_type, t_method, t_len
+ )
+
+ defer.returnValue(t_path)
+
+ @defer.inlineCallbacks
+ def _generate_local_thumbnails(self, media_id, media_info):
+ media_type = media_info["media_type"]
+ requirements = self._get_thumbnail_requirements(media_type)
+ if not requirements:
+ return
+
+ input_path = self.filepaths.local_media_filepath(media_id)
+ thumbnailer = Thumbnailer(input_path)
+ m_width = thumbnailer.width
+ m_height = thumbnailer.height
+
+ if m_width * m_height >= self.max_image_pixels:
+ logger.info(
+ "Image too large to thumbnail %r x %r > %r",
+ m_width, m_height, self.max_image_pixels
+ )
+ return
+
+ local_thumbnails = []
+
+ def generate_thumbnails():
+ scales = set()
+ crops = set()
+ for r_width, r_height, r_method, r_type in requirements:
+ if r_method == "scale":
+ t_width, t_height = thumbnailer.aspect(r_width, r_height)
+ scales.add((
+ min(m_width, t_width), min(m_height, t_height), r_type,
+ ))
+ elif r_method == "crop":
+ crops.add((r_width, r_height, r_type))
+
+ for t_width, t_height, t_type in scales:
+ t_method = "scale"
+ t_path = self.filepaths.local_media_thumbnail(
+ media_id, t_width, t_height, t_type, t_method
+ )
+ self._makedirs(t_path)
+ t_len = thumbnailer.scale(t_path, t_width, t_height, t_type)
+
+ local_thumbnails.append((
+ media_id, t_width, t_height, t_type, t_method, t_len
+ ))
+
+ for t_width, t_height, t_type in crops:
+ if (t_width, t_height, t_type) in scales:
+ # If the aspect ratio of the cropped thumbnail matches a purely
+ # scaled one then there is no point in calculating a separate
+ # thumbnail.
+ continue
+ t_method = "crop"
+ t_path = self.filepaths.local_media_thumbnail(
+ media_id, t_width, t_height, t_type, t_method
+ )
+ self._makedirs(t_path)
+ t_len = thumbnailer.crop(t_path, t_width, t_height, t_type)
+ local_thumbnails.append((
+ media_id, t_width, t_height, t_type, t_method, t_len
+ ))
+
+ yield threads.deferToThread(generate_thumbnails)
+
+ for l in local_thumbnails:
+ yield self.store.store_local_thumbnail(*l)
+
+ defer.returnValue({
+ "width": m_width,
+ "height": m_height,
+ })
+
+ @defer.inlineCallbacks
+ def _generate_remote_thumbnails(self, server_name, media_id, media_info):
+ media_type = media_info["media_type"]
+ file_id = media_info["filesystem_id"]
+ requirements = self._get_thumbnail_requirements(media_type)
+ if not requirements:
+ return
+
+ remote_thumbnails = []
+
+ input_path = self.filepaths.remote_media_filepath(server_name, file_id)
+ thumbnailer = Thumbnailer(input_path)
+ m_width = thumbnailer.width
+ m_height = thumbnailer.height
+
+ def generate_thumbnails():
+ if m_width * m_height >= self.max_image_pixels:
+ logger.info(
+ "Image too large to thumbnail %r x %r > %r",
+ m_width, m_height, self.max_image_pixels
+ )
+ return
+
+ scales = set()
+ crops = set()
+ for r_width, r_height, r_method, r_type in requirements:
+ if r_method == "scale":
+ t_width, t_height = thumbnailer.aspect(r_width, r_height)
+ scales.add((
+ min(m_width, t_width), min(m_height, t_height), r_type,
+ ))
+ elif r_method == "crop":
+ crops.add((r_width, r_height, r_type))
+
+ for t_width, t_height, t_type in scales:
+ t_method = "scale"
+ t_path = self.filepaths.remote_media_thumbnail(
+ server_name, file_id, t_width, t_height, t_type, t_method
+ )
+ self._makedirs(t_path)
+ t_len = thumbnailer.scale(t_path, t_width, t_height, t_type)
+ remote_thumbnails.append([
+ server_name, media_id, file_id,
+ t_width, t_height, t_type, t_method, t_len
+ ])
+
+ for t_width, t_height, t_type in crops:
+ if (t_width, t_height, t_type) in scales:
+ # If the aspect ratio of the cropped thumbnail matches a purely
+ # scaled one then there is no point in calculating a separate
+ # thumbnail.
+ continue
+ t_method = "crop"
+ t_path = self.filepaths.remote_media_thumbnail(
+ server_name, file_id, t_width, t_height, t_type, t_method
+ )
+ self._makedirs(t_path)
+ t_len = thumbnailer.crop(t_path, t_width, t_height, t_type)
+ remote_thumbnails.append([
+ server_name, media_id, file_id,
+ t_width, t_height, t_type, t_method, t_len
+ ])
+
+ yield threads.deferToThread(generate_thumbnails)
+
+ for r in remote_thumbnails:
+ yield self.store.store_remote_media_thumbnail(*r)
+
+ defer.returnValue({
+ "width": m_width,
+ "height": m_height,
+ })
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py
new file mode 100644
index 00000000..ab384e53
--- /dev/null
+++ b/synapse/rest/media/v1/download_resource.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base_resource import BaseMediaResource, parse_media_id
+from synapse.http.server import request_handler
+
+from twisted.web.server import NOT_DONE_YET
+from twisted.internet import defer
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class DownloadResource(BaseMediaResource):
+ def render_GET(self, request):
+ self._async_render_GET(request)
+ return NOT_DONE_YET
+
+ @request_handler
+ @defer.inlineCallbacks
+ def _async_render_GET(self, request):
+ server_name, media_id, name = parse_media_id(request)
+ if server_name == self.server_name:
+ yield self._respond_local_file(request, media_id, name)
+ else:
+ yield self._respond_remote_file(
+ request, server_name, media_id, name
+ )
+
+ @defer.inlineCallbacks
+ def _respond_local_file(self, request, media_id, name):
+ media_info = yield self.store.get_local_media(media_id)
+ if not media_info:
+ self._respond_404(request)
+ return
+
+ media_type = media_info["media_type"]
+ media_length = media_info["media_length"]
+ upload_name = name if name else media_info["upload_name"]
+ file_path = self.filepaths.local_media_filepath(media_id)
+
+ yield self._respond_with_file(
+ request, media_type, file_path, media_length,
+ upload_name=upload_name,
+ )
+
+ @defer.inlineCallbacks
+ def _respond_remote_file(self, request, server_name, media_id, name):
+ media_info = yield self._get_remote_media(server_name, media_id)
+
+ media_type = media_info["media_type"]
+ media_length = media_info["media_length"]
+ filesystem_id = media_info["filesystem_id"]
+ upload_name = name if name else media_info["upload_name"]
+
+ file_path = self.filepaths.remote_media_filepath(
+ server_name, filesystem_id
+ )
+
+ yield self._respond_with_file(
+ request, media_type, file_path, media_length,
+ upload_name=upload_name,
+ )
diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py
new file mode 100644
index 00000000..ed9a58e9
--- /dev/null
+++ b/synapse/rest/media/v1/filepath.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+
+class MediaFilePaths(object):
+
+ def __init__(self, base_path):
+ self.base_path = base_path
+
+ def default_thumbnail(self, default_top_level, default_sub_type, width,
+ height, content_type, method):
+ top_level_type, sub_type = content_type.split("/")
+ file_name = "%i-%i-%s-%s-%s" % (
+ width, height, top_level_type, sub_type, method
+ )
+ return os.path.join(
+ self.base_path, "default_thumbnails", default_top_level,
+ default_sub_type, file_name
+ )
+
+ def local_media_filepath(self, media_id):
+ return os.path.join(
+ self.base_path, "local_content",
+ media_id[0:2], media_id[2:4], media_id[4:]
+ )
+
+ def local_media_thumbnail(self, media_id, width, height, content_type,
+ method):
+ top_level_type, sub_type = content_type.split("/")
+ file_name = "%i-%i-%s-%s-%s" % (
+ width, height, top_level_type, sub_type, method
+ )
+ return os.path.join(
+ self.base_path, "local_thumbnails",
+ media_id[0:2], media_id[2:4], media_id[4:],
+ file_name
+ )
+
+ def remote_media_filepath(self, server_name, file_id):
+ return os.path.join(
+ self.base_path, "remote_content", server_name,
+ file_id[0:2], file_id[2:4], file_id[4:]
+ )
+
+ def remote_media_thumbnail(self, server_name, file_id, width, height,
+ content_type, method):
+ top_level_type, sub_type = content_type.split("/")
+ file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type)
+ return os.path.join(
+ self.base_path, "remote_thumbnail", server_name,
+ file_id[0:2], file_id[2:4], file_id[4:],
+ file_name
+ )
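+ # Example layout (hypothetical media_id "GerZNDnDZVjsOtar"):
+ # local_media_filepath -> <base_path>/local_content/Ge/rZ/NDnDZVjsOtar
+ # Sharding on the first two character pairs keeps any single directory
+ # from accumulating an unbounded number of entries.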
diff --git a/synapse/rest/media/v1/identicon_resource.py b/synapse/rest/media/v1/identicon_resource.py
new file mode 100644
index 00000000..603859d5
--- /dev/null
+++ b/synapse/rest/media/v1/identicon_resource.py
@@ -0,0 +1,65 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pydenticon import Generator
+from twisted.web.resource import Resource
+
+FOREGROUND = [
+ "rgb(45,79,255)",
+ "rgb(254,180,44)",
+ "rgb(226,121,234)",
+ "rgb(30,179,253)",
+ "rgb(232,77,65)",
+ "rgb(49,203,115)",
+ "rgb(141,69,170)"
+]
+
+BACKGROUND = "rgb(224,224,224)"
+SIZE = 5
+
+
+class IdenticonResource(Resource):
+ isLeaf = True
+
+ def __init__(self):
+ Resource.__init__(self)
+ self.generator = Generator(
+ SIZE, SIZE, foreground=FOREGROUND, background=BACKGROUND,
+ )
+
+ def generate_identicon(self, name, width, height):
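+ # The leftover pixels that do not divide evenly into the SIZE x SIZE
+ # grid become padding: width leftovers pad left/right, height
+ # leftovers pad top/bottom. e.g. width=height=97 with SIZE=5:
+ # 97 % 5 == 2, so the identicon is drawn at 95x95 and each axis's
+ # spare 2px is split 1px/1px between the opposing edges.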
+ h_padding = width % SIZE
+ v_padding = height % SIZE
+ top_padding = v_padding // 2
+ left_padding = h_padding // 2
+ bottom_padding = v_padding - top_padding
+ right_padding = h_padding - left_padding
+ width -= h_padding
+ height -= v_padding
+ padding = (top_padding, bottom_padding, left_padding, right_padding)
+ identicon = self.generator.generate(
+ name, width, height, padding=padding
+ )
+ return identicon
+
+ def render_GET(self, request):
+ name = "/".join(request.postpath)
+ width = int(request.args.get("width", [96])[0])
+ height = int(request.args.get("height", [96])[0])
+ identicon_bytes = self.generate_identicon(name, width, height)
+ request.setHeader(b"Content-Type", b"image/png")
+ request.setHeader(
+ b"Cache-Control", b"public,max-age=86400,s-maxage=86400"
+ )
+ return identicon_bytes
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
new file mode 100644
index 00000000..9ca4d884
--- /dev/null
+++ b/synapse/rest/media/v1/media_repository.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .upload_resource import UploadResource
+from .download_resource import DownloadResource
+from .thumbnail_resource import ThumbnailResource
+from .identicon_resource import IdenticonResource
+from .filepath import MediaFilePaths
+
+from twisted.web.resource import Resource
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class MediaRepositoryResource(Resource):
+ """File uploading and downloading.
+
+ Uploads are POSTed to a resource which returns a token which is used to GET
+ the download::
+
+ => POST /_matrix/media/v1/upload HTTP/1.1
+ Content-Type: <media-type>
+ Content-Length: <content-length>
+
+ <media>
+
+ <= HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ { "content_uri": "mxc://<server-name>/<media-id>" }
+
+ => GET /_matrix/media/v1/download/<server-name>/<media-id> HTTP/1.1
+
+ <= HTTP/1.1 200 OK
+ Content-Type: <media-type>
+ Content-Disposition: attachment;filename=<upload-filename>
+
+ <media>
+
+ Clients can get thumbnails by supplying a desired width and height and
+ thumbnailing method::
+
+ => GET /_matrix/media/v1/thumbnail/<server_name>
+ /<media-id>?width=<w>&height=<h>&method=<m> HTTP/1.1
+
+ <= HTTP/1.1 200 OK
+ Content-Type: image/jpeg or image/png
+
+ <thumbnail>
+
+ The thumbnail methods are "crop" and "scale". "scale" tries to return an
+ image where either the width or the height is smaller than the requested
+ size. The client should then scale and letterbox the image if it needs to
+ fit within a given rectangle. "crop" tries to return an image where the
+ width and height are close to the requested size and the aspect ratio
+ matches the requested one. The client should scale the image if it needs
+ to fit within a given rectangle.
+ """
+
+ def __init__(self, hs):
+ Resource.__init__(self)
+ filepaths = MediaFilePaths(hs.config.media_store_path)
+ self.putChild("upload", UploadResource(hs, filepaths))
+ self.putChild("download", DownloadResource(hs, filepaths))
+ self.putChild("thumbnail", ThumbnailResource(hs, filepaths))
+ self.putChild("identicon", IdenticonResource())
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
new file mode 100644
index 00000000..e506dad9
--- /dev/null
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .base_resource import BaseMediaResource, parse_media_id
+from synapse.http.servlet import parse_string, parse_integer
+from synapse.http.server import request_handler
+
+from twisted.web.server import NOT_DONE_YET
+from twisted.internet import defer
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class ThumbnailResource(BaseMediaResource):
+ isLeaf = True
+
+ def render_GET(self, request):
+ self._async_render_GET(request)
+ return NOT_DONE_YET
+
+ @request_handler
+ @defer.inlineCallbacks
+ def _async_render_GET(self, request):
+ server_name, media_id, _ = parse_media_id(request)
+ width = parse_integer(request, "width")
+ height = parse_integer(request, "height")
+ method = parse_string(request, "method", "scale")
+ m_type = parse_string(request, "type", "image/png")
+
+ if server_name == self.server_name:
+ if self.dynamic_thumbnails:
+ yield self._select_or_generate_local_thumbnail(
+ request, media_id, width, height, method, m_type
+ )
+ else:
+ yield self._respond_local_thumbnail(
+ request, media_id, width, height, method, m_type
+ )
+ else:
+ if self.dynamic_thumbnails:
+ yield self._select_or_generate_remote_thumbnail(
+ request, server_name, media_id,
+ width, height, method, m_type
+ )
+ else:
+ yield self._respond_remote_thumbnail(
+ request, server_name, media_id,
+ width, height, method, m_type
+ )
+
+ @defer.inlineCallbacks
+ def _respond_local_thumbnail(self, request, media_id, width, height,
+ method, m_type):
+ media_info = yield self.store.get_local_media(media_id)
+
+ if not media_info:
+ self._respond_404(request)
+ return
+
+ thumbnail_infos = yield self.store.get_local_media_thumbnails(media_id)
+
+ if thumbnail_infos:
+ thumbnail_info = self._select_thumbnail(
+ width, height, method, m_type, thumbnail_infos
+ )
+ t_width = thumbnail_info["thumbnail_width"]
+ t_height = thumbnail_info["thumbnail_height"]
+ t_type = thumbnail_info["thumbnail_type"]
+ t_method = thumbnail_info["thumbnail_method"]
+
+ file_path = self.filepaths.local_media_thumbnail(
+ media_id, t_width, t_height, t_type, t_method,
+ )
+ yield self._respond_with_file(request, t_type, file_path)
+
+ else:
+ yield self._respond_default_thumbnail(
+ request, media_info, width, height, method, m_type,
+ )
+
+ @defer.inlineCallbacks
+ def _select_or_generate_local_thumbnail(self, request, media_id, desired_width,
+ desired_height, desired_method,
+ desired_type):
+ media_info = yield self.store.get_local_media(media_id)
+
+ if not media_info:
+ self._respond_404(request)
+ return
+
+ thumbnail_infos = yield self.store.get_local_media_thumbnails(media_id)
+ for info in thumbnail_infos:
+ t_w = info["thumbnail_width"] == desired_width
+ t_h = info["thumbnail_height"] == desired_height
+ t_method = info["thumbnail_method"] == desired_method
+ t_type = info["thumbnail_type"] == desired_type
+
+ if t_w and t_h and t_method and t_type:
+ file_path = self.filepaths.local_media_thumbnail(
+ media_id, desired_width, desired_height, desired_type, desired_method,
+ )
+ yield self._respond_with_file(request, desired_type, file_path)
+ return
+
+ logger.debug("We don't have a local thumbnail of that size. Generating")
+
+ # Okay, so we generate one.
+ file_path = yield self._generate_local_exact_thumbnail(
+ media_id, desired_width, desired_height, desired_method, desired_type
+ )
+
+ if file_path:
+ yield self._respond_with_file(request, desired_type, file_path)
+ else:
+ yield self._respond_default_thumbnail(
+ request, media_info, desired_width, desired_height,
+ desired_method, desired_type,
+ )
+
+ @defer.inlineCallbacks
+ def _select_or_generate_remote_thumbnail(self, request, server_name, media_id,
+ desired_width, desired_height,
+ desired_method, desired_type):
+ media_info = yield self._get_remote_media(server_name, media_id)
+
+ thumbnail_infos = yield self.store.get_remote_media_thumbnails(
+ server_name, media_id,
+ )
+
+ file_id = media_info["filesystem_id"]
+
+ for info in thumbnail_infos:
+ t_w = info["thumbnail_width"] == desired_width
+ t_h = info["thumbnail_height"] == desired_height
+ t_method = info["thumbnail_method"] == desired_method
+ t_type = info["thumbnail_type"] == desired_type
+
+ if t_w and t_h and t_method and t_type:
+ file_path = self.filepaths.remote_media_thumbnail(
+ server_name, file_id, desired_width, desired_height,
+ desired_type, desired_method,
+ )
+ yield self._respond_with_file(request, desired_type, file_path)
+ return
+
+ logger.debug("We don't have a local thumbnail of that size. Generating")
+
+ # Okay, so we generate one.
+ file_path = yield self._generate_remote_exact_thumbnail(
+ server_name, file_id, media_id, desired_width,
+ desired_height, desired_method, desired_type
+ )
+
+ if file_path:
+ yield self._respond_with_file(request, desired_type, file_path)
+ else:
+ yield self._respond_default_thumbnail(
+ request, media_info, desired_width, desired_height,
+ desired_method, desired_type,
+ )
+
+ @defer.inlineCallbacks
+ def _respond_remote_thumbnail(self, request, server_name, media_id, width,
+ height, method, m_type):
+ # TODO: Don't download the whole remote file
+ # We should proxy the thumbnail from the remote server instead.
+ media_info = yield self._get_remote_media(server_name, media_id)
+
+ thumbnail_infos = yield self.store.get_remote_media_thumbnails(
+ server_name, media_id,
+ )
+
+ if thumbnail_infos:
+ thumbnail_info = self._select_thumbnail(
+ width, height, method, m_type, thumbnail_infos
+ )
+ t_width = thumbnail_info["thumbnail_width"]
+ t_height = thumbnail_info["thumbnail_height"]
+ t_type = thumbnail_info["thumbnail_type"]
+ t_method = thumbnail_info["thumbnail_method"]
+ file_id = thumbnail_info["filesystem_id"]
+ t_length = thumbnail_info["thumbnail_length"]
+
+ file_path = self.filepaths.remote_media_thumbnail(
+ server_name, file_id, t_width, t_height, t_type, t_method,
+ )
+ yield self._respond_with_file(request, t_type, file_path, t_length)
+ else:
+ yield self._respond_default_thumbnail(
+ request, media_info, width, height, method, m_type,
+ )
+
+ @defer.inlineCallbacks
+ def _respond_default_thumbnail(self, request, media_info, width, height,
+ method, m_type):
+ media_type = media_info["media_type"]
+ top_level_type = media_type.split("/")[0]
+ sub_type = media_type.split("/")[-1].split(";")[0]
+ thumbnail_infos = yield self.store.get_default_thumbnails(
+ top_level_type, sub_type,
+ )
+ if not thumbnail_infos:
+ thumbnail_infos = yield self.store.get_default_thumbnails(
+ top_level_type, "_default",
+ )
+ if not thumbnail_infos:
+ thumbnail_infos = yield self.store.get_default_thumbnails(
+ "_default", "_default",
+ )
+ if not thumbnail_infos:
+ self._respond_404(request)
+ return
+
+ thumbnail_info = self._select_thumbnail(
+ width, height, "crop", m_type, thumbnail_infos
+ )
+
+ t_width = thumbnail_info["thumbnail_width"]
+ t_height = thumbnail_info["thumbnail_height"]
+ t_type = thumbnail_info["thumbnail_type"]
+ t_method = thumbnail_info["thumbnail_method"]
+ t_length = thumbnail_info["thumbnail_length"]
+
+ file_path = self.filepaths.default_thumbnail(
+ top_level_type, sub_type, t_width, t_height, t_type, t_method,
+ )
+ yield self._respond_with_file(request, t_type, file_path, t_length)
+
+ def _select_thumbnail(self, desired_width, desired_height, desired_method,
+ desired_type, thumbnail_infos):
+ d_w = desired_width
+ d_h = desired_height
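+ # Candidates are ranked by building tuples and taking min(): earlier
+ # fields dominate later ones, so (for "crop") an aspect-ratio match
+ # beats being at least as large as requested, which beats closeness
+ # in area, then a matching content type, then a smaller file.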
+
+ if desired_method.lower() == "crop":
+ info_list = []
+ for info in thumbnail_infos:
+ t_w = info["thumbnail_width"]
+ t_h = info["thumbnail_height"]
+ t_method = info["thumbnail_method"]
+ if t_method == "scale" or t_method == "crop":
+ aspect_quality = abs(d_w * t_h - d_h * t_w)
+ min_quality = 0 if d_w <= t_w and d_h <= t_h else 1
+ size_quality = abs((d_w - t_w) * (d_h - t_h))
+ type_quality = desired_type != info["thumbnail_type"]
+ length_quality = info["thumbnail_length"]
+ info_list.append((
+ aspect_quality, min_quality, size_quality, type_quality,
+ length_quality, info
+ ))
+ if info_list:
+ return min(info_list)[-1]
+ else:
+ info_list = []
+ info_list2 = []
+ for info in thumbnail_infos:
+ t_w = info["thumbnail_width"]
+ t_h = info["thumbnail_height"]
+ t_method = info["thumbnail_method"]
+ size_quality = abs((d_w - t_w) * (d_h - t_h))
+ type_quality = desired_type != info["thumbnail_type"]
+ length_quality = info["thumbnail_length"]
+ if t_method == "scale" and (t_w >= d_w or t_h >= d_h):
+ info_list.append((
+ size_quality, type_quality, length_quality, info
+ ))
+ elif t_method == "scale":
+ info_list2.append((
+ size_quality, type_quality, length_quality, info
+ ))
+ if info_list:
+ return min(info_list)[-1]
+ else:
+ return min(info_list2)[-1]
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
new file mode 100644
index 00000000..1e965c36
--- /dev/null
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import PIL.Image as Image
+from io import BytesIO
+
+
+class Thumbnailer(object):
+
+ FORMATS = {
+ "image/jpeg": "JPEG",
+ "image/png": "PNG",
+ }
+
+ def __init__(self, input_path):
+ self.image = Image.open(input_path)
+ self.width, self.height = self.image.size
+
+ def aspect(self, max_width, max_height):
+ """Calculate the largest size that preserves aspect ratio which
+ fits within the given rectangle::
+
+ (w_in / h_in) = (w_out / h_out)
+ w_out = min(w_max, h_max * (w_in / h_in))
+ h_out = min(h_max, w_max * (h_in / w_in))
+
+ Args:
+ max_width: The largest possible width.
+ max_height: The largest possible height.
+ """
+
+ if max_width * self.height < max_height * self.width:
+ return (max_width, (max_width * self.height) // self.width)
+ else:
+ return ((max_height * self.width) // self.height, max_height)
+
+ def scale(self, output_path, width, height, output_type):
+ """Rescales the image to the given dimensions"""
+ scaled = self.image.resize((width, height), Image.ANTIALIAS)
+ return self.save_image(scaled, output_type, output_path)
+
+ def crop(self, output_path, width, height, output_type):
+ """Rescales and crops the image to the given dimensions preserving
+ aspect::
+ (w_in / h_in) = (w_scaled / h_scaled)
+ w_scaled = max(w_out, h_out * (w_in / h_in))
+ h_scaled = max(h_out, w_out * (h_in / w_in))
+
+ Args:
+ output_path: Path to write the thumbnail to.
+ width: The desired output width.
+ height: The desired output height.
+ output_type: The content type of the thumbnail (e.g. "image/png").
+ """
+ if width * self.height > height * self.width:
+ scaled_height = (width * self.height) // self.width
+ scaled_image = self.image.resize(
+ (width, scaled_height), Image.ANTIALIAS
+ )
+ crop_top = (scaled_height - height) // 2
+ crop_bottom = height + crop_top
+ cropped = scaled_image.crop((0, crop_top, width, crop_bottom))
+ else:
+ scaled_width = (height * self.width) // self.height
+ scaled_image = self.image.resize(
+ (scaled_width, height), Image.ANTIALIAS
+ )
+ crop_left = (scaled_width - width) // 2
+ crop_right = width + crop_left
+ cropped = scaled_image.crop((crop_left, 0, crop_right, height))
+ return self.save_image(cropped, output_type, output_path)
+
+ def save_image(self, output_image, output_type, output_path):
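+ # quality=80 is honoured by the JPEG encoder; PNG output is lossless
+ # and its encoder ignores the quality argument.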
+ output_bytes_io = BytesIO()
+ output_image.save(output_bytes_io, self.FORMATS[output_type], quality=80)
+ output_bytes = output_bytes_io.getvalue()
+ with open(output_path, "wb") as output_file:
+ output_file.write(output_bytes)
+ return len(output_bytes)
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py
new file mode 100644
index 00000000..7d615960
--- /dev/null
+++ b/synapse/rest/media/v1/upload_resource.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.http.server import respond_with_json, request_handler
+
+from synapse.util.stringutils import random_string
+from synapse.api.errors import SynapseError
+
+from twisted.web.server import NOT_DONE_YET
+from twisted.internet import defer
+
+from .base_resource import BaseMediaResource
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class UploadResource(BaseMediaResource):
+ def render_POST(self, request):
+ self._async_render_POST(request)
+ return NOT_DONE_YET
+
+ def render_OPTIONS(self, request):
+ respond_with_json(request, 200, {}, send_cors=True)
+ return NOT_DONE_YET
+
+ @defer.inlineCallbacks
+ def create_content(self, media_type, upload_name, content, content_length,
+ auth_user):
+ media_id = random_string(24)
+
+ fname = self.filepaths.local_media_filepath(media_id)
+ self._makedirs(fname)
+
+ # This shouldn't block for very long because the content will have
+ # already been uploaded at this point.
+ with open(fname, "wb") as f:
+ f.write(content)
+
+ yield self.store.store_local_media(
+ media_id=media_id,
+ media_type=media_type,
+ time_now_ms=self.clock.time_msec(),
+ upload_name=upload_name,
+ media_length=content_length,
+ user_id=auth_user,
+ )
+ media_info = {
+ "media_type": media_type,
+ "media_length": content_length,
+ }
+
+ yield self._generate_local_thumbnails(media_id, media_info)
+
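+ # The mxc:// URI returned here is the opaque handle that clients
+ # later pass back to the /download and /thumbnail endpoints.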
+ defer.returnValue("mxc://%s/%s" % (self.server_name, media_id))
+
+ @request_handler
+ @defer.inlineCallbacks
+ def _async_render_POST(self, request):
+ auth_user, _, _ = yield self.auth.get_user_by_req(request)
+ # TODO: The checks here are a bit late. The content will have
+ # already been uploaded to a tmp file at this point
+ content_length = request.getHeader("Content-Length")
+ if content_length is None:
+ raise SynapseError(
+ msg="Request must specify a Content-Length", code=400
+ )
+ if int(content_length) > self.max_upload_size:
+ raise SynapseError(
+ msg="Upload request body is too large",
+ code=413,
+ )
+
+ upload_name = request.args.get("filename", None)
+ if upload_name:
+ try:
+ upload_name = upload_name[0].decode('UTF-8')
+ except UnicodeDecodeError:
+ raise SynapseError(
+ msg="Invalid UTF-8 filename parameter: %r" % (upload_name),
+ code=400,
+ )
+
+ headers = request.requestHeaders
+
+ if headers.hasHeader("Content-Type"):
+ media_type = headers.getRawHeaders("Content-Type")[0]
+ else:
+ raise SynapseError(
+ msg="Upload request missing 'Content-Type'",
+ code=400,
+ )
+
+ # if headers.hasHeader("Content-Disposition"):
+ # disposition = headers.getRawHeaders("Content-Disposition")[0]
+ # TODO(markjh): parse content-disposition
+
+ content_uri = yield self.create_content(
+ media_type, upload_name, request.content.read(),
+ content_length, auth_user
+ )
+
+ respond_with_json(
+ request, 200, {"content_uri": content_uri}, send_cors=True
+ )
diff --git a/synapse/server.py b/synapse/server.py
new file mode 100644
index 00000000..f75d5358
--- /dev/null
+++ b/synapse/server.py
@@ -0,0 +1,224 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file provides some classes for setting up (partially-populated)
+# homeservers; either as a full homeserver as a real application, or a small
+# partial one for unit test mocking.
+
+# Imports required for the default HomeServer() implementation
+from twisted.web.client import BrowserLikePolicyForHTTPS
+from synapse.federation import initialize_http_replication
+from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory
+from synapse.notifier import Notifier
+from synapse.api.auth import Auth
+from synapse.handlers import Handlers
+from synapse.state import StateHandler
+from synapse.storage import DataStore
+from synapse.util import Clock
+from synapse.util.distributor import Distributor
+from synapse.streams.events import EventSources
+from synapse.api.ratelimiting import Ratelimiter
+from synapse.crypto.keyring import Keyring
+from synapse.push.pusherpool import PusherPool
+from synapse.events.builder import EventBuilderFactory
+from synapse.api.filtering import Filtering
+
+
+class BaseHomeServer(object):
+ """A basic homeserver object without lazy component builders.
+
+ All of the components this requires must either be passed in as
+ constructor arguments, or created by overriding the relevant build
+ methods. Typically this would only be used for unit tests.
+
+ For every dependency in the DEPENDENCIES list below, this class creates one
+ method,
+ def get_DEPENDENCY(self)
+ which returns the value of that dependency. If no value has yet been set
+ nor was provided to the constructor, it will attempt to call a lazy builder
+ method called
+ def build_DEPENDENCY(self)
+ which must be implemented by the subclass. That builder may call any of
+ the "get" methods on the instance to obtain the sub-dependencies it
+ requires.
+ """
+
+ DEPENDENCIES = [
+ 'config',
+ 'clock',
+ 'http_client',
+ 'db_pool',
+ 'persistence_service',
+ 'replication_layer',
+ 'datastore',
+ 'handlers',
+ 'v1auth',
+ 'auth',
+ 'rest_servlet_factory',
+ 'state_handler',
+ 'notifier',
+ 'distributor',
+ 'resource_for_client',
+ 'resource_for_client_v2_alpha',
+ 'resource_for_federation',
+ 'resource_for_static_content',
+ 'resource_for_web_client',
+ 'resource_for_content_repo',
+ 'resource_for_server_key',
+ 'resource_for_server_key_v2',
+ 'resource_for_media_repository',
+ 'resource_for_metrics',
+ 'event_sources',
+ 'ratelimiter',
+ 'keyring',
+ 'pusherpool',
+ 'event_builder_factory',
+ 'filtering',
+ 'http_client_context_factory',
+ 'simple_http_client',
+ ]
+
+ def __init__(self, hostname, **kwargs):
+ """
+ Args:
+ hostname : The hostname for the server.
+ """
+ self.hostname = hostname
+ self._building = {}
+
+ # Other kwargs are explicit dependencies
+ for depname in kwargs:
+ setattr(self, depname, kwargs[depname])
+
+ @classmethod
+ def _make_dependency_method(cls, depname):
+ def _get(self):
+ if hasattr(self, depname):
+ return getattr(self, depname)
+
+ if hasattr(self, "build_%s" % (depname)):
+ # Prevent cyclic dependencies from deadlocking
+ if depname in self._building:
+ raise ValueError("Cyclic dependency while building %s" % (
+ depname,
+ ))
+ self._building[depname] = 1
+
+ builder = getattr(self, "build_%s" % (depname))
+ dep = builder()
+ setattr(self, depname, dep)
+
+ del self._building[depname]
+
+ return dep
+
+ raise NotImplementedError(
+ "%s has no %s nor a builder for it" % (
+ type(self).__name__, depname,
+ )
+ )
+
+ setattr(BaseHomeServer, "get_%s" % (depname), _get)
+
+ def get_ip_from_request(self, request):
+ # X-Forwarded-For is handled by our custom request type.
+ return request.getClientIP()
+
+ def is_mine(self, domain_specific_string):
+ return domain_specific_string.domain == self.hostname
+
+# Build magic accessors for every dependency
+for depname in BaseHomeServer.DEPENDENCIES:
+ BaseHomeServer._make_dependency_method(depname)
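+ # For example, 'clock' in DEPENDENCIES produces a get_clock() method:
+ # it returns self.clock if already set, otherwise it calls
+ # build_clock() (as HomeServer defines below) and caches the result
+ # on the instance.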
+
+
+class HomeServer(BaseHomeServer):
+ """A homeserver object that will construct most of its dependencies as
+ required.
+
+ It still requires the following to be specified by the caller:
+ resource_for_client
+ resource_for_web_client
+ resource_for_federation
+ resource_for_content_repo
+ http_client
+ db_pool
+ """
+
+ def build_clock(self):
+ return Clock()
+
+ def build_replication_layer(self):
+ return initialize_http_replication(self)
+
+ def build_datastore(self):
+ return DataStore(self)
+
+ def build_handlers(self):
+ return Handlers(self)
+
+ def build_notifier(self):
+ return Notifier(self)
+
+ def build_auth(self):
+ return Auth(self)
+
+ def build_http_client_context_factory(self):
+ config = self.get_config()
+ return (
+ InsecureInterceptableContextFactory()
+ if config.use_insecure_ssl_client_just_for_testing_do_not_use
+ else BrowserLikePolicyForHTTPS()
+ )
+
+ def build_simple_http_client(self):
+ return SimpleHttpClient(self)
+
+ def build_v1auth(self):
+ orf = Auth(self)
+ # Matrix spec makes no reference to what HTTP status code is returned,
+ # but the V1 API uses 403 where it means 401, and the webclient
+ # relies on this behaviour, so V1 gets its own copy of the auth
+ # with backwards compat behaviour.
+ orf.TOKEN_NOT_FOUND_HTTP_STATUS = 403
+ return orf
+
+ def build_state_handler(self):
+ return StateHandler(self)
+
+ def build_distributor(self):
+ return Distributor()
+
+ def build_event_sources(self):
+ return EventSources(self)
+
+ def build_ratelimiter(self):
+ return Ratelimiter()
+
+ def build_keyring(self):
+ return Keyring(self)
+
+ def build_event_builder_factory(self):
+ return EventBuilderFactory(
+ clock=self.get_clock(),
+ hostname=self.hostname,
+ )
+
+ def build_filtering(self):
+ return Filtering(self)
+
+ def build_pusherpool(self):
+ return PusherPool(self)
diff --git a/synapse/state.py b/synapse/state.py
new file mode 100644
index 00000000..8ea2cac5
--- /dev/null
+++ b/synapse/state.py
@@ -0,0 +1,397 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from synapse.util.logutils import log_function
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.api.constants import EventTypes
+from synapse.api.errors import AuthError
+from synapse.api.auth import AuthEventTypes
+from synapse.events.snapshot import EventContext
+
+from collections import namedtuple
+
+import logging
+import hashlib
+
+logger = logging.getLogger(__name__)
+
+
+KeyStateTuple = namedtuple("KeyStateTuple", ("context", "type", "state_key"))
+
+
+SIZE_OF_CACHE = 1000
+EVICTION_TIMEOUT_SECONDS = 20
+
+
+class _StateCacheEntry(object):
+ def __init__(self, state, state_group, ts):
+ self.state = state
+ self.state_group = state_group
+ # Keep the creation timestamp; callers refresh .ts on cache hits.
+ self.ts = ts
+
+
+class StateHandler(object):
+ """ Responsible for doing state conflict resolution.
+ """
+
+ def __init__(self, hs):
+ self.clock = hs.get_clock()
+ self.store = hs.get_datastore()
+ self.hs = hs
+
+ # dict of set of event_ids -> _StateCacheEntry.
+ self._state_cache = None
+
+ def start_caching(self):
+ logger.debug("start_caching")
+
+ self._state_cache = ExpiringCache(
+ cache_name="state_cache",
+ clock=self.clock,
+ max_len=SIZE_OF_CACHE,
+ expiry_ms=EVICTION_TIMEOUT_SECONDS*1000,
+ reset_expiry_on_get=True,
+ )
+
+ self._state_cache.start()
+
+ @defer.inlineCallbacks
+ def get_current_state(self, room_id, event_type=None, state_key=""):
+ """ Retrieves the current state for the room. This is done by
+ calling `get_latest_events_in_room` to get the leading edges of the
+ event graph and then resolving any of the state conflicts.
+
+ This is equivalent to the state that a hypothetical next event we
+ were about to send would see, before any new events are received.
+
+ If `event_type` is specified, then the method returns only the one
+ event (or None) with that `event_type` and `state_key`.
+
+ :returns map from (type, state_key) to event
+ """
+ event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+
+ cache = None
+ if self._state_cache is not None:
+ cache = self._state_cache.get(frozenset(event_ids), None)
+
+ if cache:
+ cache.ts = self.clock.time_msec()
+ state = cache.state
+ else:
+ res = yield self.resolve_state_groups(room_id, event_ids)
+ state = res[1]
+
+ if event_type:
+ defer.returnValue(state.get((event_type, state_key)))
+ return
+
+ defer.returnValue(state)
+
+ @defer.inlineCallbacks
+ def compute_event_context(self, event, old_state=None, outlier=False):
+ """ Fills out the context with the `current state` of the graph. The
+ `current state` here is defined to be the state of the event graph
+ just before the event - i.e. it never includes `event`
+
+ If `event` has `auth_events` then this will also fill out the
+ `auth_events` field on `context` from the `current_state`.
+
+ Args:
+ event (EventBase)
+ Returns:
+ an EventContext
+ """
+ context = EventContext()
+
+ if outlier:
+ # If this is an outlier, then we know it shouldn't have any current
+ # state. Certainly store.get_current_state won't return any, and
+ # persisting the event won't store the state group.
+ if old_state:
+ context.current_state = {
+ (s.type, s.state_key): s for s in old_state
+ }
+ else:
+ context.current_state = {}
+ context.prev_state_events = []
+ context.state_group = None
+ defer.returnValue(context)
+
+ if old_state:
+ context.current_state = {
+ (s.type, s.state_key): s for s in old_state
+ }
+ context.state_group = None
+
+ if event.is_state():
+ key = (event.type, event.state_key)
+ if key in context.current_state:
+ replaces = context.current_state[key]
+ if replaces.event_id != event.event_id: # Paranoia check
+ event.unsigned["replaces_state"] = replaces.event_id
+
+ context.prev_state_events = []
+ defer.returnValue(context)
+
+ if event.is_state():
+ ret = yield self.resolve_state_groups(
+ event.room_id, [e for e, _ in event.prev_events],
+ event_type=event.type,
+ state_key=event.state_key,
+ )
+ else:
+ ret = yield self.resolve_state_groups(
+ event.room_id, [e for e, _ in event.prev_events],
+ )
+
+ group, curr_state, prev_state = ret
+
+ context.current_state = curr_state
+ context.state_group = group if not event.is_state() else None
+
+ if event.is_state():
+ key = (event.type, event.state_key)
+ if key in context.current_state:
+ replaces = context.current_state[key]
+ event.unsigned["replaces_state"] = replaces.event_id
+
+ context.prev_state_events = prev_state
+ defer.returnValue(context)
+
+ @defer.inlineCallbacks
+ @log_function
+ def resolve_state_groups(self, room_id, event_ids, event_type=None, state_key=""):
+ """ Given a list of event_ids this method fetches the state at each
+ event, resolves conflicts between them and returns them.
+
+ :returns a Deferred tuple of (`state_group`, `state`, `prev_state`).
+ `state_group` is the name of a state group if one and only one is
+ involved. `state` is a map from (type, state_key) to event, and
+ `prev_state` is a list of event ids.
+ """
+ logger.debug("resolve_state_groups event_ids %s", event_ids)
+
+ if self._state_cache is not None:
+ cache = self._state_cache.get(frozenset(event_ids), None)
+ if cache and cache.state_group:
+ cache.ts = self.clock.time_msec()
+ prev_state = cache.state.get((event_type, state_key), None)
+ if prev_state:
+ prev_state = prev_state.event_id
+ prev_states = [prev_state]
+ else:
+ prev_states = []
+ defer.returnValue(
+ (cache.state_group, cache.state, prev_states)
+ )
+
+ state_groups = yield self.store.get_state_groups(
+ room_id, event_ids
+ )
+
+ logger.debug(
+ "resolve_state_groups state_groups %s",
+ state_groups.keys()
+ )
+
+ group_names = set(state_groups.keys())
+ if len(group_names) == 1:
+ name, state_list = state_groups.items().pop()
+ state = {
+ (e.type, e.state_key): e
+ for e in state_list
+ }
+ prev_state = state.get((event_type, state_key), None)
+ if prev_state:
+ prev_state = prev_state.event_id
+ prev_states = [prev_state]
+ else:
+ prev_states = []
+
+ if self._state_cache is not None:
+ cache = _StateCacheEntry(
+ state=state,
+ state_group=name,
+ ts=self.clock.time_msec()
+ )
+
+ self._state_cache[frozenset(event_ids)] = cache
+
+ defer.returnValue((name, state, prev_states))
+
+ new_state, prev_states = self._resolve_events(
+ state_groups.values(), event_type, state_key
+ )
+
+ if self._state_cache is not None:
+ cache = _StateCacheEntry(
+ state=new_state,
+ state_group=None,
+ ts=self.clock.time_msec()
+ )
+
+ self._state_cache[frozenset(event_ids)] = cache
+
+ defer.returnValue((None, new_state, prev_states))
+
+ def resolve_events(self, state_sets, event):
+ if event.is_state():
+ return self._resolve_events(
+ state_sets, event.type, event.state_key
+ )
+ else:
+ return self._resolve_events(state_sets)
+
+ def _resolve_events(self, state_sets, event_type=None, state_key=""):
+ """
+ :returns a tuple (new_state, prev_states). new_state is a map
+ from (type, state_key) to event. prev_states is a list of event_ids.
+ :rtype: (dict[(str, str), synapse.events.FrozenEvent], list[str])
+ """
+ state = {}
+ for st in state_sets:
+ for e in st:
+ state.setdefault(
+ (e.type, e.state_key),
+ {}
+ )[e.event_id] = e
+
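+ # A key is "unconflicted" when every state set contributed the same
+ # event for that (type, state_key); if different event_ids were seen,
+ # the key is "conflicted" and is resolved in _resolve_state_events.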
+ unconflicted_state = {
+ k: v.values()[0] for k, v in state.items()
+ if len(v.values()) == 1
+ }
+
+ conflicted_state = {
+ k: v.values()
+ for k, v in state.items()
+ if len(v.values()) > 1
+ }
+
+ if event_type:
+ prev_states_events = conflicted_state.get(
+ (event_type, state_key), []
+ )
+ prev_states = [s.event_id for s in prev_states_events]
+ else:
+ prev_states = []
+
+ auth_events = {
+ k: e for k, e in unconflicted_state.items()
+ if k[0] in AuthEventTypes
+ }
+
+ try:
+ resolved_state = self._resolve_state_events(
+ conflicted_state, auth_events
+ )
+ except:
+ logger.exception("Failed to resolve state")
+ raise
+
+ new_state = unconflicted_state
+ new_state.update(resolved_state)
+
+ return new_state, prev_states
+
+ @log_function
+ def _resolve_state_events(self, conflicted_state, auth_events):
+ """ This is where we actually decide which of the conflicted state to
+ use.
+
+ We resolve conflicts in the following order:
+ 1. power levels
+ 2. join rules
+ 3. memberships
+ 4. other events.
+ """
+ resolved_state = {}
+ power_key = (EventTypes.PowerLevels, "")
+ if power_key in conflicted_state:
+ events = conflicted_state[power_key]
+ logger.debug("Resolving conflicted power levels %r", events)
+ resolved_state[power_key] = self._resolve_auth_events(
+ events, auth_events)
+
+ auth_events.update(resolved_state)
+
+ for key, events in conflicted_state.items():
+ if key[0] == EventTypes.JoinRules:
+ logger.debug("Resolving conflicted join rules %r", events)
+ resolved_state[key] = self._resolve_auth_events(
+ events,
+ auth_events
+ )
+
+ auth_events.update(resolved_state)
+
+ for key, events in conflicted_state.items():
+ if key[0] == EventTypes.Member:
+ logger.debug("Resolving conflicted member lists %r", events)
+ resolved_state[key] = self._resolve_auth_events(
+ events,
+ auth_events
+ )
+
+ auth_events.update(resolved_state)
+
+ for key, events in conflicted_state.items():
+ if key not in resolved_state:
+ logger.debug("Resolving conflicted state %r:%r", key, events)
+ resolved_state[key] = self._resolve_normal_events(
+ events, auth_events
+ )
+
+ return resolved_state
+
+ def _resolve_auth_events(self, events, auth_events):
+ reverse = [i for i in reversed(self._ordered_events(events))]
+
+ auth_events = dict(auth_events)
+
+ prev_event = reverse[0]
+ for event in reverse[1:]:
+ auth_events[(prev_event.type, prev_event.state_key)] = prev_event
+ try:
+ # FIXME: hs.get_auth() is bad style, but we need to do it to
+ # get around circular deps.
+ self.hs.get_auth().check(event, auth_events)
+ prev_event = event
+ except AuthError:
+ return prev_event
+
+ return event
+
+ def _resolve_normal_events(self, events, auth_events):
+ for event in self._ordered_events(events):
+ try:
+ # FIXME: hs.get_auth() is bad style, but we need to do it to
+ # get around circular deps.
+ self.hs.get_auth().check(event, auth_events)
+ return event
+ except AuthError:
+ pass
+
+ # Use the last event (the one with the least depth) if they all fail
+ # the auth check.
+ return event
+
+ def _ordered_events(self, events):
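+ # Order by descending depth, breaking ties with the SHA-1 of the
+ # event_id so that the ordering is deterministic across servers.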
+ def key_func(e):
+ return -int(e.depth), hashlib.sha1(e.event_id).hexdigest()
+
+ return sorted(events, key=key_func)
diff --git a/synapse/static/client/login/index.html b/synapse/static/client/login/index.html
new file mode 100644
index 00000000..96c8723c
--- /dev/null
+++ b/synapse/static/client/login/index.html
@@ -0,0 +1,50 @@
+<html>
+<head>
+<title> Login </title>
+<meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+<link rel="stylesheet" href="style.css">
+<script src="js/jquery-2.1.3.min.js"></script>
+<script src="js/login.js"></script>
+</head>
+<body onload="matrixLogin.onLoad()">
+    <center>
+        <br/>
+        <h1>Log in with one of the following methods</h1>
+
+        <span id="feedback" style="color: #f00"></span>
+        <br/>
+        <br/>
+
+        <div id="loading">
+            <img src="spinner.gif" />
+        </div>
+
+        <div id="cas_flow" class="login_flow" style="display:none"
+                onclick="gotoCas(); return false;">
+            CAS Authentication: <button id="cas_button" style="margin: 10px">Log in</button>
+        </div>
+
+        <br/>
+
+        <form id="password_form" class="login_flow" style="display:none"
+                onsubmit="matrixLogin.password_login(); return false;">
+            <div>
+                Password Authentication:<br/>
+
+                <div style="text-align: center">
+                    <input id="user_id" size="32" type="text" placeholder="Matrix ID (e.g. bob)" autocapitalize="off" autocorrect="off" />
+                    <br/>
+                    <input id="password" size="32" type="password" placeholder="Password"/>
+                    <br/>
+
+                    <button type="submit" style="margin: 10px">Log in</button>
+                </div>
+            </div>
+        </form>
+
+        <div id="no_login_types" class="login_flow" style="display:none">
+            Login is currently unavailable.
+        </div>
+    </center>
+</body>
+</html>
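
The password form above defers to js/login.js, which is not part of this hunk, so
the exact request it issues is an assumption; the sketch below illustrates the v1
client-server password login that matrixLogin.password_login() would be expected
to perform against a homeserver of this vintage (the homeserver URL, user and
password are placeholders):

    import json
    import requests  # any HTTP client would do

    def password_login(homeserver, user, password):
        # POST the m.login.password flow to the era-appropriate v1 login
        # endpoint; a successful response carries access_token, user_id
        # and home_server.
        resp = requests.post(
            homeserver + "/_matrix/client/api/v1/login",
            data=json.dumps({
                "type": "m.login.password",
                "user": user,
                "password": password,
            }),
            headers={"Content-Type": "application/json"},
        )
        resp.raise_for_status()
        return resp.json()

    # e.g. password_login("http://localhost:8008", "bob", "s3cret")
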
diff --git a/synapse/static/client/login/js/jquery-2.1.3.min.js b/synapse/static/client/login/js/jquery-2.1.3.min.js
new file mode 100644
index 00000000..25714ed2
--- /dev/null
+++ b/synapse/static/client/login/js/jquery-2.1.3.min.js
@@ -0,0 +1,4 @@
+/*! jQuery v2.1.3 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */
+!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l=a.document,m="2.1.3",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return n.each(this,a,b)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(n.isPlainObject(d)||(e=n.isArray(d)))?(e?(e=!1,f=c&&n.isArray(c)?c:[]):f=c&&n.isPlainObject(c)?c:{},g[b]=n.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){return!n.isArray(a)&&a-parseFloat(a)+1>=0},isPlainObject:function(a){return"object"!==n.type(a)||a.nodeType||n.isWindow(a)?!1:a.constructor&&!j.call(a.constructor.prototype,"isPrototypeOf")?!1:!0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=n.trim(a),a&&(1===a.indexOf("use strict")?(b=l.createElement("script"),b.text=a,l.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=s(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:g.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=s(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in 
a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(c=a[b],b=a,a=c),n.isFunction(a)?(e=d.call(arguments,2),f=function(){return a.apply(b||this,e.concat(d.call(arguments)))},f.guid=a.guid=a.guid||n.guid++,f):void 0},now:Date.now,support:k}),n.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=hb(),z=hb(),A=hb(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N=M.replace("w","w#"),O="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+N+"))|)"+L+"*\\]",P=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+O+")*)|.*)\\)|)",Q=new RegExp(L+"+","g"),R=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),S=new RegExp("^"+L+"*,"+L+"*"),T=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),U=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),V=new RegExp(P),W=new RegExp("^"+N+"$"),X={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M.replace("w","w*")+")"),ATTR:new RegExp("^"+O),PSEUDO:new RegExp("^"+P),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},eb=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(fb){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function gb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],k=b.nodeType,"string"!=typeof a||!a||1!==k&&9!==k&&11!==k)return d;if(!e&&p){if(11!==k&&(f=_.exec(a)))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return H.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName)return H.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=1!==k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+rb(o[l]);w=ab.test(a)&&pb(b.parentNode)||b,x=o.join(",")}if(x)try{return 
H.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function hb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ib(a){return a[u]=!0,a}function jb(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function kb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function lb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function nb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function ob(a){return ib(function(b){return b=+b,ib(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function pb(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=gb.support={},f=gb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=gb.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=g.documentElement,e=g.defaultView,e&&e!==e.top&&(e.addEventListener?e.addEventListener("unload",eb,!1):e.attachEvent&&e.attachEvent("onunload",eb)),p=!f(g),c.attributes=jb(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=jb(function(a){return a.appendChild(g.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(g.getElementsByClassName),c.getById=jb(function(a){return o.appendChild(a).id=u,!g.getElementsByName||!g.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(g.querySelectorAll))&&(jb(function(a){o.appendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\f]' msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),jb(function(a){var 
b=g.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&jb(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",P)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===g||a.ownerDocument===v&&t(v,a)?-1:b===g||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,h=[a],i=[b];if(!e||!f)return a===g?-1:b===g?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return lb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?lb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},g):n},gb.matches=function(a,b){return gb(a,null,null,b)},gb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return gb(b,n,null,[a]).length>0},gb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},gb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},gb.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},gb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=gb.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=gb.selectors={cacheLength:50,createPseudo:ib,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||"").replace(cb,db),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||gb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&gb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=gb.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(Q," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||gb.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ib(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ib(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?ib(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ib(function(a){return function(b){return gb(a,b).length>0}}),contains:ib(function(a){return a=a.replace(cb,db),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ib(function(a){return W.test(a||"")||gb.error("unsupported lang: "+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var 
b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:ob(function(){return[0]}),last:ob(function(a,b){return[b-1]}),eq:ob(function(a,b,c){return[0>c?c+b:c]}),even:ob(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:ob(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:ob(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:ob(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=mb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=nb(b);function qb(){}qb.prototype=d.filters=d.pseudos,d.setFilters=new qb,g=gb.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R," ")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?gb.error(a):z(a,i).slice(0)};function rb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function sb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function tb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ub(a,b,c){for(var d=0,e=b.length;e>d;d++)gb(a,b[d],c);return c}function vb(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function wb(a,b,c,d,e,f){return d&&!d[u]&&(d=wb(d)),e&&!e[u]&&(e=wb(e,f)),ib(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ub(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:vb(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=vb(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=vb(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function xb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=sb(function(a){return a===b},h,!0),l=sb(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[sb(tb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return wb(i>1&&tb(m),i>1&&rb(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&xb(a.slice(i,e)),f>e&&xb(a=a.slice(e)),f>e&&rb(a))}m.push(c)}return tb(m)}function yb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=F.call(i));s=vb(s)}H.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&gb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?ib(f):f}return h=gb.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=xb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,yb(e,d)),f.selector=a}return f},i=gb.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&pb(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&rb(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&pb(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=jb(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),jb(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||kb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&jb(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||kb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),jb(function(a){return null==a.getAttribute("disabled")})||kb(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),gb}(a);n.find=t,n.expr=t.selectors,n.expr[":"]=n.expr.pseudos,n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=n.expr.match.needsContext,v=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,w=/^.[^:#\[\.,]*$/;function x(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(w.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return g.call(b,a)>=0!==c})}n.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find:function(a){var b,c=this.length,d=[],e=this;if("string"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;c>b;b++)if(n.contains(e[b],this))return!0}));for(b=0;c>b;b++)n.find(a,e[b],d);return d=this.pushStack(c>1?n.unique(d):d),d.selector=this.selector?this.selector+" "+a:a,d},filter:function(a){return this.pushStack(x(this,a||[],!1))},not:function(a){return this.pushStack(x(this,a||[],!0))},is:function(a){return!!x(this,"string"==typeof a&&u.test(a)?n(a):a||[],!1).length}});var y,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=n.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof 
a){if(c="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||y).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof n?b[0]:b,n.merge(this,n.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:l,!0)),v.test(c[1])&&n.isPlainObject(b))for(c in b)n.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}return d=l.getElementById(c[2]),d&&d.parentNode&&(this.length=1,this[0]=d),this.context=l,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?"undefined"!=typeof y.ready?y.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};A.prototype=n.fn,y=n(l);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};n.extend({dir:function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&n(a).is(c))break;d.push(a)}return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),n.fn.extend({has:function(a){var b=n(a,this),c=b.length;return this.filter(function(){for(var a=0;c>a;a++)if(n.contains(this,b[a]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=u.test(a)||"string"!=typeof a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.unique(f):f)},index:function(a){return a?"string"==typeof a?g.call(n(a),this[0]):g.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.unique(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){while((a=a[b])&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return n.dir(a,"parentNode")},parentsUntil:function(a,b,c){return n.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return n.dir(a,"nextSibling")},prevAll:function(a){return n.dir(a,"previousSibling")},nextUntil:function(a,b,c){return n.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return n.dir(a,"previousSibling",c)},siblings:function(a){return n.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return n.sibling(a.firstChild)},contents:function(a){return a.contentDocument||n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=n.filter(d,e)),this.length>1&&(C[a]||n.unique(e),B.test(a)&&e.reverse()),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return n.each(a.match(E)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):n.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(b=a.memory&&l,c=!0,g=e||0,e=0,f=h.length,d=!0;h&&f>g;g++)if(h[g].apply(l[0],l[1])===!1&&a.stopOnFalse){b=!1;break}d=!1,h&&(i?i.length&&j(i.shift()):b?h=[]:k.disable())},k={add:function(){if(h){var c=h.length;!function g(b){n.each(b,function(b,c){var d=n.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&g(c)})}(arguments),d?f=h.length:b&&(e=c,j(b))}return this},remove:function(){return h&&n.each(arguments,function(a,b){var 
c;while((c=n.inArray(b,h,c))>-1)h.splice(c,1),d&&(f>=c&&f--,g>=c&&g--)}),this},has:function(a){return a?n.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],f=0,this},disable:function(){return h=i=b=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,b||k.disable(),this},locked:function(){return!i},fireWith:function(a,b){return!h||c&&!i||(b=b||[],b=[a,b.slice?b.slice():b],d?i.push(b):j(b)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!c}};return k},n.extend({Deferred:function(a){var b=[["resolve","done",n.Callbacks("once memory"),"resolved"],["reject","fail",n.Callbacks("once memory"),"rejected"],["notify","progress",n.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&n.isFunction(a.promise)?e:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;n.fn.ready=function(a){return n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)},ready:function(a){(a===!0?--n.readyWait:n.isReady)||(n.isReady=!0,a!==!0&&--n.readyWait>0||(H.resolveWith(l,[n]),n.fn.triggerHandler&&(n(l).triggerHandler("ready"),n(l).off("ready"))))}});function I(){l.removeEventListener("DOMContentLoaded",I,!1),a.removeEventListener("load",I,!1),n.ready()}n.ready.promise=function(b){return H||(H=n.Deferred(),"complete"===l.readyState?setTimeout(n.ready):(l.addEventListener("DOMContentLoaded",I,!1),a.addEventListener("load",I,!1))),H.promise(b)},n.ready.promise();var J=n.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===n.type(c)){e=!0;for(h in c)n.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,n.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(n(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f};n.acceptData=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType};function K(){Object.defineProperty(this.cache={},0,{get:function(){return{}}}),this.expando=n.expando+K.uid++}K.uid=1,K.accepts=n.acceptData,K.prototype={key:function(a){if(!K.accepts(a))return 0;var b={},c=a[this.expando];if(!c){c=K.uid++;try{b[this.expando]={value:c},Object.defineProperties(a,b)}catch(d){b[this.expando]=c,n.extend(a,b)}}return this.cache[c]||(this.cache[c]={}),c},set:function(a,b,c){var d,e=this.key(a),f=this.cache[e];if("string"==typeof b)f[b]=c;else 
if(n.isEmptyObject(f))n.extend(this.cache[e],b);else for(d in b)f[d]=b[d];return f},get:function(a,b){var c=this.cache[this.key(a)];return void 0===b?c:c[b]},access:function(a,b,c){var d;return void 0===b||b&&"string"==typeof b&&void 0===c?(d=this.get(a,b),void 0!==d?d:this.get(a,n.camelCase(b))):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d,e,f=this.key(a),g=this.cache[f];if(void 0===b)this.cache[f]={};else{n.isArray(b)?d=b.concat(b.map(n.camelCase)):(e=n.camelCase(b),b in g?d=[b,e]:(d=e,d=d in g?[d]:d.match(E)||[])),c=d.length;while(c--)delete g[d[c]]}},hasData:function(a){return!n.isEmptyObject(this.cache[a[this.expando]]||{})},discard:function(a){a[this.expando]&&delete this.cache[a[this.expando]]}};var L=new K,M=new K,N=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,O=/([A-Z])/g;function P(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(O,"-$1").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:N.test(c)?n.parseJSON(c):c}catch(e){}M.set(a,b,c)}else c=void 0;return c}n.extend({hasData:function(a){return M.hasData(a)||L.hasData(a)},data:function(a,b,c){return M.access(a,b,c)
+},removeData:function(a,b){M.remove(a,b)},_data:function(a,b,c){return L.access(a,b,c)},_removeData:function(a,b){L.remove(a,b)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=M.get(f),1===f.nodeType&&!L.get(f,"hasDataAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=n.camelCase(d.slice(5)),P(f,d,e[d])));L.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){M.set(this,a)}):J(this,function(b){var c,d=n.camelCase(a);if(f&&void 0===b){if(c=M.get(f,a),void 0!==c)return c;if(c=M.get(f,d),void 0!==c)return c;if(c=P(f,d,void 0),void 0!==c)return c}else this.each(function(){var c=M.get(this,d);M.set(this,d,b),-1!==a.indexOf("-")&&void 0!==c&&M.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){M.remove(this,a)})}}),n.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=L.get(a,b),c&&(!d||n.isArray(c)?d=L.access(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return L.get(a,c)||L.access(a,c,{empty:n.Callbacks("once memory").add(function(){L.remove(a,[b+"queue",c])})})}}),n.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?n.queue(this[0],a):void 0===b?this:this.each(function(){var c=n.queue(this,a,b);n._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&n.dequeue(this,a)})},dequeue:function(a){return this.each(function(){n.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=n.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=L.get(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var Q=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,R=["Top","Right","Bottom","Left"],S=function(a,b){return a=b||a,"none"===n.css(a,"display")||!n.contains(a.ownerDocument,a)},T=/^(?:checkbox|radio)$/i;!function(){var a=l.createDocumentFragment(),b=a.appendChild(l.createElement("div")),c=l.createElement("input");c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),b.appendChild(c),k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="<textarea>x</textarea>",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var U="undefined";k.focusinBubbles="onfocusin"in a;var V=/^key/,W=/^(?:mouse|pointer|contextmenu)|click/,X=/^(?:focusinfocus|focusoutblur)$/,Y=/^([^.]*)(?:\.(.+)|)$/;function Z(){return!0}function $(){return!1}function _(){try{return l.activeElement}catch(a){}}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=L.get(a);if(r){c.handler&&(f=c,c=f.handler,e=f.selector),c.guid||(c.guid=n.guid++),(i=r.events)||(i=r.events={}),(g=r.handle)||(g=r.handle=function(b){return typeof n!==U&&n.event.triggered!==b.type?n.event.dispatch.apply(a,arguments):void 
0}),b=(b||"").match(E)||[""],j=b.length;while(j--)h=Y.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o&&(l=n.event.special[o]||{},o=(e?l.delegateType:l.bindType)||o,l=n.event.special[o]||{},k=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},f),(m=i[o])||(m=i[o]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,p,g)!==!1||a.addEventListener&&a.addEventListener(o,g,!1)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),n.event.global[o]=!0)}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=L.hasData(a)&&L.get(a);if(r&&(i=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=Y.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=i[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&q!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete i[o])}else for(o in i)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(i)&&(delete r.handle,L.remove(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,m,o,p=[d||l],q=j.call(b,"type")?b.type:b,r=j.call(b,"namespace")?b.namespace.split("."):[];if(g=h=d=d||l,3!==d.nodeType&&8!==d.nodeType&&!X.test(q+n.event.triggered)&&(q.indexOf(".")>=0&&(r=q.split("."),q=r.shift(),r.sort()),k=q.indexOf(":")<0&&"on"+q,b=b[n.expando]?b:new n.Event(q,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=r.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:n.makeArray(c,[b]),o=n.event.special[q]||{},e||!o.trigger||o.trigger.apply(d,c)!==!1)){if(!e&&!o.noBubble&&!n.isWindow(d)){for(i=o.delegateType||q,X.test(i+q)||(g=g.parentNode);g;g=g.parentNode)p.push(g),h=g;h===(d.ownerDocument||l)&&p.push(h.defaultView||h.parentWindow||a)}f=0;while((g=p[f++])&&!b.isPropagationStopped())b.type=f>1?i:o.bindType||q,m=(L.get(g,"events")||{})[b.type]&&L.get(g,"handle"),m&&m.apply(g,c),m=k&&g[k],m&&m.apply&&n.acceptData(g)&&(b.result=m.apply(g,c),b.result===!1&&b.preventDefault());return b.type=q,e||b.isDefaultPrevented()||o._default&&o._default.apply(p.pop(),c)!==!1||!n.acceptData(d)||k&&n.isFunction(d[q])&&!n.isWindow(d)&&(h=d[k],h&&(d[k]=null),n.event.triggered=q,d[q](),n.event.triggered=void 0,h&&(d[k]=h)),b.result}},dispatch:function(a){a=n.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(L.get(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(g.namespace))&&(a.handleObj=g,a.data=g.data,e=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==e&&(a.result=e)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var 
c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!==this;i=i.parentNode||this)if(i.disabled!==!0||"click"!==a.type){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>=0:n.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button;return null==a.pageX&&null!=b.clientX&&(c=a.target.ownerDocument||l,d=c.documentElement,e=c.body,a.pageX=b.clientX+(d&&d.scrollLeft||e&&e.scrollLeft||0)-(d&&d.clientLeft||e&&e.clientLeft||0),a.pageY=b.clientY+(d&&d.scrollTop||e&&e.scrollTop||0)-(d&&d.clientTop||e&&e.clientTop||0)),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},fix:function(a){if(a[n.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=W.test(e)?this.mouseHooks:V.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new n.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=l),3===a.target.nodeType&&(a.target=a.target.parentNode),g.filter?g.filter(a,f):a},special:{load:{noBubble:!0},focus:{trigger:function(){return this!==_()&&this.focus?(this.focus(),!1):void 0},delegateType:"focusin"},blur:{trigger:function(){return this===_()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return"checkbox"===this.type&&this.click&&n.nodeName(this,"input")?(this.click(),!1):void 0},_default:function(a){return n.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=n.extend(new n.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?n.event.trigger(e,null,b):n.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},n.removeEvent=function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)},n.Event=function(a,b){return this instanceof n.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?Z:$):this.type=a,b&&n.extend(this,b),this.timeStamp=a&&a.timeStamp||n.now(),void(this[n.expando]=!0)):new n.Event(a,b)},n.Event.prototype={isDefaultPrevented:$,isPropagationStopped:$,isImmediatePropagationStopped:$,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=Z,a&&a.preventDefault&&a.preventDefault()},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=Z,a&&a.stopPropagation&&a.stopPropagation()},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=Z,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},n.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){n.event.special[a]={delegateType:b,bindType:b,handle:function(a){var 
c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!n.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.focusinBubbles||n.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){n.event.simulate(b,a.target,n.event.fix(a),!0)};n.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=L.access(d,b);e||d.addEventListener(a,c,!0),L.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=L.access(d,b)-1;e?L.access(d,b,e):(d.removeEventListener(a,c,!0),L.remove(d,b))}}}),n.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(g in a)this.on(g,b,c,a[g],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=$;else if(!d)return this;return 1===e&&(f=d,d=function(a){return n().off(a),f.apply(this,arguments)},d.guid=f.guid||(f.guid=n.guid++)),this.each(function(){n.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,n(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=$),this.each(function(){n.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){n.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?n.event.trigger(a,b,c,!0):void 0}});var ab=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,bb=/<([\w:]+)/,cb=/<|&#?\w+;/,db=/<(?:script|style|link)/i,eb=/checked\s*(?:[^=]|=\s*.checked.)/i,fb=/^$|\/(?:java|ecma)script/i,gb=/^true\/(.*)/,hb=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,ib={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ib.optgroup=ib.option,ib.tbody=ib.tfoot=ib.colgroup=ib.caption=ib.thead,ib.th=ib.td;function jb(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function kb(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function lb(a){var b=gb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function mb(a,b){for(var c=0,d=a.length;d>c;c++)L.set(a[c],"globalEval",!b||L.get(b[c],"globalEval"))}function nb(a,b){var c,d,e,f,g,h,i,j;if(1===b.nodeType){if(L.hasData(a)&&(f=L.access(a),g=L.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;d>c;c++)n.event.add(b,e,j[e][c])}M.hasData(a)&&(h=M.access(a),i=n.extend({},h),M.set(b,i))}}function ob(a,b){var c=a.getElementsByTagName?a.getElementsByTagName(b||"*"):a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&n.nodeName(a,b)?n.merge([a],c):c}function pb(a,b){var c=b.nodeName.toLowerCase();"input"===c&&T.test(a.type)?b.checked=a.checked:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}n.extend({clone:function(a,b,c){var 
d,e,f,g,h=a.cloneNode(!0),i=n.contains(a.ownerDocument,a);if(!(k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(g=ob(h),f=ob(a),d=0,e=f.length;e>d;d++)pb(f[d],g[d]);if(b)if(c)for(f=f||ob(a),g=g||ob(h),d=0,e=f.length;e>d;d++)nb(f[d],g[d]);else nb(a,h);return g=ob(h,"script"),g.length>0&&mb(g,!i&&ob(a,"script")),h},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,k=b.createDocumentFragment(),l=[],m=0,o=a.length;o>m;m++)if(e=a[m],e||0===e)if("object"===n.type(e))n.merge(l,e.nodeType?[e]:e);else if(cb.test(e)){f=f||k.appendChild(b.createElement("div")),g=(bb.exec(e)||["",""])[1].toLowerCase(),h=ib[g]||ib._default,f.innerHTML=h[1]+e.replace(ab,"<$1></$2>")+h[2],j=h[0];while(j--)f=f.lastChild;n.merge(l,f.childNodes),f=k.firstChild,f.textContent=""}else l.push(b.createTextNode(e));k.textContent="",m=0;while(e=l[m++])if((!d||-1===n.inArray(e,d))&&(i=n.contains(e.ownerDocument,e),f=ob(k.appendChild(e),"script"),i&&mb(f),c)){j=0;while(e=f[j++])fb.test(e.type||"")&&c.push(e)}return k},cleanData:function(a){for(var b,c,d,e,f=n.event.special,g=0;void 0!==(c=a[g]);g++){if(n.acceptData(c)&&(e=c[L.expando],e&&(b=L.cache[e]))){if(b.events)for(d in b.events)f[d]?n.event.remove(c,d):n.removeEvent(c,d,b.handle);L.cache[e]&&delete L.cache[e]}delete M.cache[c[M.expando]]}}}),n.fn.extend({text:function(a){return J(this,function(a){return void 0===a?n.text(this):this.empty().each(function(){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&(this.textContent=a)})},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=jb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=jb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?n.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||n.cleanData(ob(c)),c.parentNode&&(b&&n.contains(c.ownerDocument,c)&&mb(ob(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(n.cleanData(ob(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return J(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!db.test(a)&&!ib[(bb.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(ab,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(ob(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,n.cleanData(ob(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,m=this,o=l-1,p=a[0],q=n.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&eb.test(p))return this.each(function(c){var 
d=m.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(c=n.buildFragment(a,this[0].ownerDocument,!1,this),d=c.firstChild,1===c.childNodes.length&&(c=d),d)){for(f=n.map(ob(c,"script"),kb),g=f.length;l>j;j++)h=c,j!==o&&(h=n.clone(h,!0,!0),g&&n.merge(f,ob(h,"script"))),b.call(this[j],h,j);if(g)for(i=f[f.length-1].ownerDocument,n.map(f,lb),j=0;g>j;j++)h=f[j],fb.test(h.type||"")&&!L.access(h,"globalEval")&&n.contains(i,h)&&(h.src?n._evalUrl&&n._evalUrl(h.src):n.globalEval(h.textContent.replace(hb,"")))}return this}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=[],e=n(a),g=e.length-1,h=0;g>=h;h++)c=h===g?this:this.clone(!0),n(e[h])[b](c),f.apply(d,c.get());return this.pushStack(d)}});var qb,rb={};function sb(b,c){var d,e=n(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:n.css(e[0],"display");return e.detach(),f}function tb(a){var b=l,c=rb[a];return c||(c=sb(a,b),"none"!==c&&c||(qb=(qb||n("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=qb[0].contentDocument,b.write(),b.close(),c=sb(a,b),qb.detach()),rb[a]=c),c}var ub=/^margin/,vb=new RegExp("^("+Q+")(?!px)[a-z%]+$","i"),wb=function(b){return b.ownerDocument.defaultView.opener?b.ownerDocument.defaultView.getComputedStyle(b,null):a.getComputedStyle(b,null)};function xb(a,b,c){var d,e,f,g,h=a.style;return c=c||wb(a),c&&(g=c.getPropertyValue(b)||c[b]),c&&(""!==g||n.contains(a.ownerDocument,a)||(g=n.style(a,b)),vb.test(g)&&ub.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0!==g?g+"":g}function yb(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d=l.documentElement,e=l.createElement("div"),f=l.createElement("div");if(f.style){f.style.backgroundClip="content-box",f.cloneNode(!0).style.backgroundClip="",k.clearCloneStyle="content-box"===f.style.backgroundClip,e.style.cssText="border:0;width:0;height:0;top:0;left:-9999px;margin-top:1px;position:absolute",e.appendChild(f);function g(){f.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",f.innerHTML="",d.appendChild(e);var g=a.getComputedStyle(f,null);b="1%"!==g.top,c="4px"===g.width,d.removeChild(e)}a.getComputedStyle&&n.extend(k,{pixelPosition:function(){return g(),b},boxSizingReliable:function(){return null==c&&g(),c},reliableMarginRight:function(){var b,c=f.appendChild(l.createElement("div"));return c.style.cssText=f.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",c.style.marginRight=c.style.width="0",f.style.width="1px",d.appendChild(e),b=!parseFloat(a.getComputedStyle(c,null).marginRight),d.removeChild(e),f.removeChild(c),b}})}}(),n.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var zb=/^(none|table(?!-c[ea]).+)/,Ab=new RegExp("^("+Q+")(.*)$","i"),Bb=new RegExp("^([+-])=("+Q+")","i"),Cb={position:"absolute",visibility:"hidden",display:"block"},Db={letterSpacing:"0",fontWeight:"400"},Eb=["Webkit","O","Moz","ms"];function Fb(a,b){if(b in a)return b;var c=b[0].toUpperCase()+b.slice(1),d=b,e=Eb.length;while(e--)if(b=Eb[e]+c,b in 
a)return b;return d}function Gb(a,b,c){var d=Ab.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Hb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=n.css(a,c+R[f],!0,e)),d?("content"===c&&(g-=n.css(a,"padding"+R[f],!0,e)),"margin"!==c&&(g-=n.css(a,"border"+R[f]+"Width",!0,e))):(g+=n.css(a,"padding"+R[f],!0,e),"padding"!==c&&(g+=n.css(a,"border"+R[f]+"Width",!0,e)));return g}function Ib(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=wb(a),g="border-box"===n.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=xb(a,b,f),(0>e||null==e)&&(e=a.style[b]),vb.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Hb(a,b,c||(g?"border":"content"),d,f)+"px"}function Jb(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=L.get(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&S(d)&&(f[g]=L.access(d,"olddisplay",tb(d.nodeName)))):(e=S(d),"none"===c&&e||L.set(d,"olddisplay",e?c:n.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}n.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=xb(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":"cssFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=n.camelCase(b),i=a.style;return b=n.cssProps[h]||(n.cssProps[h]=Fb(i,h)),g=n.cssHooks[b]||n.cssHooks[h],void 0===c?g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b]:(f=typeof c,"string"===f&&(e=Bb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(n.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||n.cssNumber[h]||(c+="px"),k.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),g&&"set"in g&&void 0===(c=g.set(a,c,d))||(i[b]=c)),void 0)}},css:function(a,b,c,d){var e,f,g,h=n.camelCase(b);return b=n.cssProps[h]||(n.cssProps[h]=Fb(a.style,h)),g=n.cssHooks[b]||n.cssHooks[h],g&&"get"in g&&(e=g.get(a,!0,c)),void 0===e&&(e=xb(a,b,d)),"normal"===e&&b in Db&&(e=Db[b]),""===c||c?(f=parseFloat(e),c===!0||n.isNumeric(f)?f||0:e):e}}),n.each(["height","width"],function(a,b){n.cssHooks[b]={get:function(a,c,d){return c?zb.test(n.css(a,"display"))&&0===a.offsetWidth?n.swap(a,Cb,function(){return Ib(a,b,d)}):Ib(a,b,d):void 0},set:function(a,c,d){var e=d&&wb(a);return Gb(a,c,d?Hb(a,b,d,"border-box"===n.css(a,"boxSizing",!1,e),e):0)}}}),n.cssHooks.marginRight=yb(k.reliableMarginRight,function(a,b){return b?n.swap(a,{display:"inline-block"},xb,[a,"marginRight"]):void 0}),n.each({margin:"",padding:"",border:"Width"},function(a,b){n.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+R[d]+b]=f[d]||f[d-2]||f[0];return e}},ub.test(a)||(n.cssHooks[a+b].set=Gb)}),n.fn.extend({css:function(a,b){return J(this,function(a,b,c){var d,e,f={},g=0;if(n.isArray(b)){for(d=wb(a),e=b.length;e>g;g++)f[b[g]]=n.css(a,b[g],!1,d);return f}return void 0!==c?n.style(a,b,c):n.css(a,b)},a,b,arguments.length>1)},show:function(){return Jb(this,!0)},hide:function(){return Jb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){S(this)?n(this).show():n(this).hide()})}});function Kb(a,b,c,d,e){return new 
Kb.prototype.init(a,b,c,d,e)}n.Tween=Kb,Kb.prototype={constructor:Kb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(n.cssNumber[c]?"":"px")},cur:function(){var a=Kb.propHooks[this.prop];return a&&a.get?a.get(this):Kb.propHooks._default.get(this)},run:function(a){var b,c=Kb.propHooks[this.prop];return this.pos=b=this.options.duration?n.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Kb.propHooks._default.set(this),this}},Kb.prototype.init.prototype=Kb.prototype,Kb.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=n.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){n.fx.step[a.prop]?n.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[n.cssProps[a.prop]]||n.cssHooks[a.prop])?n.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Kb.propHooks.scrollTop=Kb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},n.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},n.fx=Kb.prototype.init,n.fx.step={};var Lb,Mb,Nb=/^(?:toggle|show|hide)$/,Ob=new RegExp("^(?:([+-])=|)("+Q+")([a-z%]*)$","i"),Pb=/queueHooks$/,Qb=[Vb],Rb={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=Ob.exec(b),f=e&&e[3]||(n.cssNumber[a]?"":"px"),g=(n.cssNumber[a]||"px"!==f&&+d)&&Ob.exec(n.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,n.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function Sb(){return setTimeout(function(){Lb=void 0}),Lb=n.now()}function Tb(a,b){var c,d=0,e={height:a};for(b=b?1:0;4>d;d+=2-b)c=R[d],e["margin"+c]=e["padding"+c]=a;return b&&(e.opacity=e.width=a),e}function Ub(a,b,c){for(var d,e=(Rb[b]||[]).concat(Rb["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function Vb(a,b,c){var d,e,f,g,h,i,j,k,l=this,m={},o=a.style,p=a.nodeType&&S(a),q=L.get(a,"fxshow");c.queue||(h=n._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,l.always(function(){l.always(function(){h.unqueued--,n.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[o.overflow,o.overflowX,o.overflowY],j=n.css(a,"display"),k="none"===j?L.get(a,"olddisplay")||tb(a.nodeName):j,"inline"===k&&"none"===n.css(a,"float")&&(o.display="inline-block")),c.overflow&&(o.overflow="hidden",l.always(function(){o.overflow=c.overflow[0],o.overflowX=c.overflow[1],o.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],Nb.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(p?"hide":"show")){if("show"!==e||!q||void 0===q[d])continue;p=!0}m[d]=q&&q[d]||n.style(a,d)}else j=void 0;if(n.isEmptyObject(m))"inline"===("none"===j?tb(a.nodeName):j)&&(o.display=j);else{q?"hidden"in q&&(p=q.hidden):q=L.access(a,"fxshow",{}),f&&(q.hidden=!p),p?n(a).show():l.done(function(){n(a).hide()}),l.done(function(){var b;L.remove(a,"fxshow");for(b in m)n.style(a,b,m[b])});for(d in m)g=Ub(p?q[d]:0,d,l),d in q||(q[d]=g.start,p&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function Wb(a,b){var c,d,e,f,g;for(c in a)if(d=n.camelCase(c),e=b[d],f=a[c],n.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete 
a[c]),g=n.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function Xb(a,b,c){var d,e,f=0,g=Qb.length,h=n.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=Lb||Sb(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:n.extend({},b),opts:n.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:Lb||Sb(),duration:c.duration,tweens:[],createTween:function(b,c){var d=n.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(Wb(k,j.opts.specialEasing);g>f;f++)if(d=Qb[f].call(j,a,k,j.opts))return d;return n.map(k,Ub,j),n.isFunction(j.opts.start)&&j.opts.start.call(a,j),n.fx.timer(n.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}n.Animation=n.extend(Xb,{tweener:function(a,b){n.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d++)c=a[d],Rb[c]=Rb[c]||[],Rb[c].unshift(b)},prefilter:function(a,b){b?Qb.unshift(a):Qb.push(a)}}),n.speed=function(a,b,c){var d=a&&"object"==typeof a?n.extend({},a):{complete:c||!c&&b||n.isFunction(a)&&a,duration:a,easing:c&&b||b&&!n.isFunction(b)&&b};return d.duration=n.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in n.fx.speeds?n.fx.speeds[d.duration]:n.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){n.isFunction(d.old)&&d.old.call(this),d.queue&&n.dequeue(this,d.queue)},d},n.fn.extend({fadeTo:function(a,b,c,d){return this.filter(S).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=n.isEmptyObject(a),f=n.speed(b,c,d),g=function(){var b=Xb(this,n.extend({},a),f);(e||L.get(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=n.timers,g=L.get(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&Pb.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&n.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=L.get(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=n.timers,g=d?d.length:0;for(c.finish=!0,n.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),n.each(["toggle","show","hide"],function(a,b){var c=n.fn[b];n.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(Tb(b,!0),a,d,e)}}),n.each({slideDown:Tb("show"),slideUp:Tb("hide"),slideToggle:Tb("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){n.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),n.timers=[],n.fx.tick=function(){var 
a,b=0,c=n.timers;for(Lb=n.now();b<c.length;b++)a=c[b],a()||c[b]!==a||c.splice(b--,1);c.length||n.fx.stop(),Lb=void 0},n.fx.timer=function(a){n.timers.push(a),a()?n.fx.start():n.timers.pop()},n.fx.interval=13,n.fx.start=function(){Mb||(Mb=setInterval(n.fx.tick,n.fx.interval))},n.fx.stop=function(){clearInterval(Mb),Mb=null},n.fx.speeds={slow:600,fast:200,_default:400},n.fn.delay=function(a,b){return a=n.fx?n.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a=l.createElement("input"),b=l.createElement("select"),c=b.appendChild(l.createElement("option"));a.type="checkbox",k.checkOn=""!==a.value,k.optSelected=c.selected,b.disabled=!0,k.optDisabled=!c.disabled,a=l.createElement("input"),a.value="t",a.type="radio",k.radioValue="t"===a.value}();var Yb,Zb,$b=n.expr.attrHandle;n.fn.extend({attr:function(a,b){return J(this,n.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){n.removeAttr(this,a)})}}),n.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===U?n.prop(a,b,c):(1===f&&n.isXMLDoc(a)||(b=b.toLowerCase(),d=n.attrHooks[b]||(n.expr.match.bool.test(b)?Zb:Yb)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=n.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void n.removeAttr(a,b))
+},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=n.propFix[c]||c,n.expr.match.bool.test(c)&&(a[d]=!1),a.removeAttribute(c)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&"radio"===b&&n.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),Zb={set:function(a,b,c){return b===!1?n.removeAttr(a,c):a.setAttribute(c,c),c}},n.each(n.expr.match.bool.source.match(/\w+/g),function(a,b){var c=$b[b]||n.find.attr;$b[b]=function(a,b,d){var e,f;return d||(f=$b[b],$b[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,$b[b]=f),e}});var _b=/^(?:input|select|textarea|button)$/i;n.fn.extend({prop:function(a,b){return J(this,n.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[n.propFix[a]||a]})}}),n.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!n.isXMLDoc(a),f&&(b=n.propFix[b]||b,e=n.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){return a.hasAttribute("tabindex")||_b.test(a.nodeName)||a.href?a.tabIndex:-1}}}}),k.optSelected||(n.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null}}),n.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){n.propFix[this.toLowerCase()]=this});var ac=/[\t\r\n\f]/g;n.fn.extend({addClass:function(a){var b,c,d,e,f,g,h="string"==typeof a&&a,i=0,j=this.length;if(n.isFunction(a))return this.each(function(b){n(this).addClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ac," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=n.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0===arguments.length||"string"==typeof a&&a,i=0,j=this.length;if(n.isFunction(a))return this.each(function(b){n(this).removeClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ac," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?n.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(n.isFunction(a)?function(c){n(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=n(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===U||"boolean"===c)&&(this.className&&L.set(this,"__className__",this.className),this.className=this.className||a===!1?"":L.get(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(ac," ").indexOf(b)>=0)return!0;return!1}});var bc=/\r/g;n.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=n.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,n(this).val()):a,null==e?e="":"number"==typeof e?e+="":n.isArray(e)&&(e=n.map(e,function(a){return null==a?"":a+""})),b=n.valHooks[this.type]||n.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 
0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=n.valHooks[e.type]||n.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(bc,""):null==c?"":c)}}}),n.extend({valHooks:{option:{get:function(a){var b=n.find.attr(a,"value");return null!=b?b:n.trim(n.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&n.nodeName(c.parentNode,"optgroup"))){if(b=n(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=n.makeArray(b),g=e.length;while(g--)d=e[g],(d.selected=n.inArray(d.value,f)>=0)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),n.each(["radio","checkbox"],function(){n.valHooks[this]={set:function(a,b){return n.isArray(b)?a.checked=n.inArray(n(a).val(),b)>=0:void 0}},k.checkOn||(n.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})}),n.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){n.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),n.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var cc=n.now(),dc=/\?/;n.parseJSON=function(a){return JSON.parse(a+"")},n.parseXML=function(a){var b,c;if(!a||"string"!=typeof a)return null;try{c=new DOMParser,b=c.parseFromString(a,"text/xml")}catch(d){b=void 0}return(!b||b.getElementsByTagName("parsererror").length)&&n.error("Invalid XML: "+a),b};var ec=/#.*$/,fc=/([?&])_=[^&]*/,gc=/^(.*?):[ \t]*([^\r\n]*)$/gm,hc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,ic=/^(?:GET|HEAD)$/,jc=/^\/\//,kc=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,lc={},mc={},nc="*/".concat("*"),oc=a.location.href,pc=kc.exec(oc.toLowerCase())||[];function qc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(n.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function rc(a,b,c,d){var e={},f=a===mc;function g(h){var i;return e[h]=!0,n.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function sc(a,b){var c,d,e=n.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&n.extend(!0,a,d),a}function tc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function uc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in 
a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}n.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:oc,type:"GET",isLocal:hc.test(pc[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":nc,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":n.parseJSON,"text xml":n.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?sc(sc(a,n.ajaxSettings),b):sc(n.ajaxSettings,a)},ajaxPrefilter:qc(lc),ajaxTransport:qc(mc),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=n.ajaxSetup({},b),l=k.context||k,m=k.context&&(l.nodeType||l.jquery)?n(l):n.event,o=n.Deferred(),p=n.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!f){f={};while(b=gc.exec(e))f[b[1].toLowerCase()]=b[2]}b=f[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?e:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return c&&c.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||oc)+"").replace(ec,"").replace(jc,pc[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=n.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(h=kc.exec(k.url.toLowerCase()),k.crossDomain=!(!h||h[1]===pc[1]&&h[2]===pc[2]&&(h[3]||("http:"===h[1]?"80":"443"))===(pc[3]||("http:"===pc[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=n.param(k.data,k.traditional)),rc(lc,k,b,v),2===t)return v;i=n.event&&k.global,i&&0===n.active++&&n.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!ic.test(k.type),d=k.url,k.hasContent||(k.data&&(d=k.url+=(dc.test(d)?"&":"?")+k.data,delete k.data),k.cache===!1&&(k.url=fc.test(d)?d.replace(fc,"$1_="+cc++):d+(dc.test(d)?"&":"?")+"_="+cc++)),k.ifModified&&(n.lastModified[d]&&v.setRequestHeader("If-Modified-Since",n.lastModified[d]),n.etag[d]&&v.setRequestHeader("If-None-Match",n.etag[d])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+nc+"; q=0.01":""):k.accepts["*"]);for(j in k.headers)v.setRequestHeader(j,k.headers[j]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(j 
in{success:1,error:1,complete:1})v[j](k[j]);if(c=rc(mc,k,b,v)){v.readyState=1,i&&m.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,c.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,f,h){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),c=void 0,e=h||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,f&&(u=tc(k,v,f)),u=uc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(n.lastModified[d]=w),w=v.getResponseHeader("etag"),w&&(n.etag[d]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,i&&m.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),i&&(m.trigger("ajaxComplete",[v,k]),--n.active||n.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return n.get(a,b,c,"json")},getScript:function(a,b){return n.get(a,void 0,b,"script")}}),n.each(["get","post"],function(a,b){n[b]=function(a,c,d,e){return n.isFunction(c)&&(e=e||d,d=c,c=void 0),n.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),n._evalUrl=function(a){return n.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},n.fn.extend({wrapAll:function(a){var b;return n.isFunction(a)?this.each(function(b){n(this).wrapAll(a.call(this,b))}):(this[0]&&(b=n(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this)},wrapInner:function(a){return this.each(n.isFunction(a)?function(b){n(this).wrapInner(a.call(this,b))}:function(){var b=n(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=n.isFunction(a);return this.each(function(c){n(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){n.nodeName(this,"body")||n(this).replaceWith(this.childNodes)}).end()}}),n.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0},n.expr.filters.visible=function(a){return!n.expr.filters.hidden(a)};var vc=/%20/g,wc=/\[\]$/,xc=/\r?\n/g,yc=/^(?:submit|button|image|reset|file)$/i,zc=/^(?:input|select|textarea|keygen)/i;function Ac(a,b,c,d){var e;if(n.isArray(b))n.each(b,function(b,e){c||wc.test(a)?d(a,e):Ac(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==n.type(b))d(a,b);else for(e in b)Ac(a+"["+e+"]",b[e],c,d)}n.param=function(a,b){var c,d=[],e=function(a,b){b=n.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=n.ajaxSettings&&n.ajaxSettings.traditional),n.isArray(a)||a.jquery&&!n.isPlainObject(a))n.each(a,function(){e(this.name,this.value)});else for(c in a)Ac(c,a[c],b,e);return d.join("&").replace(vc,"+")},n.fn.extend({serialize:function(){return n.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=n.prop(this,"elements");return a?n.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!n(this).is(":disabled")&&zc.test(this.nodeName)&&!yc.test(a)&&(this.checked||!T.test(a))}).map(function(a,b){var c=n(this).val();return null==c?null:n.isArray(c)?n.map(c,function(a){return{name:b.name,value:a.replace(xc,"\r\n")}}):{name:b.name,value:c.replace(xc,"\r\n")}}).get()}}),n.ajaxSettings.xhr=function(){try{return new XMLHttpRequest}catch(a){}};var 
Bc=0,Cc={},Dc={0:200,1223:204},Ec=n.ajaxSettings.xhr();a.attachEvent&&a.attachEvent("onunload",function(){for(var a in Cc)Cc[a]()}),k.cors=!!Ec&&"withCredentials"in Ec,k.ajax=Ec=!!Ec,n.ajaxTransport(function(a){var b;return k.cors||Ec&&!a.crossDomain?{send:function(c,d){var e,f=a.xhr(),g=++Bc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)f.setRequestHeader(e,c[e]);b=function(a){return function(){b&&(delete Cc[g],b=f.onload=f.onerror=null,"abort"===a?f.abort():"error"===a?d(f.status,f.statusText):d(Dc[f.status]||f.status,f.statusText,"string"==typeof f.responseText?{text:f.responseText}:void 0,f.getAllResponseHeaders()))}},f.onload=b(),f.onerror=b("error"),b=Cc[g]=b("abort");try{f.send(a.hasContent&&a.data||null)}catch(h){if(b)throw h}},abort:function(){b&&b()}}:void 0}),n.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return n.globalEval(a),a}}}),n.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),n.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(d,e){b=n("<script>").prop({async:!0,charset:a.scriptCharset,src:a.url}).on("load error",c=function(a){b.remove(),c=null,a&&e("error"===a.type?404:200,a.type)}),l.head.appendChild(b[0])},abort:function(){c&&c()}}}});var Fc=[],Gc=/(=)\?(?=&|$)|\?\?/;n.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=Fc.pop()||n.expando+"_"+cc++;return this[a]=!0,a}}),n.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(Gc.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&Gc.test(b.data)&&"data");return h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=n.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(Gc,"$1"+e):b.jsonp!==!1&&(b.url+=(dc.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||n.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,Fc.push(e)),g&&n.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),n.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||l;var d=v.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=n.buildFragment([a],b,e),e&&e.length&&n(e).remove(),n.merge([],d.childNodes))};var Hc=n.fn.load;n.fn.load=function(a,b,c){if("string"!=typeof a&&Hc)return Hc.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=n.trim(a.slice(h)),a=a.slice(0,h)),n.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(e="POST"),g.length>0&&n.ajax({url:a,type:e,dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?n("<div>").append(n.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,f||[a.responseText,b,a])}),this},n.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){n.fn[b]=function(a){return this.on(b,a)}}),n.expr.filters.animated=function(a){return n.grep(n.timers,function(b){return a===b.elem}).length};var Ic=a.document.documentElement;function Jc(a){return 
n.isWindow(a)?a:9===a.nodeType&&a.defaultView}n.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=n.css(a,"position"),l=n(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=n.css(a,"top"),i=n.css(a,"left"),j=("absolute"===k||"fixed"===k)&&(f+i).indexOf("auto")>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),n.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in b?b.using.call(a,m):l.css(m)}},n.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){n.offset.setOffset(this,a,b)});var b,c,d=this[0],e={top:0,left:0},f=d&&d.ownerDocument;if(f)return b=f.documentElement,n.contains(b,d)?(typeof d.getBoundingClientRect!==U&&(e=d.getBoundingClientRect()),c=Jc(f),{top:e.top+c.pageYOffset-b.clientTop,left:e.left+c.pageXOffset-b.clientLeft}):e},position:function(){if(this[0]){var a,b,c=this[0],d={top:0,left:0};return"fixed"===n.css(c,"position")?b=c.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),n.nodeName(a[0],"html")||(d=a.offset()),d.top+=n.css(a[0],"borderTopWidth",!0),d.left+=n.css(a[0],"borderLeftWidth",!0)),{top:b.top-d.top-n.css(c,"marginTop",!0),left:b.left-d.left-n.css(c,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||Ic;while(a&&!n.nodeName(a,"html")&&"static"===n.css(a,"position"))a=a.offsetParent;return a||Ic})}}),n.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(b,c){var d="pageYOffset"===c;n.fn[b]=function(e){return J(this,function(b,e,f){var g=Jc(b);return void 0===f?g?g[c]:b[e]:void(g?g.scrollTo(d?a.pageXOffset:f,d?f:a.pageYOffset):b[e]=f)},b,e,arguments.length,null)}}),n.each(["top","left"],function(a,b){n.cssHooks[b]=yb(k.pixelPosition,function(a,c){return c?(c=xb(a,b),vb.test(c)?n(a).position()[b]+"px":c):void 0})}),n.each({Height:"height",Width:"width"},function(a,b){n.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){n.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return J(this,function(b,c,d){var e;return n.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?n.css(b,c,g):n.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),n.fn.size=function(){return this.length},n.fn.andSelf=n.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return n});var Kc=a.jQuery,Lc=a.$;return n.noConflict=function(b){return a.$===n&&(a.$=Lc),b&&a.jQuery===n&&(a.jQuery=Kc),n},typeof b===U&&(a.jQuery=a.$=n),n});
diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js
new file mode 100644
index 00000000..bfb73860
--- /dev/null
+++ b/synapse/static/client/login/js/login.js
@@ -0,0 +1,164 @@
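+// Login page logic; the serverAccepts* flags are populated by fetch_info() below.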
+window.matrixLogin = {
+ endpoint: location.origin + "/_matrix/client/api/v1/login",
+ serverAcceptsPassword: false,
+ serverAcceptsCas: false
+};
+
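+// POST an m.login.password request and hand the response to matrixLogin.onLogin.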
+var submitPassword = function(user, pwd) {
+ console.log("Logging in with password...");
+ var data = {
+ type: "m.login.password",
+ user: user,
+ password: pwd,
+ };
+ $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) {
+ show_login();
+ matrixLogin.onLogin(response);
+ }).error(errorFunc);
+};
+
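+// POST an m.login.token request (used when a loginToken arrives in the URL query string).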
+var submitToken = function(loginToken) {
+ console.log("Logging in with login token...");
+ var data = {
+ type: "m.login.token",
+ token: loginToken
+ };
+ $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) {
+ show_login();
+ matrixLogin.onLogin(response);
+ }).error(errorFunc);
+};
+
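+// Shared AJAX error handler: re-show the form and surface the server error (or HTTP status) in #feedback.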
+var errorFunc = function(err) {
+ show_login();
+
+ if (err.responseJSON && err.responseJSON.error) {
+ setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")");
+ }
+ else {
+ setFeedbackString("Request failed: " + err.status);
+ }
+};
+
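+// Redirect the browser to the homeserver's CAS endpoint, asking it to return to this page.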
+var gotoCas = function() {
+ var this_page = window.location.origin + window.location.pathname;
+ var redirect_url = matrixLogin.endpoint + "/cas/redirect?redirectUrl=" + encodeURIComponent(this_page);
+ window.location.replace(redirect_url);
+};
+
+var setFeedbackString = function(text) {
+ $("#feedback").text(text);
+};
+
+var show_login = function() {
+ $("#loading").hide();
+
+ if (matrixLogin.serverAcceptsPassword) {
+ $("#password_form").show();
+ }
+
+ if (matrixLogin.serverAcceptsCas) {
+ $("#cas_flow").show();
+ }
+
+ if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas) {
+ $("#no_login_types").show();
+ }
+};
+
+var show_spinner = function() {
+ $("#password_form").hide();
+ $("#cas_flow").hide();
+ $("#no_login_types").hide();
+ $("#loading").show();
+};
+
+
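+// GET the login endpoint to discover which flows the server supports, then invoke cb.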
+var fetch_info = function(cb) {
+ $.get(matrixLogin.endpoint, function(response) {
+ for (var i=0; i<response.flows.length; i++) {
+ var flow = response.flows[i];
+ if ("m.login.cas" === flow.type) {
+ matrixLogin.serverAcceptsCas = true;
+ console.log("Server accepts CAS");
+ }
+
+ if ("m.login.password" === flow.type) {
+ matrixLogin.serverAcceptsPassword = true;
+ console.log("Server accepts password");
+ }
+ }
+
+ cb();
+ }).error(errorFunc);
+};
+
+matrixLogin.onLoad = function() {
+ fetch_info(function() {
+ if (!try_token()) {
+ show_login();
+ }
+ });
+};
+
+matrixLogin.password_login = function() {
+ var user = $("#user_id").val();
+ var pwd = $("#password").val();
+
+ setFeedbackString("");
+
+ show_spinner();
+ submitPassword(user, pwd);
+};
+
+matrixLogin.onLogin = function(response) {
+ // clobber this function
+ console.log("onLogin - This function should be replaced to proceed.");
+ console.log(response);
+};
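+// Illustrative sketch only (an assumption, not part of this file): a page
+// embedding this script is expected to replace the stub above, e.g.
+//   matrixLogin.onLogin = function(response) {
+//       // persist response.access_token / response.user_id, then redirect
+//   };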
+
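+// Minimal query-string parser: only values are URI-decoded, and repeated keys keep the last value.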
+var parseQsFromUrl = function(query) {
+ var result = {};
+ query.split("&").forEach(function(part) {
+ var item = part.split("=");
+ var key = item[0];
+ var val = item[1];
+
+ if (val) {
+ val = decodeURIComponent(val);
+ }
+ result[key] = val;
+ });
+ return result;
+};
+
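+// If the URL carries a loginToken parameter, submit it; returns whether a login was attempted.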
+var try_token = function() {
+ var pos = window.location.href.indexOf("?");
+ if (pos === -1) {
+ return false;
+ }
+ var qs = parseQsFromUrl(window.location.href.substr(pos+1));
+
+ var loginToken = qs.loginToken;
+
+ if (!loginToken) {
+ return false;
+ }
+
+ submitToken(loginToken);
+
+ return true;
+};
diff --git a/synapse/static/client/login/spinner.gif b/synapse/static/client/login/spinner.gif
new file mode 100644
index 00000000..12c24df7
--- /dev/null
+++ b/synapse/static/client/login/spinner.gif
Binary files differ
diff --git a/synapse/static/client/login/style.css b/synapse/static/client/login/style.css
new file mode 100644
index 00000000..73da0b51
--- /dev/null
+++ b/synapse/static/client/login/style.css
@@ -0,0 +1,57 @@
+html {
+ height: 100%;
+}
+
+body {
+ height: 100%;
+ font-family: "Myriad Pro", "Myriad", Helvetica, Arial, sans-serif;
+ font-size: 12pt;
+ margin: 0px;
+}
+
+h1 {
+ font-size: 20pt;
+}
+
+a:link { color: #666; }
+a:visited { color: #666; }
+a:hover { color: #000; }
+a:active { color: #000; }
+
+input {
+ width: 90%;
+}
+
+textarea, input {
+ font-family: inherit;
+ font-size: inherit;
+ margin: 5px;
+}
+
+.smallPrint {
+ color: #888;
+ font-size: 9pt !important;
+ font-style: italic !important;
+}
+
+.g-recaptcha div {
+ margin: auto;
+}
+
+.login_flow {
+ text-align: left;
+ padding: 10px;
+ margin-bottom: 40px;
+ display: inline-block;
+
+ -webkit-border-radius: 10px;
+ -moz-border-radius: 10px;
+ border-radius: 10px;
+
+ -webkit-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
+ -moz-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
+ box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
+
+ background-color: #f8f8f8;
+ border: 1px #ccc solid;
+}
diff --git a/synapse/static/client/register/index.html b/synapse/static/client/register/index.html
new file mode 100644
index 00000000..600b3ee4
--- /dev/null
+++ b/synapse/static/client/register/index.html
@@ -0,0 +1,32 @@
+<html>
+<head>
+<title>Registration</title>
+<meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+<link rel="stylesheet" href="style.css">
+<script src="js/jquery-2.1.3.min.js"></script>
+<script src="js/recaptcha_ajax.js"></script>
+<script src="register_config.js"></script>
+<script src="js/register.js"></script>
+</head>
+<body onload="matrixRegistration.onLoad()">
+<form id="registrationForm" onsubmit="matrixRegistration.signUp(); return false;">
+ <div>
+ Create account:<br/>
+
+ <div style="text-align: center">
+ <input id="desired_user_id" size="32" type="text" placeholder="Matrix ID (e.g. bob)" autocapitalize="off" autocorrect="off" />
+ <br/>
+ <input id="pwd1" size="32" type="password" placeholder="Type a password"/>
+ <br/>
+ <input id="pwd2" size="32" type="password" placeholder="Confirm your password"/>
+ <br/>
+ <span id="feedback" style="color: #f00"></span>
+ <br/>
+ <div id="regcaptcha"></div>
+
+ <button type="submit" style="margin: 10px">Sign up</button>
+ </div>
+ </div>
+</form>
+</body>
+</html>
diff --git a/synapse/static/client/register/js/jquery-2.1.3.min.js b/synapse/static/client/register/js/jquery-2.1.3.min.js
new file mode 100644
index 00000000..25714ed2
--- /dev/null
+++ b/synapse/static/client/register/js/jquery-2.1.3.min.js
@@ -0,0 +1,4 @@
+/*! jQuery v2.1.3 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */
+!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l=a.document,m="2.1.3",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return n.each(this,a,b)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(n.isPlainObject(d)||(e=n.isArray(d)))?(e?(e=!1,f=c&&n.isArray(c)?c:[]):f=c&&n.isPlainObject(c)?c:{},g[b]=n.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){return!n.isArray(a)&&a-parseFloat(a)+1>=0},isPlainObject:function(a){return"object"!==n.type(a)||a.nodeType||n.isWindow(a)?!1:a.constructor&&!j.call(a.constructor.prototype,"isPrototypeOf")?!1:!0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=n.trim(a),a&&(1===a.indexOf("use strict")?(b=l.createElement("script"),b.text=a,l.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=s(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:g.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=s(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in 
a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(c=a[b],b=a,a=c),n.isFunction(a)?(e=d.call(arguments,2),f=function(){return a.apply(b||this,e.concat(d.call(arguments)))},f.guid=a.guid=a.guid||n.guid++,f):void 0},now:Date.now,support:k}),n.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=hb(),z=hb(),A=hb(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N=M.replace("w","w#"),O="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+N+"))|)"+L+"*\\]",P=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+O+")*)|.*)\\)|)",Q=new RegExp(L+"+","g"),R=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),S=new RegExp("^"+L+"*,"+L+"*"),T=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),U=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),V=new RegExp(P),W=new RegExp("^"+N+"$"),X={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M.replace("w","w*")+")"),ATTR:new RegExp("^"+O),PSEUDO:new RegExp("^"+P),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},eb=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(fb){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function gb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],k=b.nodeType,"string"!=typeof a||!a||1!==k&&9!==k&&11!==k)return d;if(!e&&p){if(11!==k&&(f=_.exec(a)))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return H.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName)return H.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=1!==k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+rb(o[l]);w=ab.test(a)&&pb(b.parentNode)||b,x=o.join(",")}if(x)try{return 
H.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function hb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ib(a){return a[u]=!0,a}function jb(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function kb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function lb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function nb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function ob(a){return ib(function(b){return b=+b,ib(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function pb(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=gb.support={},f=gb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=gb.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=g.documentElement,e=g.defaultView,e&&e!==e.top&&(e.addEventListener?e.addEventListener("unload",eb,!1):e.attachEvent&&e.attachEvent("onunload",eb)),p=!f(g),c.attributes=jb(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=jb(function(a){return a.appendChild(g.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(g.getElementsByClassName),c.getById=jb(function(a){return o.appendChild(a).id=u,!g.getElementsByName||!g.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(g.querySelectorAll))&&(jb(function(a){o.appendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\f]' msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),jb(function(a){var 
b=g.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&jb(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",P)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===g||a.ownerDocument===v&&t(v,a)?-1:b===g||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,h=[a],i=[b];if(!e||!f)return a===g?-1:b===g?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return lb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?lb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},g):n},gb.matches=function(a,b){return gb(a,null,null,b)},gb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return gb(b,n,null,[a]).length>0},gb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},gb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},gb.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},gb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=gb.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=gb.selectors={cacheLength:50,createPseudo:ib,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||"").replace(cb,db),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||gb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&gb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=gb.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(Q," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||gb.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ib(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ib(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?ib(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ib(function(a){return function(b){return gb(a,b).length>0}}),contains:ib(function(a){return a=a.replace(cb,db),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ib(function(a){return W.test(a||"")||gb.error("unsupported lang: "+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var 
b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:ob(function(){return[0]}),last:ob(function(a,b){return[b-1]}),eq:ob(function(a,b,c){return[0>c?c+b:c]}),even:ob(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:ob(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:ob(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:ob(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=mb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=nb(b);function qb(){}qb.prototype=d.filters=d.pseudos,d.setFilters=new qb,g=gb.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R," ")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?gb.error(a):z(a,i).slice(0)};function rb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function sb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function tb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ub(a,b,c){for(var d=0,e=b.length;e>d;d++)gb(a,b[d],c);return c}function vb(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function wb(a,b,c,d,e,f){return d&&!d[u]&&(d=wb(d)),e&&!e[u]&&(e=wb(e,f)),ib(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ub(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:vb(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=vb(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=vb(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function xb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=sb(function(a){return a===b},h,!0),l=sb(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[sb(tb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return wb(i>1&&tb(m),i>1&&rb(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&xb(a.slice(i,e)),f>e&&xb(a=a.slice(e)),f>e&&rb(a))}m.push(c)}return tb(m)}function yb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=F.call(i));s=vb(s)}H.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&gb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?ib(f):f}return h=gb.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=xb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,yb(e,d)),f.selector=a}return f},i=gb.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&pb(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&rb(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&pb(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=jb(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),jb(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||kb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&jb(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||kb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),jb(function(a){return null==a.getAttribute("disabled")})||kb(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),gb}(a);n.find=t,n.expr=t.selectors,n.expr[":"]=n.expr.pseudos,n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=n.expr.match.needsContext,v=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,w=/^.[^:#\[\.,]*$/;function x(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(w.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return g.call(b,a)>=0!==c})}n.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find:function(a){var b,c=this.length,d=[],e=this;if("string"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;c>b;b++)if(n.contains(e[b],this))return!0}));for(b=0;c>b;b++)n.find(a,e[b],d);return d=this.pushStack(c>1?n.unique(d):d),d.selector=this.selector?this.selector+" "+a:a,d},filter:function(a){return this.pushStack(x(this,a||[],!1))},not:function(a){return this.pushStack(x(this,a||[],!0))},is:function(a){return!!x(this,"string"==typeof a&&u.test(a)?n(a):a||[],!1).length}});var y,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=n.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof 
a){if(c="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||y).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof n?b[0]:b,n.merge(this,n.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:l,!0)),v.test(c[1])&&n.isPlainObject(b))for(c in b)n.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}return d=l.getElementById(c[2]),d&&d.parentNode&&(this.length=1,this[0]=d),this.context=l,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?"undefined"!=typeof y.ready?y.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};A.prototype=n.fn,y=n(l);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};n.extend({dir:function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&n(a).is(c))break;d.push(a)}return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),n.fn.extend({has:function(a){var b=n(a,this),c=b.length;return this.filter(function(){for(var a=0;c>a;a++)if(n.contains(this,b[a]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=u.test(a)||"string"!=typeof a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.unique(f):f)},index:function(a){return a?"string"==typeof a?g.call(n(a),this[0]):g.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.unique(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){while((a=a[b])&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return n.dir(a,"parentNode")},parentsUntil:function(a,b,c){return n.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return n.dir(a,"nextSibling")},prevAll:function(a){return n.dir(a,"previousSibling")},nextUntil:function(a,b,c){return n.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return n.dir(a,"previousSibling",c)},siblings:function(a){return n.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return n.sibling(a.firstChild)},contents:function(a){return a.contentDocument||n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=n.filter(d,e)),this.length>1&&(C[a]||n.unique(e),B.test(a)&&e.reverse()),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return n.each(a.match(E)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):n.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(b=a.memory&&l,c=!0,g=e||0,e=0,f=h.length,d=!0;h&&f>g;g++)if(h[g].apply(l[0],l[1])===!1&&a.stopOnFalse){b=!1;break}d=!1,h&&(i?i.length&&j(i.shift()):b?h=[]:k.disable())},k={add:function(){if(h){var c=h.length;!function g(b){n.each(b,function(b,c){var d=n.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&g(c)})}(arguments),d?f=h.length:b&&(e=c,j(b))}return this},remove:function(){return h&&n.each(arguments,function(a,b){var 
c;while((c=n.inArray(b,h,c))>-1)h.splice(c,1),d&&(f>=c&&f--,g>=c&&g--)}),this},has:function(a){return a?n.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],f=0,this},disable:function(){return h=i=b=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,b||k.disable(),this},locked:function(){return!i},fireWith:function(a,b){return!h||c&&!i||(b=b||[],b=[a,b.slice?b.slice():b],d?i.push(b):j(b)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!c}};return k},n.extend({Deferred:function(a){var b=[["resolve","done",n.Callbacks("once memory"),"resolved"],["reject","fail",n.Callbacks("once memory"),"rejected"],["notify","progress",n.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&n.isFunction(a.promise)?e:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;n.fn.ready=function(a){return n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)},ready:function(a){(a===!0?--n.readyWait:n.isReady)||(n.isReady=!0,a!==!0&&--n.readyWait>0||(H.resolveWith(l,[n]),n.fn.triggerHandler&&(n(l).triggerHandler("ready"),n(l).off("ready"))))}});function I(){l.removeEventListener("DOMContentLoaded",I,!1),a.removeEventListener("load",I,!1),n.ready()}n.ready.promise=function(b){return H||(H=n.Deferred(),"complete"===l.readyState?setTimeout(n.ready):(l.addEventListener("DOMContentLoaded",I,!1),a.addEventListener("load",I,!1))),H.promise(b)},n.ready.promise();var J=n.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===n.type(c)){e=!0;for(h in c)n.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,n.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(n(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f};n.acceptData=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType};function K(){Object.defineProperty(this.cache={},0,{get:function(){return{}}}),this.expando=n.expando+K.uid++}K.uid=1,K.accepts=n.acceptData,K.prototype={key:function(a){if(!K.accepts(a))return 0;var b={},c=a[this.expando];if(!c){c=K.uid++;try{b[this.expando]={value:c},Object.defineProperties(a,b)}catch(d){b[this.expando]=c,n.extend(a,b)}}return this.cache[c]||(this.cache[c]={}),c},set:function(a,b,c){var d,e=this.key(a),f=this.cache[e];if("string"==typeof b)f[b]=c;else 
if(n.isEmptyObject(f))n.extend(this.cache[e],b);else for(d in b)f[d]=b[d];return f},get:function(a,b){var c=this.cache[this.key(a)];return void 0===b?c:c[b]},access:function(a,b,c){var d;return void 0===b||b&&"string"==typeof b&&void 0===c?(d=this.get(a,b),void 0!==d?d:this.get(a,n.camelCase(b))):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d,e,f=this.key(a),g=this.cache[f];if(void 0===b)this.cache[f]={};else{n.isArray(b)?d=b.concat(b.map(n.camelCase)):(e=n.camelCase(b),b in g?d=[b,e]:(d=e,d=d in g?[d]:d.match(E)||[])),c=d.length;while(c--)delete g[d[c]]}},hasData:function(a){return!n.isEmptyObject(this.cache[a[this.expando]]||{})},discard:function(a){a[this.expando]&&delete this.cache[a[this.expando]]}};var L=new K,M=new K,N=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,O=/([A-Z])/g;function P(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(O,"-$1").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:N.test(c)?n.parseJSON(c):c}catch(e){}M.set(a,b,c)}else c=void 0;return c}n.extend({hasData:function(a){return M.hasData(a)||L.hasData(a)},data:function(a,b,c){return M.access(a,b,c)
+},removeData:function(a,b){M.remove(a,b)},_data:function(a,b,c){return L.access(a,b,c)},_removeData:function(a,b){L.remove(a,b)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=M.get(f),1===f.nodeType&&!L.get(f,"hasDataAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=n.camelCase(d.slice(5)),P(f,d,e[d])));L.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){M.set(this,a)}):J(this,function(b){var c,d=n.camelCase(a);if(f&&void 0===b){if(c=M.get(f,a),void 0!==c)return c;if(c=M.get(f,d),void 0!==c)return c;if(c=P(f,d,void 0),void 0!==c)return c}else this.each(function(){var c=M.get(this,d);M.set(this,d,b),-1!==a.indexOf("-")&&void 0!==c&&M.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){M.remove(this,a)})}}),n.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=L.get(a,b),c&&(!d||n.isArray(c)?d=L.access(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return L.get(a,c)||L.access(a,c,{empty:n.Callbacks("once memory").add(function(){L.remove(a,[b+"queue",c])})})}}),n.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?n.queue(this[0],a):void 0===b?this:this.each(function(){var c=n.queue(this,a,b);n._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&n.dequeue(this,a)})},dequeue:function(a){return this.each(function(){n.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=n.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=L.get(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var Q=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,R=["Top","Right","Bottom","Left"],S=function(a,b){return a=b||a,"none"===n.css(a,"display")||!n.contains(a.ownerDocument,a)},T=/^(?:checkbox|radio)$/i;!function(){var a=l.createDocumentFragment(),b=a.appendChild(l.createElement("div")),c=l.createElement("input");c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),b.appendChild(c),k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="<textarea>x</textarea>",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var U="undefined";k.focusinBubbles="onfocusin"in a;var V=/^key/,W=/^(?:mouse|pointer|contextmenu)|click/,X=/^(?:focusinfocus|focusoutblur)$/,Y=/^([^.]*)(?:\.(.+)|)$/;function Z(){return!0}function $(){return!1}function _(){try{return l.activeElement}catch(a){}}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=L.get(a);if(r){c.handler&&(f=c,c=f.handler,e=f.selector),c.guid||(c.guid=n.guid++),(i=r.events)||(i=r.events={}),(g=r.handle)||(g=r.handle=function(b){return typeof n!==U&&n.event.triggered!==b.type?n.event.dispatch.apply(a,arguments):void 
0}),b=(b||"").match(E)||[""],j=b.length;while(j--)h=Y.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o&&(l=n.event.special[o]||{},o=(e?l.delegateType:l.bindType)||o,l=n.event.special[o]||{},k=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},f),(m=i[o])||(m=i[o]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,p,g)!==!1||a.addEventListener&&a.addEventListener(o,g,!1)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),n.event.global[o]=!0)}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=L.hasData(a)&&L.get(a);if(r&&(i=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=Y.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=i[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&q!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete i[o])}else for(o in i)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(i)&&(delete r.handle,L.remove(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,m,o,p=[d||l],q=j.call(b,"type")?b.type:b,r=j.call(b,"namespace")?b.namespace.split("."):[];if(g=h=d=d||l,3!==d.nodeType&&8!==d.nodeType&&!X.test(q+n.event.triggered)&&(q.indexOf(".")>=0&&(r=q.split("."),q=r.shift(),r.sort()),k=q.indexOf(":")<0&&"on"+q,b=b[n.expando]?b:new n.Event(q,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=r.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:n.makeArray(c,[b]),o=n.event.special[q]||{},e||!o.trigger||o.trigger.apply(d,c)!==!1)){if(!e&&!o.noBubble&&!n.isWindow(d)){for(i=o.delegateType||q,X.test(i+q)||(g=g.parentNode);g;g=g.parentNode)p.push(g),h=g;h===(d.ownerDocument||l)&&p.push(h.defaultView||h.parentWindow||a)}f=0;while((g=p[f++])&&!b.isPropagationStopped())b.type=f>1?i:o.bindType||q,m=(L.get(g,"events")||{})[b.type]&&L.get(g,"handle"),m&&m.apply(g,c),m=k&&g[k],m&&m.apply&&n.acceptData(g)&&(b.result=m.apply(g,c),b.result===!1&&b.preventDefault());return b.type=q,e||b.isDefaultPrevented()||o._default&&o._default.apply(p.pop(),c)!==!1||!n.acceptData(d)||k&&n.isFunction(d[q])&&!n.isWindow(d)&&(h=d[k],h&&(d[k]=null),n.event.triggered=q,d[q](),n.event.triggered=void 0,h&&(d[k]=h)),b.result}},dispatch:function(a){a=n.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(L.get(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(g.namespace))&&(a.handleObj=g,a.data=g.data,e=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==e&&(a.result=e)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var 
c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!==this;i=i.parentNode||this)if(i.disabled!==!0||"click"!==a.type){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>=0:n.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button;return null==a.pageX&&null!=b.clientX&&(c=a.target.ownerDocument||l,d=c.documentElement,e=c.body,a.pageX=b.clientX+(d&&d.scrollLeft||e&&e.scrollLeft||0)-(d&&d.clientLeft||e&&e.clientLeft||0),a.pageY=b.clientY+(d&&d.scrollTop||e&&e.scrollTop||0)-(d&&d.clientTop||e&&e.clientTop||0)),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},fix:function(a){if(a[n.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=W.test(e)?this.mouseHooks:V.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new n.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=l),3===a.target.nodeType&&(a.target=a.target.parentNode),g.filter?g.filter(a,f):a},special:{load:{noBubble:!0},focus:{trigger:function(){return this!==_()&&this.focus?(this.focus(),!1):void 0},delegateType:"focusin"},blur:{trigger:function(){return this===_()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return"checkbox"===this.type&&this.click&&n.nodeName(this,"input")?(this.click(),!1):void 0},_default:function(a){return n.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=n.extend(new n.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?n.event.trigger(e,null,b):n.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},n.removeEvent=function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)},n.Event=function(a,b){return this instanceof n.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?Z:$):this.type=a,b&&n.extend(this,b),this.timeStamp=a&&a.timeStamp||n.now(),void(this[n.expando]=!0)):new n.Event(a,b)},n.Event.prototype={isDefaultPrevented:$,isPropagationStopped:$,isImmediatePropagationStopped:$,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=Z,a&&a.preventDefault&&a.preventDefault()},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=Z,a&&a.stopPropagation&&a.stopPropagation()},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=Z,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},n.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){n.event.special[a]={delegateType:b,bindType:b,handle:function(a){var 
c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!n.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.focusinBubbles||n.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){n.event.simulate(b,a.target,n.event.fix(a),!0)};n.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=L.access(d,b);e||d.addEventListener(a,c,!0),L.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=L.access(d,b)-1;e?L.access(d,b,e):(d.removeEventListener(a,c,!0),L.remove(d,b))}}}),n.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(g in a)this.on(g,b,c,a[g],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=$;else if(!d)return this;return 1===e&&(f=d,d=function(a){return n().off(a),f.apply(this,arguments)},d.guid=f.guid||(f.guid=n.guid++)),this.each(function(){n.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,n(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=$),this.each(function(){n.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){n.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?n.event.trigger(a,b,c,!0):void 0}});var ab=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,bb=/<([\w:]+)/,cb=/<|&#?\w+;/,db=/<(?:script|style|link)/i,eb=/checked\s*(?:[^=]|=\s*.checked.)/i,fb=/^$|\/(?:java|ecma)script/i,gb=/^true\/(.*)/,hb=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,ib={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ib.optgroup=ib.option,ib.tbody=ib.tfoot=ib.colgroup=ib.caption=ib.thead,ib.th=ib.td;function jb(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function kb(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function lb(a){var b=gb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function mb(a,b){for(var c=0,d=a.length;d>c;c++)L.set(a[c],"globalEval",!b||L.get(b[c],"globalEval"))}function nb(a,b){var c,d,e,f,g,h,i,j;if(1===b.nodeType){if(L.hasData(a)&&(f=L.access(a),g=L.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;d>c;c++)n.event.add(b,e,j[e][c])}M.hasData(a)&&(h=M.access(a),i=n.extend({},h),M.set(b,i))}}function ob(a,b){var c=a.getElementsByTagName?a.getElementsByTagName(b||"*"):a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&n.nodeName(a,b)?n.merge([a],c):c}function pb(a,b){var c=b.nodeName.toLowerCase();"input"===c&&T.test(a.type)?b.checked=a.checked:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}n.extend({clone:function(a,b,c){var 
d,e,f,g,h=a.cloneNode(!0),i=n.contains(a.ownerDocument,a);if(!(k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(g=ob(h),f=ob(a),d=0,e=f.length;e>d;d++)pb(f[d],g[d]);if(b)if(c)for(f=f||ob(a),g=g||ob(h),d=0,e=f.length;e>d;d++)nb(f[d],g[d]);else nb(a,h);return g=ob(h,"script"),g.length>0&&mb(g,!i&&ob(a,"script")),h},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,k=b.createDocumentFragment(),l=[],m=0,o=a.length;o>m;m++)if(e=a[m],e||0===e)if("object"===n.type(e))n.merge(l,e.nodeType?[e]:e);else if(cb.test(e)){f=f||k.appendChild(b.createElement("div")),g=(bb.exec(e)||["",""])[1].toLowerCase(),h=ib[g]||ib._default,f.innerHTML=h[1]+e.replace(ab,"<$1></$2>")+h[2],j=h[0];while(j--)f=f.lastChild;n.merge(l,f.childNodes),f=k.firstChild,f.textContent=""}else l.push(b.createTextNode(e));k.textContent="",m=0;while(e=l[m++])if((!d||-1===n.inArray(e,d))&&(i=n.contains(e.ownerDocument,e),f=ob(k.appendChild(e),"script"),i&&mb(f),c)){j=0;while(e=f[j++])fb.test(e.type||"")&&c.push(e)}return k},cleanData:function(a){for(var b,c,d,e,f=n.event.special,g=0;void 0!==(c=a[g]);g++){if(n.acceptData(c)&&(e=c[L.expando],e&&(b=L.cache[e]))){if(b.events)for(d in b.events)f[d]?n.event.remove(c,d):n.removeEvent(c,d,b.handle);L.cache[e]&&delete L.cache[e]}delete M.cache[c[M.expando]]}}}),n.fn.extend({text:function(a){return J(this,function(a){return void 0===a?n.text(this):this.empty().each(function(){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&(this.textContent=a)})},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=jb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=jb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?n.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||n.cleanData(ob(c)),c.parentNode&&(b&&n.contains(c.ownerDocument,c)&&mb(ob(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(n.cleanData(ob(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return J(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!db.test(a)&&!ib[(bb.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(ab,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(ob(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,n.cleanData(ob(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,m=this,o=l-1,p=a[0],q=n.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&eb.test(p))return this.each(function(c){var 
d=m.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(c=n.buildFragment(a,this[0].ownerDocument,!1,this),d=c.firstChild,1===c.childNodes.length&&(c=d),d)){for(f=n.map(ob(c,"script"),kb),g=f.length;l>j;j++)h=c,j!==o&&(h=n.clone(h,!0,!0),g&&n.merge(f,ob(h,"script"))),b.call(this[j],h,j);if(g)for(i=f[f.length-1].ownerDocument,n.map(f,lb),j=0;g>j;j++)h=f[j],fb.test(h.type||"")&&!L.access(h,"globalEval")&&n.contains(i,h)&&(h.src?n._evalUrl&&n._evalUrl(h.src):n.globalEval(h.textContent.replace(hb,"")))}return this}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=[],e=n(a),g=e.length-1,h=0;g>=h;h++)c=h===g?this:this.clone(!0),n(e[h])[b](c),f.apply(d,c.get());return this.pushStack(d)}});var qb,rb={};function sb(b,c){var d,e=n(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:n.css(e[0],"display");return e.detach(),f}function tb(a){var b=l,c=rb[a];return c||(c=sb(a,b),"none"!==c&&c||(qb=(qb||n("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=qb[0].contentDocument,b.write(),b.close(),c=sb(a,b),qb.detach()),rb[a]=c),c}var ub=/^margin/,vb=new RegExp("^("+Q+")(?!px)[a-z%]+$","i"),wb=function(b){return b.ownerDocument.defaultView.opener?b.ownerDocument.defaultView.getComputedStyle(b,null):a.getComputedStyle(b,null)};function xb(a,b,c){var d,e,f,g,h=a.style;return c=c||wb(a),c&&(g=c.getPropertyValue(b)||c[b]),c&&(""!==g||n.contains(a.ownerDocument,a)||(g=n.style(a,b)),vb.test(g)&&ub.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0!==g?g+"":g}function yb(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d=l.documentElement,e=l.createElement("div"),f=l.createElement("div");if(f.style){f.style.backgroundClip="content-box",f.cloneNode(!0).style.backgroundClip="",k.clearCloneStyle="content-box"===f.style.backgroundClip,e.style.cssText="border:0;width:0;height:0;top:0;left:-9999px;margin-top:1px;position:absolute",e.appendChild(f);function g(){f.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",f.innerHTML="",d.appendChild(e);var g=a.getComputedStyle(f,null);b="1%"!==g.top,c="4px"===g.width,d.removeChild(e)}a.getComputedStyle&&n.extend(k,{pixelPosition:function(){return g(),b},boxSizingReliable:function(){return null==c&&g(),c},reliableMarginRight:function(){var b,c=f.appendChild(l.createElement("div"));return c.style.cssText=f.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",c.style.marginRight=c.style.width="0",f.style.width="1px",d.appendChild(e),b=!parseFloat(a.getComputedStyle(c,null).marginRight),d.removeChild(e),f.removeChild(c),b}})}}(),n.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var zb=/^(none|table(?!-c[ea]).+)/,Ab=new RegExp("^("+Q+")(.*)$","i"),Bb=new RegExp("^([+-])=("+Q+")","i"),Cb={position:"absolute",visibility:"hidden",display:"block"},Db={letterSpacing:"0",fontWeight:"400"},Eb=["Webkit","O","Moz","ms"];function Fb(a,b){if(b in a)return b;var c=b[0].toUpperCase()+b.slice(1),d=b,e=Eb.length;while(e--)if(b=Eb[e]+c,b in 
a)return b;return d}function Gb(a,b,c){var d=Ab.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Hb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=n.css(a,c+R[f],!0,e)),d?("content"===c&&(g-=n.css(a,"padding"+R[f],!0,e)),"margin"!==c&&(g-=n.css(a,"border"+R[f]+"Width",!0,e))):(g+=n.css(a,"padding"+R[f],!0,e),"padding"!==c&&(g+=n.css(a,"border"+R[f]+"Width",!0,e)));return g}function Ib(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=wb(a),g="border-box"===n.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=xb(a,b,f),(0>e||null==e)&&(e=a.style[b]),vb.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Hb(a,b,c||(g?"border":"content"),d,f)+"px"}function Jb(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=L.get(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&S(d)&&(f[g]=L.access(d,"olddisplay",tb(d.nodeName)))):(e=S(d),"none"===c&&e||L.set(d,"olddisplay",e?c:n.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}n.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=xb(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":"cssFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=n.camelCase(b),i=a.style;return b=n.cssProps[h]||(n.cssProps[h]=Fb(i,h)),g=n.cssHooks[b]||n.cssHooks[h],void 0===c?g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b]:(f=typeof c,"string"===f&&(e=Bb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(n.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||n.cssNumber[h]||(c+="px"),k.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),g&&"set"in g&&void 0===(c=g.set(a,c,d))||(i[b]=c)),void 0)}},css:function(a,b,c,d){var e,f,g,h=n.camelCase(b);return b=n.cssProps[h]||(n.cssProps[h]=Fb(a.style,h)),g=n.cssHooks[b]||n.cssHooks[h],g&&"get"in g&&(e=g.get(a,!0,c)),void 0===e&&(e=xb(a,b,d)),"normal"===e&&b in Db&&(e=Db[b]),""===c||c?(f=parseFloat(e),c===!0||n.isNumeric(f)?f||0:e):e}}),n.each(["height","width"],function(a,b){n.cssHooks[b]={get:function(a,c,d){return c?zb.test(n.css(a,"display"))&&0===a.offsetWidth?n.swap(a,Cb,function(){return Ib(a,b,d)}):Ib(a,b,d):void 0},set:function(a,c,d){var e=d&&wb(a);return Gb(a,c,d?Hb(a,b,d,"border-box"===n.css(a,"boxSizing",!1,e),e):0)}}}),n.cssHooks.marginRight=yb(k.reliableMarginRight,function(a,b){return b?n.swap(a,{display:"inline-block"},xb,[a,"marginRight"]):void 0}),n.each({margin:"",padding:"",border:"Width"},function(a,b){n.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+R[d]+b]=f[d]||f[d-2]||f[0];return e}},ub.test(a)||(n.cssHooks[a+b].set=Gb)}),n.fn.extend({css:function(a,b){return J(this,function(a,b,c){var d,e,f={},g=0;if(n.isArray(b)){for(d=wb(a),e=b.length;e>g;g++)f[b[g]]=n.css(a,b[g],!1,d);return f}return void 0!==c?n.style(a,b,c):n.css(a,b)},a,b,arguments.length>1)},show:function(){return Jb(this,!0)},hide:function(){return Jb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){S(this)?n(this).show():n(this).hide()})}});function Kb(a,b,c,d,e){return new 
Kb.prototype.init(a,b,c,d,e)}n.Tween=Kb,Kb.prototype={constructor:Kb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(n.cssNumber[c]?"":"px")},cur:function(){var a=Kb.propHooks[this.prop];return a&&a.get?a.get(this):Kb.propHooks._default.get(this)},run:function(a){var b,c=Kb.propHooks[this.prop];return this.pos=b=this.options.duration?n.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Kb.propHooks._default.set(this),this}},Kb.prototype.init.prototype=Kb.prototype,Kb.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=n.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){n.fx.step[a.prop]?n.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[n.cssProps[a.prop]]||n.cssHooks[a.prop])?n.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Kb.propHooks.scrollTop=Kb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},n.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},n.fx=Kb.prototype.init,n.fx.step={};var Lb,Mb,Nb=/^(?:toggle|show|hide)$/,Ob=new RegExp("^(?:([+-])=|)("+Q+")([a-z%]*)$","i"),Pb=/queueHooks$/,Qb=[Vb],Rb={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=Ob.exec(b),f=e&&e[3]||(n.cssNumber[a]?"":"px"),g=(n.cssNumber[a]||"px"!==f&&+d)&&Ob.exec(n.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,n.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function Sb(){return setTimeout(function(){Lb=void 0}),Lb=n.now()}function Tb(a,b){var c,d=0,e={height:a};for(b=b?1:0;4>d;d+=2-b)c=R[d],e["margin"+c]=e["padding"+c]=a;return b&&(e.opacity=e.width=a),e}function Ub(a,b,c){for(var d,e=(Rb[b]||[]).concat(Rb["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function Vb(a,b,c){var d,e,f,g,h,i,j,k,l=this,m={},o=a.style,p=a.nodeType&&S(a),q=L.get(a,"fxshow");c.queue||(h=n._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,l.always(function(){l.always(function(){h.unqueued--,n.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[o.overflow,o.overflowX,o.overflowY],j=n.css(a,"display"),k="none"===j?L.get(a,"olddisplay")||tb(a.nodeName):j,"inline"===k&&"none"===n.css(a,"float")&&(o.display="inline-block")),c.overflow&&(o.overflow="hidden",l.always(function(){o.overflow=c.overflow[0],o.overflowX=c.overflow[1],o.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],Nb.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(p?"hide":"show")){if("show"!==e||!q||void 0===q[d])continue;p=!0}m[d]=q&&q[d]||n.style(a,d)}else j=void 0;if(n.isEmptyObject(m))"inline"===("none"===j?tb(a.nodeName):j)&&(o.display=j);else{q?"hidden"in q&&(p=q.hidden):q=L.access(a,"fxshow",{}),f&&(q.hidden=!p),p?n(a).show():l.done(function(){n(a).hide()}),l.done(function(){var b;L.remove(a,"fxshow");for(b in m)n.style(a,b,m[b])});for(d in m)g=Ub(p?q[d]:0,d,l),d in q||(q[d]=g.start,p&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function Wb(a,b){var c,d,e,f,g;for(c in a)if(d=n.camelCase(c),e=b[d],f=a[c],n.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete 
a[c]),g=n.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function Xb(a,b,c){var d,e,f=0,g=Qb.length,h=n.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=Lb||Sb(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:n.extend({},b),opts:n.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:Lb||Sb(),duration:c.duration,tweens:[],createTween:function(b,c){var d=n.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(Wb(k,j.opts.specialEasing);g>f;f++)if(d=Qb[f].call(j,a,k,j.opts))return d;return n.map(k,Ub,j),n.isFunction(j.opts.start)&&j.opts.start.call(a,j),n.fx.timer(n.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}n.Animation=n.extend(Xb,{tweener:function(a,b){n.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d++)c=a[d],Rb[c]=Rb[c]||[],Rb[c].unshift(b)},prefilter:function(a,b){b?Qb.unshift(a):Qb.push(a)}}),n.speed=function(a,b,c){var d=a&&"object"==typeof a?n.extend({},a):{complete:c||!c&&b||n.isFunction(a)&&a,duration:a,easing:c&&b||b&&!n.isFunction(b)&&b};return d.duration=n.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in n.fx.speeds?n.fx.speeds[d.duration]:n.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){n.isFunction(d.old)&&d.old.call(this),d.queue&&n.dequeue(this,d.queue)},d},n.fn.extend({fadeTo:function(a,b,c,d){return this.filter(S).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=n.isEmptyObject(a),f=n.speed(b,c,d),g=function(){var b=Xb(this,n.extend({},a),f);(e||L.get(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=n.timers,g=L.get(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&Pb.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&n.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=L.get(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=n.timers,g=d?d.length:0;for(c.finish=!0,n.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),n.each(["toggle","show","hide"],function(a,b){var c=n.fn[b];n.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(Tb(b,!0),a,d,e)}}),n.each({slideDown:Tb("show"),slideUp:Tb("hide"),slideToggle:Tb("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){n.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),n.timers=[],n.fx.tick=function(){var 
a,b=0,c=n.timers;for(Lb=n.now();b<c.length;b++)a=c[b],a()||c[b]!==a||c.splice(b--,1);c.length||n.fx.stop(),Lb=void 0},n.fx.timer=function(a){n.timers.push(a),a()?n.fx.start():n.timers.pop()},n.fx.interval=13,n.fx.start=function(){Mb||(Mb=setInterval(n.fx.tick,n.fx.interval))},n.fx.stop=function(){clearInterval(Mb),Mb=null},n.fx.speeds={slow:600,fast:200,_default:400},n.fn.delay=function(a,b){return a=n.fx?n.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a=l.createElement("input"),b=l.createElement("select"),c=b.appendChild(l.createElement("option"));a.type="checkbox",k.checkOn=""!==a.value,k.optSelected=c.selected,b.disabled=!0,k.optDisabled=!c.disabled,a=l.createElement("input"),a.value="t",a.type="radio",k.radioValue="t"===a.value}();var Yb,Zb,$b=n.expr.attrHandle;n.fn.extend({attr:function(a,b){return J(this,n.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){n.removeAttr(this,a)})}}),n.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===U?n.prop(a,b,c):(1===f&&n.isXMLDoc(a)||(b=b.toLowerCase(),d=n.attrHooks[b]||(n.expr.match.bool.test(b)?Zb:Yb)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=n.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void n.removeAttr(a,b))
+},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=n.propFix[c]||c,n.expr.match.bool.test(c)&&(a[d]=!1),a.removeAttribute(c)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&"radio"===b&&n.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),Zb={set:function(a,b,c){return b===!1?n.removeAttr(a,c):a.setAttribute(c,c),c}},n.each(n.expr.match.bool.source.match(/\w+/g),function(a,b){var c=$b[b]||n.find.attr;$b[b]=function(a,b,d){var e,f;return d||(f=$b[b],$b[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,$b[b]=f),e}});var _b=/^(?:input|select|textarea|button)$/i;n.fn.extend({prop:function(a,b){return J(this,n.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[n.propFix[a]||a]})}}),n.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!n.isXMLDoc(a),f&&(b=n.propFix[b]||b,e=n.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){return a.hasAttribute("tabindex")||_b.test(a.nodeName)||a.href?a.tabIndex:-1}}}}),k.optSelected||(n.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null}}),n.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){n.propFix[this.toLowerCase()]=this});var ac=/[\t\r\n\f]/g;n.fn.extend({addClass:function(a){var b,c,d,e,f,g,h="string"==typeof a&&a,i=0,j=this.length;if(n.isFunction(a))return this.each(function(b){n(this).addClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ac," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=n.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0===arguments.length||"string"==typeof a&&a,i=0,j=this.length;if(n.isFunction(a))return this.each(function(b){n(this).removeClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ac," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?n.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(n.isFunction(a)?function(c){n(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=n(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===U||"boolean"===c)&&(this.className&&L.set(this,"__className__",this.className),this.className=this.className||a===!1?"":L.get(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(ac," ").indexOf(b)>=0)return!0;return!1}});var bc=/\r/g;n.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=n.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,n(this).val()):a,null==e?e="":"number"==typeof e?e+="":n.isArray(e)&&(e=n.map(e,function(a){return null==a?"":a+""})),b=n.valHooks[this.type]||n.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 
0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=n.valHooks[e.type]||n.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(bc,""):null==c?"":c)}}}),n.extend({valHooks:{option:{get:function(a){var b=n.find.attr(a,"value");return null!=b?b:n.trim(n.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&n.nodeName(c.parentNode,"optgroup"))){if(b=n(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=n.makeArray(b),g=e.length;while(g--)d=e[g],(d.selected=n.inArray(d.value,f)>=0)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),n.each(["radio","checkbox"],function(){n.valHooks[this]={set:function(a,b){return n.isArray(b)?a.checked=n.inArray(n(a).val(),b)>=0:void 0}},k.checkOn||(n.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})}),n.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){n.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),n.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var cc=n.now(),dc=/\?/;n.parseJSON=function(a){return JSON.parse(a+"")},n.parseXML=function(a){var b,c;if(!a||"string"!=typeof a)return null;try{c=new DOMParser,b=c.parseFromString(a,"text/xml")}catch(d){b=void 0}return(!b||b.getElementsByTagName("parsererror").length)&&n.error("Invalid XML: "+a),b};var ec=/#.*$/,fc=/([?&])_=[^&]*/,gc=/^(.*?):[ \t]*([^\r\n]*)$/gm,hc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,ic=/^(?:GET|HEAD)$/,jc=/^\/\//,kc=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,lc={},mc={},nc="*/".concat("*"),oc=a.location.href,pc=kc.exec(oc.toLowerCase())||[];function qc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(n.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function rc(a,b,c,d){var e={},f=a===mc;function g(h){var i;return e[h]=!0,n.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function sc(a,b){var c,d,e=n.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&n.extend(!0,a,d),a}function tc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function uc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in 
a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}n.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:oc,type:"GET",isLocal:hc.test(pc[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":nc,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":n.parseJSON,"text xml":n.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?sc(sc(a,n.ajaxSettings),b):sc(n.ajaxSettings,a)},ajaxPrefilter:qc(lc),ajaxTransport:qc(mc),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=n.ajaxSetup({},b),l=k.context||k,m=k.context&&(l.nodeType||l.jquery)?n(l):n.event,o=n.Deferred(),p=n.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!f){f={};while(b=gc.exec(e))f[b[1].toLowerCase()]=b[2]}b=f[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?e:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return c&&c.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||oc)+"").replace(ec,"").replace(jc,pc[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=n.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(h=kc.exec(k.url.toLowerCase()),k.crossDomain=!(!h||h[1]===pc[1]&&h[2]===pc[2]&&(h[3]||("http:"===h[1]?"80":"443"))===(pc[3]||("http:"===pc[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=n.param(k.data,k.traditional)),rc(lc,k,b,v),2===t)return v;i=n.event&&k.global,i&&0===n.active++&&n.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!ic.test(k.type),d=k.url,k.hasContent||(k.data&&(d=k.url+=(dc.test(d)?"&":"?")+k.data,delete k.data),k.cache===!1&&(k.url=fc.test(d)?d.replace(fc,"$1_="+cc++):d+(dc.test(d)?"&":"?")+"_="+cc++)),k.ifModified&&(n.lastModified[d]&&v.setRequestHeader("If-Modified-Since",n.lastModified[d]),n.etag[d]&&v.setRequestHeader("If-None-Match",n.etag[d])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+nc+"; q=0.01":""):k.accepts["*"]);for(j in k.headers)v.setRequestHeader(j,k.headers[j]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(j 
in{success:1,error:1,complete:1})v[j](k[j]);if(c=rc(mc,k,b,v)){v.readyState=1,i&&m.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,c.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,f,h){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),c=void 0,e=h||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,f&&(u=tc(k,v,f)),u=uc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(n.lastModified[d]=w),w=v.getResponseHeader("etag"),w&&(n.etag[d]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,i&&m.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),i&&(m.trigger("ajaxComplete",[v,k]),--n.active||n.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return n.get(a,b,c,"json")},getScript:function(a,b){return n.get(a,void 0,b,"script")}}),n.each(["get","post"],function(a,b){n[b]=function(a,c,d,e){return n.isFunction(c)&&(e=e||d,d=c,c=void 0),n.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),n._evalUrl=function(a){return n.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},n.fn.extend({wrapAll:function(a){var b;return n.isFunction(a)?this.each(function(b){n(this).wrapAll(a.call(this,b))}):(this[0]&&(b=n(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this)},wrapInner:function(a){return this.each(n.isFunction(a)?function(b){n(this).wrapInner(a.call(this,b))}:function(){var b=n(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=n.isFunction(a);return this.each(function(c){n(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){n.nodeName(this,"body")||n(this).replaceWith(this.childNodes)}).end()}}),n.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0},n.expr.filters.visible=function(a){return!n.expr.filters.hidden(a)};var vc=/%20/g,wc=/\[\]$/,xc=/\r?\n/g,yc=/^(?:submit|button|image|reset|file)$/i,zc=/^(?:input|select|textarea|keygen)/i;function Ac(a,b,c,d){var e;if(n.isArray(b))n.each(b,function(b,e){c||wc.test(a)?d(a,e):Ac(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==n.type(b))d(a,b);else for(e in b)Ac(a+"["+e+"]",b[e],c,d)}n.param=function(a,b){var c,d=[],e=function(a,b){b=n.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=n.ajaxSettings&&n.ajaxSettings.traditional),n.isArray(a)||a.jquery&&!n.isPlainObject(a))n.each(a,function(){e(this.name,this.value)});else for(c in a)Ac(c,a[c],b,e);return d.join("&").replace(vc,"+")},n.fn.extend({serialize:function(){return n.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=n.prop(this,"elements");return a?n.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!n(this).is(":disabled")&&zc.test(this.nodeName)&&!yc.test(a)&&(this.checked||!T.test(a))}).map(function(a,b){var c=n(this).val();return null==c?null:n.isArray(c)?n.map(c,function(a){return{name:b.name,value:a.replace(xc,"\r\n")}}):{name:b.name,value:c.replace(xc,"\r\n")}}).get()}}),n.ajaxSettings.xhr=function(){try{return new XMLHttpRequest}catch(a){}};var 
Bc=0,Cc={},Dc={0:200,1223:204},Ec=n.ajaxSettings.xhr();a.attachEvent&&a.attachEvent("onunload",function(){for(var a in Cc)Cc[a]()}),k.cors=!!Ec&&"withCredentials"in Ec,k.ajax=Ec=!!Ec,n.ajaxTransport(function(a){var b;return k.cors||Ec&&!a.crossDomain?{send:function(c,d){var e,f=a.xhr(),g=++Bc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)f.setRequestHeader(e,c[e]);b=function(a){return function(){b&&(delete Cc[g],b=f.onload=f.onerror=null,"abort"===a?f.abort():"error"===a?d(f.status,f.statusText):d(Dc[f.status]||f.status,f.statusText,"string"==typeof f.responseText?{text:f.responseText}:void 0,f.getAllResponseHeaders()))}},f.onload=b(),f.onerror=b("error"),b=Cc[g]=b("abort");try{f.send(a.hasContent&&a.data||null)}catch(h){if(b)throw h}},abort:function(){b&&b()}}:void 0}),n.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return n.globalEval(a),a}}}),n.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),n.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(d,e){b=n("<script>").prop({async:!0,charset:a.scriptCharset,src:a.url}).on("load error",c=function(a){b.remove(),c=null,a&&e("error"===a.type?404:200,a.type)}),l.head.appendChild(b[0])},abort:function(){c&&c()}}}});var Fc=[],Gc=/(=)\?(?=&|$)|\?\?/;n.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=Fc.pop()||n.expando+"_"+cc++;return this[a]=!0,a}}),n.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(Gc.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&Gc.test(b.data)&&"data");return h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=n.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(Gc,"$1"+e):b.jsonp!==!1&&(b.url+=(dc.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||n.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,Fc.push(e)),g&&n.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),n.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||l;var d=v.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=n.buildFragment([a],b,e),e&&e.length&&n(e).remove(),n.merge([],d.childNodes))};var Hc=n.fn.load;n.fn.load=function(a,b,c){if("string"!=typeof a&&Hc)return Hc.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=n.trim(a.slice(h)),a=a.slice(0,h)),n.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(e="POST"),g.length>0&&n.ajax({url:a,type:e,dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?n("<div>").append(n.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,f||[a.responseText,b,a])}),this},n.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){n.fn[b]=function(a){return this.on(b,a)}}),n.expr.filters.animated=function(a){return n.grep(n.timers,function(b){return a===b.elem}).length};var Ic=a.document.documentElement;function Jc(a){return 
n.isWindow(a)?a:9===a.nodeType&&a.defaultView}n.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=n.css(a,"position"),l=n(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=n.css(a,"top"),i=n.css(a,"left"),j=("absolute"===k||"fixed"===k)&&(f+i).indexOf("auto")>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),n.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in b?b.using.call(a,m):l.css(m)}},n.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){n.offset.setOffset(this,a,b)});var b,c,d=this[0],e={top:0,left:0},f=d&&d.ownerDocument;if(f)return b=f.documentElement,n.contains(b,d)?(typeof d.getBoundingClientRect!==U&&(e=d.getBoundingClientRect()),c=Jc(f),{top:e.top+c.pageYOffset-b.clientTop,left:e.left+c.pageXOffset-b.clientLeft}):e},position:function(){if(this[0]){var a,b,c=this[0],d={top:0,left:0};return"fixed"===n.css(c,"position")?b=c.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),n.nodeName(a[0],"html")||(d=a.offset()),d.top+=n.css(a[0],"borderTopWidth",!0),d.left+=n.css(a[0],"borderLeftWidth",!0)),{top:b.top-d.top-n.css(c,"marginTop",!0),left:b.left-d.left-n.css(c,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||Ic;while(a&&!n.nodeName(a,"html")&&"static"===n.css(a,"position"))a=a.offsetParent;return a||Ic})}}),n.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(b,c){var d="pageYOffset"===c;n.fn[b]=function(e){return J(this,function(b,e,f){var g=Jc(b);return void 0===f?g?g[c]:b[e]:void(g?g.scrollTo(d?a.pageXOffset:f,d?f:a.pageYOffset):b[e]=f)},b,e,arguments.length,null)}}),n.each(["top","left"],function(a,b){n.cssHooks[b]=yb(k.pixelPosition,function(a,c){return c?(c=xb(a,b),vb.test(c)?n(a).position()[b]+"px":c):void 0})}),n.each({Height:"height",Width:"width"},function(a,b){n.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){n.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return J(this,function(b,c,d){var e;return n.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?n.css(b,c,g):n.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),n.fn.size=function(){return this.length},n.fn.andSelf=n.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return n});var Kc=a.jQuery,Lc=a.$;return n.noConflict=function(b){return a.$===n&&(a.$=Lc),b&&a.jQuery===n&&(a.jQuery=Kc),n},typeof b===U&&(a.jQuery=a.$=n),n});
diff --git a/synapse/static/client/register/js/recaptcha_ajax.js b/synapse/static/client/register/js/recaptcha_ajax.js
new file mode 100644
index 00000000..d0e71e5b
--- /dev/null
+++ b/synapse/static/client/register/js/recaptcha_ajax.js
@@ -0,0 +1,195 @@
+(function(){var h,k=this,l=function(a){return void 0!==a},ba=function(){},n=function(a){var b=typeof a;if("object"==b)if(a){if(a instanceof Array)return"array";if(a instanceof Object)return b;var c=Object.prototype.toString.call(a);if("[object Window]"==c)return"object";if("[object Array]"==c||"number"==typeof a.length&&"undefined"!=typeof a.splice&&"undefined"!=typeof a.propertyIsEnumerable&&!a.propertyIsEnumerable("splice"))return"array";if("[object Function]"==c||"undefined"!=typeof a.call&&"undefined"!=typeof a.propertyIsEnumerable&&
+!a.propertyIsEnumerable("call"))return"function"}else return"null";else if("function"==b&&"undefined"==typeof a.call)return"object";return b},p=function(a){return"array"==n(a)},ca=function(a){var b=n(a);return"array"==b||"object"==b&&"number"==typeof a.length},q=function(a){return"string"==typeof a},r=function(a){return"function"==n(a)},da=function(a){var b=typeof a;return"object"==b&&null!=a||"function"==b},ea=function(a,b,c){return a.call.apply(a.bind,arguments)},fa=function(a,b,c){if(!a)throw Error();
+if(2<arguments.length){var d=Array.prototype.slice.call(arguments,2);return function(){var c=Array.prototype.slice.call(arguments);Array.prototype.unshift.apply(c,d);return a.apply(b,c)}}return function(){return a.apply(b,arguments)}},s=function(a,b,c){s=Function.prototype.bind&&-1!=Function.prototype.bind.toString().indexOf("native code")?ea:fa;return s.apply(null,arguments)},ga=function(a,b){var c=Array.prototype.slice.call(arguments,1);return function(){var b=c.slice();b.push.apply(b,arguments);
+return a.apply(this,b)}},ha=Date.now||function(){return+new Date},ia=null,t=function(a,b){var c=a.split("."),d=k;c[0]in d||!d.execScript||d.execScript("var "+c[0]);for(var e;c.length&&(e=c.shift());)!c.length&&l(b)?d[e]=b:d=d[e]?d[e]:d[e]={}},u=function(a,b){function c(){}c.prototype=b.prototype;a.superClass_=b.prototype;a.prototype=new c;a.base=function(a,c,g){return b.prototype[c].apply(a,Array.prototype.slice.call(arguments,2))}};
+Function.prototype.bind=Function.prototype.bind||function(a,b){if(1<arguments.length){var c=Array.prototype.slice.call(arguments,1);c.unshift(this,a);return s.apply(null,c)}return s(this,a)};var v={};t("RecaptchaTemplates",v);v.VertHtml='<table id="recaptcha_table" class="recaptchatable" > <tr> <td colspan="6" class=\'recaptcha_r1_c1\'></td> </tr> <tr> <td class=\'recaptcha_r2_c1\'></td> <td colspan="4" class=\'recaptcha_image_cell\'><center><div id="recaptcha_image"></div></center></td> <td class=\'recaptcha_r2_c2\'></td> </tr> <tr> <td rowspan="6" class=\'recaptcha_r3_c1\'></td> <td colspan="4" class=\'recaptcha_r3_c2\'></td> <td rowspan="6" class=\'recaptcha_r3_c3\'></td> </tr> <tr> <td rowspan="3" class=\'recaptcha_r4_c1\' height="49"> <div class="recaptcha_input_area"> <input name="recaptcha_response_field" id="recaptcha_response_field" type="text" autocorrect="off" autocapitalize="off" placeholder="" /> <span id="recaptcha_privacy" class="recaptcha_only_if_privacy"></span> </div> </td> <td rowspan="4" class=\'recaptcha_r4_c2\'></td> <td><a id=\'recaptcha_reload_btn\'><img id=\'recaptcha_reload\' width="25" height="17" /></a></td> <td rowspan="4" class=\'recaptcha_r4_c4\'></td> </tr> <tr> <td><a id=\'recaptcha_switch_audio_btn\' class="recaptcha_only_if_image"><img id=\'recaptcha_switch_audio\' width="25" height="16" alt="" /></a><a id=\'recaptcha_switch_img_btn\' class="recaptcha_only_if_audio"><img id=\'recaptcha_switch_img\' width="25" height="16" alt=""/></a></td> </tr> <tr> <td><a id=\'recaptcha_whatsthis_btn\'><img id=\'recaptcha_whatsthis\' width="25" height="16" /></a></td> </tr> <tr> <td class=\'recaptcha_r7_c1\'></td> <td class=\'recaptcha_r8_c1\'></td> </tr> </table> ';v.CleanCss=".recaptchatable td img{display:block}.recaptchatable .recaptcha_image_cell center img{height:57px}.recaptchatable .recaptcha_image_cell center{height:57px}.recaptchatable .recaptcha_image_cell{background-color:white;height:57px;padding:7px!important}.recaptchatable,#recaptcha_area tr,#recaptcha_area td,#recaptcha_area th{margin:0!important;border:0!important;border-collapse:collapse!important;vertical-align:middle!important}.recaptchatable *{margin:0;padding:0;border:0;color:black;position:static;top:auto;left:auto;right:auto;bottom:auto}.recaptchatable #recaptcha_image{position:relative;margin:auto;border:1px solid #dfdfdf!important}.recaptchatable #recaptcha_image #recaptcha_challenge_image{display:block}.recaptchatable #recaptcha_image #recaptcha_ad_image{display:block;position:absolute;top:0}.recaptchatable a img{border:0}.recaptchatable a,.recaptchatable a:hover{cursor:pointer;outline:none;border:0!important;padding:0!important;text-decoration:none;color:blue;background:none!important;font-weight:normal}.recaptcha_input_area{position:relative!important;background:none!important}.recaptchatable label.recaptcha_input_area_text{border:1px solid #dfdfdf!important;margin:0!important;padding:0!important;position:static!important;top:auto!important;left:auto!important;right:auto!important;bottom:auto!important}.recaptcha_theme_red label.recaptcha_input_area_text,.recaptcha_theme_white label.recaptcha_input_area_text{color:black!important}.recaptcha_theme_blackglass label.recaptcha_input_area_text{color:white!important}.recaptchatable #recaptcha_response_field{font-size:11pt}.recaptcha_theme_blackglass #recaptcha_response_field,.recaptcha_theme_white #recaptcha_response_field{border:1px solid gray}.recaptcha_theme_red #recaptcha_response_field{border:1px solid 
#cca940}.recaptcha_audio_cant_hear_link{font-size:7pt;color:black}.recaptchatable{line-height:1em;border:1px solid #dfdfdf!important}.recaptcha_error_text{color:red}.recaptcha_only_if_privacy{float:right;text-align:right;margin-right:7px}#recaptcha-ad-choices{position:absolute;height:15px;top:0;right:0}#recaptcha-ad-choices img{height:15px}.recaptcha-ad-choices-collapsed{width:30px;height:15px;display:block}.recaptcha-ad-choices-expanded{width:75px;height:15px;display:none}#recaptcha-ad-choices:hover .recaptcha-ad-choices-collapsed{display:none}#recaptcha-ad-choices:hover .recaptcha-ad-choices-expanded{display:block}";v.CleanHtml='<table id="recaptcha_table" class="recaptchatable"> <tr height="73"> <td class=\'recaptcha_image_cell\' width="302"><center><div id="recaptcha_image"></div></center></td> <td style="padding: 10px 7px 7px 7px;"> <a id=\'recaptcha_reload_btn\'><img id=\'recaptcha_reload\' width="25" height="18" alt="" /></a> <a id=\'recaptcha_switch_audio_btn\' class="recaptcha_only_if_image"><img id=\'recaptcha_switch_audio\' width="25" height="15" alt="" /></a><a id=\'recaptcha_switch_img_btn\' class="recaptcha_only_if_audio"><img id=\'recaptcha_switch_img\' width="25" height="15" alt=""/></a> <a id=\'recaptcha_whatsthis_btn\'><img id=\'recaptcha_whatsthis\' width="25" height="16" /></a> </td> <td style="padding: 18px 7px 18px 7px;"> <img id=\'recaptcha_logo\' alt="" width="71" height="36" /> </td> </tr> <tr> <td style="padding-left: 7px;"> <div class="recaptcha_input_area" style="padding-top: 2px; padding-bottom: 7px;"> <input style="border: 1px solid #3c3c3c; width: 302px;" name="recaptcha_response_field" id="recaptcha_response_field" type="text" /> </div> </td> <td colspan=2><span id="recaptcha_privacy" class="recaptcha_only_if_privacy"></span></td> </tr> </table> ';v.VertCss=".recaptchatable td img{display:block}.recaptchatable .recaptcha_r1_c1{background:url('IMGROOT/sprite.png') 0 -63px no-repeat;width:318px;height:9px}.recaptchatable .recaptcha_r2_c1{background:url('IMGROOT/sprite.png') -18px 0 no-repeat;width:9px;height:57px}.recaptchatable .recaptcha_r2_c2{background:url('IMGROOT/sprite.png') -27px 0 no-repeat;width:9px;height:57px}.recaptchatable .recaptcha_r3_c1{background:url('IMGROOT/sprite.png') 0 0 no-repeat;width:9px;height:63px}.recaptchatable .recaptcha_r3_c2{background:url('IMGROOT/sprite.png') -18px -57px no-repeat;width:300px;height:6px}.recaptchatable .recaptcha_r3_c3{background:url('IMGROOT/sprite.png') -9px 0 no-repeat;width:9px;height:63px}.recaptchatable .recaptcha_r4_c1{background:url('IMGROOT/sprite.png') -43px 0 no-repeat;width:171px;height:49px}.recaptchatable .recaptcha_r4_c2{background:url('IMGROOT/sprite.png') -36px 0 no-repeat;width:7px;height:57px}.recaptchatable .recaptcha_r4_c4{background:url('IMGROOT/sprite.png') -214px 0 no-repeat;width:97px;height:57px}.recaptchatable .recaptcha_r7_c1{background:url('IMGROOT/sprite.png') -43px -49px no-repeat;width:171px;height:8px}.recaptchatable .recaptcha_r8_c1{background:url('IMGROOT/sprite.png') -43px -49px no-repeat;width:25px;height:8px}.recaptchatable .recaptcha_image_cell center img{height:57px}.recaptchatable .recaptcha_image_cell center{height:57px}.recaptchatable .recaptcha_image_cell{background-color:white;height:57px}#recaptcha_area,#recaptcha_table{width:318px!important}.recaptchatable,#recaptcha_area tr,#recaptcha_area td,#recaptcha_area th{margin:0!important;border:0!important;padding:0!important;border-collapse:collapse!important;vertical-align:middle!important}.recaptchatable 
*{margin:0;padding:0;border:0;font-family:helvetica,sans-serif;font-size:8pt;color:black;position:static;top:auto;left:auto;right:auto;bottom:auto}.recaptchatable #recaptcha_image{position:relative;margin:auto}.recaptchatable #recaptcha_image #recaptcha_challenge_image{display:block}.recaptchatable #recaptcha_image #recaptcha_ad_image{display:block;position:absolute;top:0}.recaptchatable img{border:0!important;margin:0!important;padding:0!important}.recaptchatable a,.recaptchatable a:hover{cursor:pointer;outline:none;border:0!important;padding:0!important;text-decoration:none;color:blue;background:none!important;font-weight:normal}.recaptcha_input_area{position:relative!important;width:153px!important;height:45px!important;margin-left:7px!important;margin-right:7px!important;background:none!important}.recaptchatable label.recaptcha_input_area_text{margin:0!important;padding:0!important;position:static!important;top:auto!important;left:auto!important;right:auto!important;bottom:auto!important;background:none!important;height:auto!important;width:auto!important}.recaptcha_theme_red label.recaptcha_input_area_text,.recaptcha_theme_white label.recaptcha_input_area_text{color:black!important}.recaptcha_theme_blackglass label.recaptcha_input_area_text{color:white!important}.recaptchatable #recaptcha_response_field{width:153px!important;position:relative!important;bottom:7px!important;padding:0!important;margin:15px 0 0 0!important;font-size:10pt}.recaptcha_theme_blackglass #recaptcha_response_field,.recaptcha_theme_white #recaptcha_response_field{border:1px solid gray}.recaptcha_theme_red #recaptcha_response_field{border:1px solid #cca940}.recaptcha_audio_cant_hear_link{font-size:7pt;color:black}.recaptchatable{line-height:1!important}#recaptcha_instructions_error{color:red!important}.recaptcha_only_if_privacy{float:right;text-align:right}#recaptcha-ad-choices{position:absolute;height:15px;top:0;right:0}#recaptcha-ad-choices img{height:15px}.recaptcha-ad-choices-collapsed{width:30px;height:15px;display:block}.recaptcha-ad-choices-expanded{width:75px;height:15px;display:none}#recaptcha-ad-choices:hover .recaptcha-ad-choices-collapsed{display:none}#recaptcha-ad-choices:hover .recaptcha-ad-choices-expanded{display:block}";var w={visual_challenge:"Get a visual challenge",audio_challenge:"Get an audio challenge",refresh_btn:"Get a new challenge",instructions_visual:"Type the text:",instructions_audio:"Type what you hear:",help_btn:"Help",play_again:"Play sound again",cant_hear_this:"Download sound as MP3",incorrect_try_again:"Incorrect. Try again.",image_alt_text:"reCAPTCHA challenge image",privacy_and_terms:"Privacy & Terms"},ja={visual_challenge:"\u0627\u0644\u062d\u0635\u0648\u0644 \u0639\u0644\u0649 \u062a\u062d\u062f\u064d \u0645\u0631\u0626\u064a",
+audio_challenge:"\u0627\u0644\u062d\u0635\u0648\u0644 \u0639\u0644\u0649 \u062a\u062d\u062f\u064d \u0635\u0648\u062a\u064a",refresh_btn:"\u0627\u0644\u062d\u0635\u0648\u0644 \u0639\u0644\u0649 \u062a\u062d\u062f\u064d \u062c\u062f\u064a\u062f",instructions_visual:"\u064a\u0631\u062c\u0649 \u0643\u062a\u0627\u0628\u0629 \u0627\u0644\u0646\u0635:",instructions_audio:"\u0627\u0643\u062a\u0628 \u0645\u0627 \u062a\u0633\u0645\u0639\u0647:",help_btn:"\u0645\u0633\u0627\u0639\u062f\u0629",play_again:"\u062a\u0634\u063a\u064a\u0644 \u0627\u0644\u0635\u0648\u062a \u0645\u0631\u0629 \u0623\u062e\u0631\u0649",
+cant_hear_this:"\u062a\u0646\u0632\u064a\u0644 \u0627\u0644\u0635\u0648\u062a \u0628\u062a\u0646\u0633\u064a\u0642 MP3",incorrect_try_again:"\u063a\u064a\u0631 \u0635\u062d\u064a\u062d. \u0623\u0639\u062f \u0627\u0644\u0645\u062d\u0627\u0648\u0644\u0629.",image_alt_text:"\u0635\u0648\u0631\u0629 \u0627\u0644\u062a\u062d\u062f\u064a \u0645\u0646 reCAPTCHA",privacy_and_terms:"\u0627\u0644\u062e\u0635\u0648\u0635\u064a\u0629 \u0648\u0627\u0644\u0628\u0646\u0648\u062f"},ka={visual_challenge:"Obtener una pista visual",
+audio_challenge:"Obtener una pista sonora",refresh_btn:"Obtener una pista nueva",instructions_visual:"Introduzca el texto:",instructions_audio:"Escribe lo que oigas:",help_btn:"Ayuda",play_again:"Volver a reproducir el sonido",cant_hear_this:"Descargar el sonido en MP3",incorrect_try_again:"Incorrecto. Vu\u00e9lvelo a intentar.",image_alt_text:"Pista de imagen reCAPTCHA",privacy_and_terms:"Privacidad y condiciones"},la={visual_challenge:"Kumuha ng pagsubok na visual",audio_challenge:"Kumuha ng pagsubok na audio",
+refresh_btn:"Kumuha ng bagong pagsubok",instructions_visual:"I-type ang teksto:",instructions_audio:"I-type ang iyong narinig",help_btn:"Tulong",play_again:"I-play muli ang tunog",cant_hear_this:"I-download ang tunog bilang MP3",incorrect_try_again:"Hindi wasto. Muling subukan.",image_alt_text:"larawang panghamon ng reCAPTCHA",privacy_and_terms:"Privacy at Mga Tuntunin"},ma={visual_challenge:"Test visuel",audio_challenge:"Test audio",refresh_btn:"Nouveau test",instructions_visual:"Saisissez le texte\u00a0:",
+instructions_audio:"Qu'entendez-vous ?",help_btn:"Aide",play_again:"R\u00e9\u00e9couter",cant_hear_this:"T\u00e9l\u00e9charger l'audio au format MP3",incorrect_try_again:"Incorrect. Veuillez r\u00e9essayer.",image_alt_text:"Image reCAPTCHA",privacy_and_terms:"Confidentialit\u00e9 et conditions d'utilisation"},na={visual_challenge:"Dapatkan kata pengujian berbentuk visual",audio_challenge:"Dapatkan kata pengujian berbentuk audio",refresh_btn:"Dapatkan kata pengujian baru",instructions_visual:"Ketik teks:",
+instructions_audio:"Ketik yang Anda dengar:",help_btn:"Bantuan",play_again:"Putar suara sekali lagi",cant_hear_this:"Unduh suara sebagai MP3",incorrect_try_again:"Salah. Coba lagi.",image_alt_text:"Gambar tantangan reCAPTCHA",privacy_and_terms:"Privasi & Persyaratan"},oa={visual_challenge:"\u05e7\u05d1\u05dc \u05d0\u05ea\u05d2\u05e8 \u05d7\u05d6\u05d5\u05ea\u05d9",audio_challenge:"\u05e7\u05d1\u05dc \u05d0\u05ea\u05d2\u05e8 \u05e9\u05de\u05e2",refresh_btn:"\u05e7\u05d1\u05dc \u05d0\u05ea\u05d2\u05e8 \u05d7\u05d3\u05e9",
+instructions_visual:"\u05d4\u05e7\u05dc\u05d3 \u05d0\u05ea \u05d4\u05d8\u05e7\u05e1\u05d8:",instructions_audio:"\u05d4\u05e7\u05dc\u05d3 \u05d0\u05ea \u05de\u05d4 \u05e9\u05d0\u05ea\u05d4 \u05e9\u05d5\u05de\u05e2:",help_btn:"\u05e2\u05d6\u05e8\u05d4",play_again:"\u05d4\u05e4\u05e2\u05dc \u05e9\u05d5\u05d1 \u05d0\u05ea \u05d4\u05e9\u05de\u05e2",cant_hear_this:"\u05d4\u05d5\u05e8\u05d3 \u05e9\u05de\u05e2 \u05db-3MP",incorrect_try_again:"\u05e9\u05d2\u05d5\u05d9. \u05e0\u05e1\u05d4 \u05e9\u05d5\u05d1.",
+image_alt_text:"\u05ea\u05de\u05d5\u05e0\u05ea \u05d0\u05ea\u05d2\u05e8 \u05e9\u05dc reCAPTCHA",privacy_and_terms:"\u05e4\u05e8\u05d8\u05d9\u05d5\u05ea \u05d5\u05ea\u05e0\u05d0\u05d9\u05dd"},pa={visual_challenge:"Obter um desafio visual",audio_challenge:"Obter um desafio de \u00e1udio",refresh_btn:"Obter um novo desafio",instructions_visual:"Digite o texto:",instructions_audio:"Digite o que voc\u00ea ouve:",help_btn:"Ajuda",play_again:"Reproduzir som novamente",cant_hear_this:"Fazer download do som no formato MP3",
+incorrect_try_again:"Incorreto. Tente novamente.",image_alt_text:"Imagem de desafio reCAPTCHA",privacy_and_terms:"Privacidade e Termos"},qa={visual_challenge:"Ob\u0163ine\u0163i un cod captcha vizual",audio_challenge:"Ob\u0163ine\u0163i un cod captcha audio",refresh_btn:"Ob\u0163ine\u0163i un nou cod captcha",instructions_visual:"Introduce\u021bi textul:",instructions_audio:"Introduce\u0163i ceea ce auzi\u0163i:",help_btn:"Ajutor",play_again:"Reda\u0163i sunetul din nou",cant_hear_this:"Desc\u0103rca\u0163i fi\u015fierul audio ca MP3",
+incorrect_try_again:"Incorect. \u00cencerca\u0163i din nou.",image_alt_text:"Imagine de verificare reCAPTCHA",privacy_and_terms:"Confiden\u0163ialitate \u015fi termeni"},ra={visual_challenge:"\u6536\u5230\u4e00\u4e2a\u89c6\u9891\u9080\u8bf7",audio_challenge:"\u6362\u4e00\u7ec4\u97f3\u9891\u9a8c\u8bc1\u7801",refresh_btn:"\u6362\u4e00\u7ec4\u9a8c\u8bc1\u7801",instructions_visual:"\u8f93\u5165\u6587\u5b57\uff1a",instructions_audio:"\u8bf7\u952e\u5165\u60a8\u542c\u5230\u7684\u5185\u5bb9\uff1a",help_btn:"\u5e2e\u52a9",
+play_again:"\u91cd\u65b0\u64ad\u653e",cant_hear_this:"\u4ee5 MP3 \u683c\u5f0f\u4e0b\u8f7d\u58f0\u97f3",incorrect_try_again:"\u4e0d\u6b63\u786e\uff0c\u8bf7\u91cd\u8bd5\u3002",image_alt_text:"reCAPTCHA \u9a8c\u8bc1\u56fe\u7247",privacy_and_terms:"\u9690\u79c1\u6743\u548c\u4f7f\u7528\u6761\u6b3e"},sa={en:w,af:{visual_challenge:"Kry 'n visuele verifi\u00ebring",audio_challenge:"Kry 'n klankverifi\u00ebring",refresh_btn:"Kry 'n nuwe verifi\u00ebring",instructions_visual:"",instructions_audio:"Tik wat jy hoor:",
+help_btn:"Hulp",play_again:"Speel geluid weer",cant_hear_this:"Laai die klank af as MP3",incorrect_try_again:"Verkeerd. Probeer weer.",image_alt_text:"reCAPTCHA-uitdagingprent",privacy_and_terms:"Privaatheid en bepalings"},am:{visual_challenge:"\u12e8\u12a5\u12ed\u1273 \u1270\u130b\u1323\u121a \u12a0\u130d\u129d",audio_challenge:"\u120c\u120b \u12a0\u12f2\u1235 \u12e8\u12f5\u121d\u133d \u1325\u12eb\u1244 \u12ed\u1245\u1228\u1265",refresh_btn:"\u120c\u120b \u12a0\u12f2\u1235 \u1325\u12eb\u1244 \u12ed\u1245\u1228\u1265",
+instructions_visual:"",instructions_audio:"\u12e8\u121d\u1275\u1230\u121b\u12cd\u1295 \u1270\u12ed\u1265\u1361-",help_btn:"\u12a5\u1308\u12db",play_again:"\u12f5\u121d\u1339\u1295 \u12a5\u1295\u12f0\u1308\u1293 \u12a0\u132b\u12cd\u1275",cant_hear_this:"\u12f5\u121d\u1339\u1295 \u1260MP3 \u1245\u122d\u133d \u12a0\u12cd\u122d\u12f5",incorrect_try_again:"\u1275\u12ad\u12ad\u120d \u12a0\u12ed\u12f0\u1208\u121d\u1362 \u12a5\u1295\u12f0\u1308\u1293 \u121e\u12ad\u122d\u1362",image_alt_text:"reCAPTCHA \u121d\u1235\u120d \u130d\u1320\u121d",
+privacy_and_terms:"\u130d\u120b\u12ca\u1290\u1275 \u12a5\u1293 \u12cd\u120d"},ar:ja,"ar-EG":ja,bg:{visual_challenge:"\u041f\u043e\u043b\u0443\u0447\u0430\u0432\u0430\u043d\u0435 \u043d\u0430 \u0432\u0438\u0437\u0443\u0430\u043b\u043d\u0430 \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430",audio_challenge:"\u0417\u0430\u0440\u0435\u0436\u0434\u0430\u043d\u0435 \u043d\u0430 \u0430\u0443\u0434\u0438\u043e\u0442\u0435\u0441\u0442",refresh_btn:"\u0417\u0430\u0440\u0435\u0436\u0434\u0430\u043d\u0435 \u043d\u0430 \u043d\u043e\u0432 \u0442\u0435\u0441\u0442",
+instructions_visual:"\u0412\u044a\u0432\u0435\u0434\u0435\u0442\u0435 \u0442\u0435\u043a\u0441\u0442\u0430:",instructions_audio:"\u0412\u044a\u0432\u0435\u0434\u0435\u0442\u0435 \u0447\u0443\u0442\u043e\u0442\u043e:",help_btn:"\u041f\u043e\u043c\u043e\u0449",play_again:"\u041f\u043e\u0432\u0442\u043e\u0440\u043d\u043e \u043f\u0443\u0441\u043a\u0430\u043d\u0435 \u043d\u0430 \u0437\u0432\u0443\u043a\u0430",cant_hear_this:"\u0418\u0437\u0442\u0435\u0433\u043b\u044f\u043d\u0435 \u043d\u0430 \u0437\u0432\u0443\u043a\u0430 \u0432\u044a\u0432 \u0444\u043e\u0440\u043c\u0430\u0442 MP3",
+incorrect_try_again:"\u041d\u0435\u043f\u0440\u0430\u0432\u0438\u043b\u043d\u043e. \u041e\u043f\u0438\u0442\u0430\u0439\u0442\u0435 \u043e\u0442\u043d\u043e\u0432\u043e.",image_alt_text:"\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435 \u043d\u0430 \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430\u0442\u0430 \u0441 reCAPTCHA",privacy_and_terms:"\u041f\u043e\u0432\u0435\u0440\u0438\u0442\u0435\u043b\u043d\u043e\u0441\u0442 \u0438 \u041e\u0431\u0449\u0438 \u0443\u0441\u043b\u043e\u0432\u0438\u044f"},
+bn:{visual_challenge:"\u098f\u0995\u099f\u09bf \u09a6\u09c3\u09b6\u09cd\u09af\u09ae\u09be\u09a8 \u09aa\u09cd\u09b0\u09a4\u09bf\u09a6\u09cd\u09ac\u09a8\u09cd\u09a6\u09cd\u09ac\u09bf\u09a4\u09be \u09aa\u09be\u09a8",audio_challenge:"\u098f\u0995\u099f\u09bf \u0985\u09a1\u09bf\u0993 \u09aa\u09cd\u09b0\u09a4\u09bf\u09a6\u09cd\u09ac\u09a8\u09cd\u09a6\u09cd\u09ac\u09bf\u09a4\u09be \u09aa\u09be\u09a8",refresh_btn:"\u098f\u0995\u099f\u09bf \u09a8\u09a4\u09c1\u09a8 \u09aa\u09cd\u09b0\u09a4\u09bf\u09a6\u09cd\u09ac\u09a8\u09cd\u09a6\u09cd\u09ac\u09bf\u09a4\u09be \u09aa\u09be\u09a8",
+instructions_visual:"",instructions_audio:"\u0986\u09aa\u09a8\u09bf \u09af\u09be \u09b6\u09c1\u09a8\u099b\u09c7\u09a8 \u09a4\u09be \u09b2\u09bf\u0996\u09c1\u09a8:",help_btn:"\u09b8\u09b9\u09be\u09df\u09a4\u09be",play_again:"\u0986\u09ac\u09be\u09b0 \u09b8\u09be\u0989\u09a8\u09cd\u09a1 \u09aa\u09cd\u09b2\u09c7 \u0995\u09b0\u09c1\u09a8",cant_hear_this:"MP3 \u09b0\u09c2\u09aa\u09c7 \u09b6\u09ac\u09cd\u09a6 \u09a1\u09be\u0989\u09a8\u09b2\u09cb\u09a1 \u0995\u09b0\u09c1\u09a8",incorrect_try_again:"\u09ac\u09c7\u09a0\u09bf\u0995\u09f7 \u0986\u09ac\u09be\u09b0 \u099a\u09c7\u09b7\u09cd\u099f\u09be \u0995\u09b0\u09c1\u09a8\u09f7",
+image_alt_text:"reCAPTCHA \u099a\u09cd\u09af\u09be\u09b2\u09c7\u099e\u09cd\u099c \u099a\u09bf\u09a4\u09cd\u09b0",privacy_and_terms:"\u0997\u09cb\u09aa\u09a8\u09c0\u09af\u09bc\u09a4\u09be \u0993 \u09b6\u09b0\u09cd\u09a4\u09be\u09ac\u09b2\u09c0"},ca:{visual_challenge:"Obt\u00e9n un repte visual",audio_challenge:"Obteniu una pista sonora",refresh_btn:"Obteniu una pista nova",instructions_visual:"Escriviu el text:",instructions_audio:"Escriviu el que escolteu:",help_btn:"Ajuda",play_again:"Torna a reproduir el so",
+cant_hear_this:"Baixa el so com a MP3",incorrect_try_again:"No \u00e9s correcte. Torna-ho a provar.",image_alt_text:"Imatge del repte de reCAPTCHA",privacy_and_terms:"Privadesa i condicions"},cs:{visual_challenge:"Zobrazit vizu\u00e1ln\u00ed podobu v\u00fdrazu",audio_challenge:"P\u0159ehr\u00e1t zvukovou podobu v\u00fdrazu",refresh_btn:"Zobrazit nov\u00fd v\u00fdraz",instructions_visual:"Zadejte text:",instructions_audio:"Napi\u0161te, co jste sly\u0161eli:",help_btn:"N\u00e1pov\u011bda",play_again:"Znovu p\u0159ehr\u00e1t zvuk",
+cant_hear_this:"St\u00e1hnout zvuk ve form\u00e1tu MP3",incorrect_try_again:"\u0160patn\u011b. Zkuste to znovu.",image_alt_text:"Obr\u00e1zek reCAPTCHA",privacy_and_terms:"Ochrana soukrom\u00ed a smluvn\u00ed podm\u00ednky"},da:{visual_challenge:"Hent en visuel udfordring",audio_challenge:"Hent en lydudfordring",refresh_btn:"Hent en ny udfordring",instructions_visual:"Indtast teksten:",instructions_audio:"Indtast det, du h\u00f8rer:",help_btn:"Hj\u00e6lp",play_again:"Afspil lyden igen",cant_hear_this:"Download lyd som MP3",
+incorrect_try_again:"Forkert. Pr\u00f8v igen.",image_alt_text:"reCAPTCHA-udfordringsbillede",privacy_and_terms:"Privatliv og vilk\u00e5r"},de:{visual_challenge:"Captcha abrufen",audio_challenge:"Audio-Captcha abrufen",refresh_btn:"Neues Captcha abrufen",instructions_visual:"Geben Sie den angezeigten Text ein:",instructions_audio:"Geben Sie das Geh\u00f6rte ein:",help_btn:"Hilfe",play_again:"Wort erneut abspielen",cant_hear_this:"Wort als MP3 herunterladen",incorrect_try_again:"Falsch. Bitte versuchen Sie es erneut.",
+image_alt_text:"reCAPTCHA-Bild",privacy_and_terms:"Datenschutzerkl\u00e4rung & Nutzungsbedingungen"},el:{visual_challenge:"\u039f\u03c0\u03c4\u03b9\u03ba\u03ae \u03c0\u03c1\u03cc\u03ba\u03bb\u03b7\u03c3\u03b7",audio_challenge:"\u0397\u03c7\u03b7\u03c4\u03b9\u03ba\u03ae \u03c0\u03c1\u03cc\u03ba\u03bb\u03b7\u03c3\u03b7",refresh_btn:"\u039d\u03ad\u03b1 \u03c0\u03c1\u03cc\u03ba\u03bb\u03b7\u03c3\u03b7",instructions_visual:"\u03a0\u03bb\u03b7\u03ba\u03c4\u03c1\u03bf\u03bb\u03bf\u03b3\u03ae\u03c3\u03c4\u03b5 \u03c4\u03bf \u03ba\u03b5\u03af\u03bc\u03b5\u03bd\u03bf:",
+instructions_audio:"\u03a0\u03bb\u03b7\u03ba\u03c4\u03c1\u03bf\u03bb\u03bf\u03b3\u03ae\u03c3\u03c4\u03b5 \u03cc\u03c4\u03b9 \u03b1\u03ba\u03bf\u03cd\u03c4\u03b5:",help_btn:"\u0392\u03bf\u03ae\u03b8\u03b5\u03b9\u03b1",play_again:"\u0391\u03bd\u03b1\u03c0\u03b1\u03c1\u03b1\u03b3\u03c9\u03b3\u03ae \u03ae\u03c7\u03bf\u03c5 \u03be\u03b1\u03bd\u03ac",cant_hear_this:"\u039b\u03ae\u03c8\u03b7 \u03ae\u03c7\u03bf\u03c5 \u03c9\u03c2 \u039c\u03a13",incorrect_try_again:"\u039b\u03ac\u03b8\u03bf\u03c2. \u0394\u03bf\u03ba\u03b9\u03bc\u03ac\u03c3\u03c4\u03b5 \u03be\u03b1\u03bd\u03ac.",
+image_alt_text:"\u0395\u03b9\u03ba\u03cc\u03bd\u03b1 \u03c0\u03c1\u03cc\u03ba\u03bb\u03b7\u03c3\u03b7\u03c2 reCAPTCHA",privacy_and_terms:"\u0391\u03c0\u03cc\u03c1\u03c1\u03b7\u03c4\u03bf \u03ba\u03b1\u03b9 \u03cc\u03c1\u03bf\u03b9"},"en-GB":w,"en-US":w,es:ka,"es-419":{visual_challenge:"Enfrentar un desaf\u00edo visual",audio_challenge:"Enfrentar un desaf\u00edo de audio",refresh_btn:"Enfrentar un nuevo desaf\u00edo",instructions_visual:"Escriba el texto:",instructions_audio:"Escribe lo que escuchas:",
+help_btn:"Ayuda",play_again:"Reproducir sonido de nuevo",cant_hear_this:"Descargar sonido en formato MP3",incorrect_try_again:"Incorrecto. Vuelve a intentarlo.",image_alt_text:"Imagen del desaf\u00edo de la reCAPTCHA",privacy_and_terms:"Privacidad y condiciones"},"es-ES":ka,et:{visual_challenge:"Kuva kuvap\u00f5hine robotil\u00f5ks",audio_challenge:"Kuva helip\u00f5hine robotil\u00f5ks",refresh_btn:"Kuva uus robotil\u00f5ks",instructions_visual:"Tippige tekst:",instructions_audio:"Tippige, mida kuulete.",
+help_btn:"Abi",play_again:"Esita heli uuesti",cant_hear_this:"Laadi heli alla MP3-vormingus",incorrect_try_again:"Vale. Proovige uuesti.",image_alt_text:"reCAPTCHA robotil\u00f5ksu kujutis",privacy_and_terms:"Privaatsus ja tingimused"},eu:{visual_challenge:"Eskuratu ikusizko erronka",audio_challenge:"Eskuratu audio-erronka",refresh_btn:"Eskuratu erronka berria",instructions_visual:"",instructions_audio:"Idatzi entzuten duzuna:",help_btn:"Laguntza",play_again:"Erreproduzitu soinua berriro",cant_hear_this:"Deskargatu soinua MP3 gisa",
+incorrect_try_again:"Ez da zuzena. Saiatu berriro.",image_alt_text:"reCAPTCHA erronkaren irudia",privacy_and_terms:"Pribatutasuna eta baldintzak"},fa:{visual_challenge:"\u062f\u0631\u06cc\u0627\u0641\u062a \u06cc\u06a9 \u0645\u0639\u0645\u0627\u06cc \u062f\u06cc\u062f\u0627\u0631\u06cc",audio_challenge:"\u062f\u0631\u06cc\u0627\u0641\u062a \u06cc\u06a9 \u0645\u0639\u0645\u0627\u06cc \u0635\u0648\u062a\u06cc",refresh_btn:"\u062f\u0631\u06cc\u0627\u0641\u062a \u06cc\u06a9 \u0645\u0639\u0645\u0627\u06cc \u062c\u062f\u06cc\u062f",
+instructions_visual:"",instructions_audio:"\u0622\u0646\u0686\u0647 \u0631\u0627 \u06a9\u0647 \u0645\u06cc\u200c\u0634\u0646\u0648\u06cc\u062f \u062a\u0627\u06cc\u067e \u06a9\u0646\u06cc\u062f:",help_btn:"\u0631\u0627\u0647\u0646\u0645\u0627\u06cc\u06cc",play_again:"\u067e\u062e\u0634 \u0645\u062c\u062f\u062f \u0635\u062f\u0627",cant_hear_this:"\u062f\u0627\u0646\u0644\u0648\u062f \u0635\u062f\u0627 \u0628\u0647 \u0635\u0648\u0631\u062a MP3",incorrect_try_again:"\u0646\u0627\u062f\u0631\u0633\u062a. \u062f\u0648\u0628\u0627\u0631\u0647 \u0627\u0645\u062a\u062d\u0627\u0646 \u06a9\u0646\u06cc\u062f.",
+image_alt_text:"\u062a\u0635\u0648\u06cc\u0631 \u0686\u0627\u0644\u0634\u06cc reCAPTCHA",privacy_and_terms:"\u062d\u0631\u06cc\u0645 \u062e\u0635\u0648\u0635\u06cc \u0648 \u0634\u0631\u0627\u06cc\u0637"},fi:{visual_challenge:"Kuvavahvistus",audio_challenge:"\u00c4\u00e4nivahvistus",refresh_btn:"Uusi kuva",instructions_visual:"Kirjoita teksti:",instructions_audio:"Kirjoita kuulemasi:",help_btn:"Ohje",play_again:"Toista \u00e4\u00e4ni uudelleen",cant_hear_this:"Lataa \u00e4\u00e4ni MP3-tiedostona",
+incorrect_try_again:"V\u00e4\u00e4rin. Yrit\u00e4 uudelleen.",image_alt_text:"reCAPTCHA-kuva",privacy_and_terms:"Tietosuoja ja k\u00e4ytt\u00f6ehdot"},fil:la,fr:ma,"fr-CA":{visual_challenge:"Obtenir un test visuel",audio_challenge:"Obtenir un test audio",refresh_btn:"Obtenir un nouveau test",instructions_visual:"Saisissez le texte\u00a0:",instructions_audio:"Tapez ce que vous entendez\u00a0:",help_btn:"Aide",play_again:"Jouer le son de nouveau",cant_hear_this:"T\u00e9l\u00e9charger le son en format MP3",
+incorrect_try_again:"Erreur, essayez \u00e0 nouveau",image_alt_text:"Image reCAPTCHA",privacy_and_terms:"Confidentialit\u00e9 et conditions d'utilisation"},"fr-FR":ma,gl:{visual_challenge:"Obter unha proba visual",audio_challenge:"Obter unha proba de audio",refresh_btn:"Obter unha proba nova",instructions_visual:"",instructions_audio:"Escribe o que escoitas:",help_btn:"Axuda",play_again:"Reproducir o son de novo",cant_hear_this:"Descargar son como MP3",incorrect_try_again:"Incorrecto. T\u00e9ntao de novo.",
+image_alt_text:"Imaxe de proba de reCAPTCHA",privacy_and_terms:"Privacidade e condici\u00f3ns"},gu:{visual_challenge:"\u0a8f\u0a95 \u0aa6\u0ac3\u0ab6\u0acd\u0aaf\u0abe\u0aa4\u0acd\u0aae\u0a95 \u0aaa\u0aa1\u0a95\u0abe\u0ab0 \u0aae\u0ac7\u0ab3\u0ab5\u0acb",audio_challenge:"\u0a8f\u0a95 \u0a91\u0aa1\u0abf\u0a93 \u0aaa\u0aa1\u0a95\u0abe\u0ab0 \u0aae\u0ac7\u0ab3\u0ab5\u0acb",refresh_btn:"\u0a8f\u0a95 \u0aa8\u0ab5\u0acb \u0aaa\u0aa1\u0a95\u0abe\u0ab0 \u0aae\u0ac7\u0ab3\u0ab5\u0acb",instructions_visual:"",
+instructions_audio:"\u0aa4\u0aae\u0ac7 \u0a9c\u0ac7 \u0ab8\u0abe\u0a82\u0aad\u0ab3\u0acb \u0a9b\u0acb \u0aa4\u0ac7 \u0ab2\u0a96\u0acb:",help_btn:"\u0ab8\u0ab9\u0abe\u0aaf",play_again:"\u0aa7\u0acd\u0ab5\u0aa8\u0abf \u0aab\u0ab0\u0ac0\u0aa5\u0ac0 \u0a9a\u0ab2\u0abe\u0ab5\u0acb",cant_hear_this:"MP3 \u0aa4\u0ab0\u0ac0\u0a95\u0ac7 \u0aa7\u0acd\u0ab5\u0aa8\u0abf\u0aa8\u0ac7 \u0aa1\u0abe\u0a89\u0aa8\u0ab2\u0acb\u0aa1 \u0a95\u0ab0\u0acb",incorrect_try_again:"\u0a96\u0acb\u0a9f\u0ac1\u0a82. \u0aab\u0ab0\u0ac0 \u0aaa\u0acd\u0ab0\u0aaf\u0abe\u0ab8 \u0a95\u0ab0\u0acb.",
+image_alt_text:"reCAPTCHA \u0aaa\u0aa1\u0a95\u0abe\u0ab0 \u0a9b\u0aac\u0ac0",privacy_and_terms:"\u0a97\u0acb\u0aaa\u0aa8\u0ac0\u0aaf\u0aa4\u0abe \u0a85\u0aa8\u0ac7 \u0ab6\u0ab0\u0aa4\u0acb"},hi:{visual_challenge:"\u0915\u094b\u0908 \u0935\u093f\u091c\u0941\u0905\u0932 \u091a\u0941\u0928\u094c\u0924\u0940 \u0932\u0947\u0902",audio_challenge:"\u0915\u094b\u0908 \u0911\u0921\u093f\u092f\u094b \u091a\u0941\u0928\u094c\u0924\u0940 \u0932\u0947\u0902",refresh_btn:"\u0915\u094b\u0908 \u0928\u0908 \u091a\u0941\u0928\u094c\u0924\u0940 \u0932\u0947\u0902",
+instructions_visual:"\u091f\u0947\u0915\u094d\u0938\u094d\u091f \u091f\u093e\u0907\u092a \u0915\u0930\u0947\u0902:",instructions_audio:"\u091c\u094b \u0906\u092a \u0938\u0941\u0928 \u0930\u0939\u0947 \u0939\u0948\u0902 \u0909\u0938\u0947 \u0932\u093f\u0916\u0947\u0902:",help_btn:"\u0938\u0939\u093e\u092f\u0924\u093e",play_again:"\u0927\u094d\u200d\u0935\u0928\u093f \u092a\u0941\u0928: \u091a\u0932\u093e\u090f\u0902",cant_hear_this:"\u0927\u094d\u200d\u0935\u0928\u093f \u0915\u094b MP3 \u0915\u0947 \u0930\u0942\u092a \u092e\u0947\u0902 \u0921\u093e\u0909\u0928\u0932\u094b\u0921 \u0915\u0930\u0947\u0902",
+incorrect_try_again:"\u0917\u0932\u0924. \u092a\u0941\u0928: \u092a\u094d\u0930\u092f\u093e\u0938 \u0915\u0930\u0947\u0902.",image_alt_text:"reCAPTCHA \u091a\u0941\u0928\u094c\u0924\u0940 \u091a\u093f\u0924\u094d\u0930",privacy_and_terms:"\u0917\u094b\u092a\u0928\u0940\u092f\u0924\u093e \u0914\u0930 \u0936\u0930\u094d\u0924\u0947\u0902"},hr:{visual_challenge:"Dohvati vizualni upit",audio_challenge:"Dohvati zvu\u010dni upit",refresh_btn:"Dohvati novi upit",instructions_visual:"Unesite tekst:",instructions_audio:"Upi\u0161ite \u0161to \u010dujete:",
+help_btn:"Pomo\u0107",play_again:"Ponovi zvuk",cant_hear_this:"Preuzmi zvuk u MP3 formatu",incorrect_try_again:"Nije to\u010dno. Poku\u0161ajte ponovno.",image_alt_text:"Slikovni izazov reCAPTCHA",privacy_and_terms:"Privatnost i odredbe"},hu:{visual_challenge:"Vizu\u00e1lis kih\u00edv\u00e1s k\u00e9r\u00e9se",audio_challenge:"Hangkih\u00edv\u00e1s k\u00e9r\u00e9se",refresh_btn:"\u00daj kih\u00edv\u00e1s k\u00e9r\u00e9se",instructions_visual:"\u00cdrja be a sz\u00f6veget:",instructions_audio:"\u00cdrja le, amit hall:",
+help_btn:"S\u00fag\u00f3",play_again:"Hang ism\u00e9telt lej\u00e1tsz\u00e1sa",cant_hear_this:"Hang let\u00f6lt\u00e9se MP3 form\u00e1tumban",incorrect_try_again:"Hib\u00e1s. Pr\u00f3b\u00e1lkozzon \u00fajra.",image_alt_text:"reCAPTCHA ellen\u0151rz\u0151 k\u00e9p",privacy_and_terms:"Adatv\u00e9delem \u00e9s Szerz\u0151d\u00e9si Felt\u00e9telek"},hy:{visual_challenge:"\u054d\u057f\u0561\u0576\u0561\u056c \u057f\u0565\u057d\u0578\u0572\u0561\u056f\u0561\u0576 \u056d\u0576\u0564\u056b\u0580",audio_challenge:"\u054d\u057f\u0561\u0576\u0561\u056c \u0571\u0561\u0575\u0576\u0561\u0575\u056b\u0576 \u056d\u0576\u0564\u056b\u0580",
+refresh_btn:"\u054d\u057f\u0561\u0576\u0561\u056c \u0576\u0578\u0580 \u056d\u0576\u0564\u056b\u0580",instructions_visual:"\u0544\u0578\u0582\u057f\u0584\u0561\u0563\u0580\u0565\u0584 \u057f\u0565\u0584\u057d\u057f\u0568\u055d",instructions_audio:"\u0544\u0578\u0582\u057f\u0584\u0561\u0563\u0580\u0565\u0584 \u0561\u0575\u0576, \u056b\u0576\u0579 \u056c\u057d\u0578\u0582\u0574 \u0565\u0584\u055d",help_btn:"\u0555\u0563\u0576\u0578\u0582\u0569\u0575\u0578\u0582\u0576",play_again:"\u0546\u057e\u0561\u0563\u0561\u0580\u056f\u0565\u056c \u0571\u0561\u0575\u0576\u0568 \u056f\u0580\u056f\u056b\u0576",
+cant_hear_this:"\u0532\u0565\u057c\u0576\u0565\u056c \u0571\u0561\u0575\u0576\u0568 \u0578\u0580\u057a\u0565\u057d MP3",incorrect_try_again:"\u054d\u056d\u0561\u056c \u0567: \u0553\u0578\u0580\u0571\u0565\u0584 \u056f\u0580\u056f\u056b\u0576:",image_alt_text:"reCAPTCHA \u057a\u0561\u057f\u056f\u0565\u0580\u0578\u057e \u056d\u0576\u0564\u056b\u0580",privacy_and_terms:"\u0533\u0561\u0572\u057f\u0576\u056b\u0578\u0582\u0569\u0575\u0561\u0576 & \u057a\u0561\u0575\u0574\u0561\u0576\u0576\u0565\u0580"},
+id:na,is:{visual_challenge:"F\u00e1 a\u00f0gangspr\u00f3f sem mynd",audio_challenge:"F\u00e1 a\u00f0gangspr\u00f3f sem hlj\u00f3\u00f0skr\u00e1",refresh_btn:"F\u00e1 n\u00fdtt a\u00f0gangspr\u00f3f",instructions_visual:"",instructions_audio:"Sl\u00e1\u00f0u inn \u00fea\u00f0 sem \u00fe\u00fa heyrir:",help_btn:"Hj\u00e1lp",play_again:"Spila hlj\u00f3\u00f0 aftur",cant_hear_this:"S\u00e6kja hlj\u00f3\u00f0 sem MP3",incorrect_try_again:"Rangt. Reyndu aftur.",image_alt_text:"mynd reCAPTCHA a\u00f0gangspr\u00f3fs",
+privacy_and_terms:"Pers\u00f3nuvernd og skilm\u00e1lar"},it:{visual_challenge:"Verifica visiva",audio_challenge:"Verifica audio",refresh_btn:"Nuova verifica",instructions_visual:"Digita il testo:",instructions_audio:"Digita ci\u00f2 che senti:",help_btn:"Guida",play_again:"Riproduci di nuovo audio",cant_hear_this:"Scarica audio in MP3",incorrect_try_again:"Sbagliato. Riprova.",image_alt_text:"Immagine di verifica reCAPTCHA",privacy_and_terms:"Privacy e Termini"},iw:oa,ja:{visual_challenge:"\u753b\u50cf\u3067\u78ba\u8a8d\u3057\u307e\u3059",
+audio_challenge:"\u97f3\u58f0\u3067\u78ba\u8a8d\u3057\u307e\u3059",refresh_btn:"\u5225\u306e\u5358\u8a9e\u3067\u3084\u308a\u76f4\u3057\u307e\u3059",instructions_visual:"\u30c6\u30ad\u30b9\u30c8\u3092\u5165\u529b:",instructions_audio:"\u805e\u3053\u3048\u305f\u5358\u8a9e\u3092\u5165\u529b\u3057\u307e\u3059:",help_btn:"\u30d8\u30eb\u30d7",play_again:"\u3082\u3046\u4e00\u5ea6\u805e\u304f",cant_hear_this:"MP3 \u3067\u97f3\u58f0\u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9",incorrect_try_again:"\u6b63\u3057\u304f\u3042\u308a\u307e\u305b\u3093\u3002\u3082\u3046\u4e00\u5ea6\u3084\u308a\u76f4\u3057\u3066\u304f\u3060\u3055\u3044\u3002",
+image_alt_text:"reCAPTCHA \u78ba\u8a8d\u7528\u753b\u50cf",privacy_and_terms:"\u30d7\u30e9\u30a4\u30d0\u30b7\u30fc\u3068\u5229\u7528\u898f\u7d04"},kn:{visual_challenge:"\u0ca6\u0cc3\u0cb6\u0ccd\u0caf \u0cb8\u0cb5\u0cbe\u0cb2\u0cca\u0c82\u0ca6\u0ca8\u0ccd\u0ca8\u0cc1 \u0cb8\u0ccd\u0cb5\u0cc0\u0c95\u0cb0\u0cbf\u0cb8\u0cbf",audio_challenge:"\u0c86\u0ca1\u0cbf\u0caf\u0ccb \u0cb8\u0cb5\u0cbe\u0cb2\u0cca\u0c82\u0ca6\u0ca8\u0ccd\u0ca8\u0cc1 \u0cb8\u0ccd\u0cb5\u0cc0\u0c95\u0cb0\u0cbf\u0cb8\u0cbf",refresh_btn:"\u0cb9\u0cca\u0cb8 \u0cb8\u0cb5\u0cbe\u0cb2\u0cca\u0c82\u0ca6\u0ca8\u0ccd\u0ca8\u0cc1 \u0caa\u0ca1\u0cc6\u0caf\u0cbf\u0cb0\u0cbf",
+instructions_visual:"",instructions_audio:"\u0ca8\u0cbf\u0cae\u0c97\u0cc6 \u0c95\u0cc7\u0cb3\u0cbf\u0cb8\u0cc1\u0cb5\u0cc1\u0ca6\u0ca8\u0ccd\u0ca8\u0cc1 \u0c9f\u0cc8\u0caa\u0ccd\u200c \u0cae\u0cbe\u0ca1\u0cbf:",help_btn:"\u0cb8\u0cb9\u0cbe\u0caf",play_again:"\u0ca7\u0ccd\u0cb5\u0ca8\u0cbf\u0caf\u0ca8\u0ccd\u0ca8\u0cc1 \u0cae\u0ca4\u0ccd\u0ca4\u0cc6 \u0caa\u0ccd\u0cb2\u0cc7 \u0cae\u0cbe\u0ca1\u0cbf",cant_hear_this:"\u0ca7\u0ccd\u0cb5\u0ca8\u0cbf\u0caf\u0ca8\u0ccd\u0ca8\u0cc1 MP3 \u0cb0\u0cc2\u0caa\u0ca6\u0cb2\u0ccd\u0cb2\u0cbf \u0ca1\u0ccc\u0ca8\u0ccd\u200c\u0cb2\u0ccb\u0ca1\u0ccd \u0cae\u0cbe\u0ca1\u0cbf",
+incorrect_try_again:"\u0ca4\u0caa\u0ccd\u0caa\u0cbe\u0c97\u0cbf\u0ca6\u0cc6. \u0cae\u0ca4\u0ccd\u0ca4\u0cca\u0cae\u0ccd\u0cae\u0cc6 \u0caa\u0ccd\u0cb0\u0caf\u0ca4\u0ccd\u0ca8\u0cbf\u0cb8\u0cbf.",image_alt_text:"reCAPTCHA \u0cb8\u0cb5\u0cbe\u0cb2\u0cc1 \u0c9a\u0cbf\u0ca4\u0ccd\u0cb0",privacy_and_terms:"\u0c97\u0ccc\u0caa\u0ccd\u0caf\u0ca4\u0cc6 \u0cae\u0ca4\u0ccd\u0ca4\u0cc1 \u0ca8\u0cbf\u0caf\u0cae\u0c97\u0cb3\u0cc1"},ko:{visual_challenge:"\uadf8\ub9bc\uc73c\ub85c \ubcf4\uc548\ubb38\uc790 \ubc1b\uae30",
+audio_challenge:"\uc74c\uc131\uc73c\ub85c \ubcf4\uc548\ubb38\uc790 \ubc1b\uae30",refresh_btn:"\ubcf4\uc548\ubb38\uc790 \uc0c8\ub85c \ubc1b\uae30",instructions_visual:"\ud14d\uc2a4\ud2b8 \uc785\ub825:",instructions_audio:"\uc74c\uc131 \ubcf4\uc548\ubb38\uc790 \uc785\ub825:",help_btn:"\ub3c4\uc6c0\ub9d0",play_again:"\uc74c\uc131 \ub2e4\uc2dc \ub4e3\uae30",cant_hear_this:"\uc74c\uc131\uc744 MP3\ub85c \ub2e4\uc6b4\ub85c\ub4dc",incorrect_try_again:"\ud2c0\ub838\uc2b5\ub2c8\ub2e4. \ub2e4\uc2dc \uc2dc\ub3c4\ud574 \uc8fc\uc138\uc694.",
+image_alt_text:"reCAPTCHA \ubcf4\uc548\ubb38\uc790 \uc774\ubbf8\uc9c0",privacy_and_terms:"\uac1c\uc778\uc815\ubcf4 \ubcf4\ud638 \ubc0f \uc57d\uad00"},ln:ma,lt:{visual_challenge:"Gauti vaizdin\u012f atpa\u017einimo test\u0105",audio_challenge:"Gauti garso atpa\u017einimo test\u0105",refresh_btn:"Gauti nauj\u0105 atpa\u017einimo test\u0105",instructions_visual:"\u012eveskite tekst\u0105:",instructions_audio:"\u012eveskite tai, k\u0105 girdite:",help_btn:"Pagalba",play_again:"Dar kart\u0105 paleisti gars\u0105",
+cant_hear_this:"Atsisi\u0173sti gars\u0105 kaip MP3",incorrect_try_again:"Neteisingai. Bandykite dar kart\u0105.",image_alt_text:"Testo \u201ereCAPTCHA\u201c vaizdas",privacy_and_terms:"Privatumas ir s\u0105lygos"},lv:{visual_challenge:"Sa\u0146emt vizu\u0101lu izaicin\u0101jumu",audio_challenge:"Sa\u0146emt audio izaicin\u0101jumu",refresh_btn:"Sa\u0146emt jaunu izaicin\u0101jumu",instructions_visual:"Ievadiet tekstu:",instructions_audio:"Ierakstiet dzirdamo:",help_btn:"Pal\u012bdz\u012bba",play_again:"V\u0113lreiz atska\u0146ot ska\u0146u",
+cant_hear_this:"Lejupiel\u0101d\u0113t ska\u0146u MP3\u00a0form\u0101t\u0101",incorrect_try_again:"Nepareizi. M\u0113\u0123iniet v\u0113lreiz.",image_alt_text:"reCAPTCHA izaicin\u0101juma att\u0113ls",privacy_and_terms:"Konfidencialit\u0101te un noteikumi"},ml:{visual_challenge:"\u0d12\u0d30\u0d41 \u0d26\u0d43\u0d36\u0d4d\u0d2f \u0d1a\u0d32\u0d1e\u0d4d\u0d1a\u0d4d \u0d28\u0d47\u0d1f\u0d41\u0d15",audio_challenge:"\u0d12\u0d30\u0d41 \u0d13\u0d21\u0d3f\u0d2f\u0d4b \u0d1a\u0d32\u0d1e\u0d4d\u0d1a\u0d4d \u0d28\u0d47\u0d1f\u0d41\u0d15",
+refresh_btn:"\u0d12\u0d30\u0d41 \u0d2a\u0d41\u0d24\u0d3f\u0d2f \u0d1a\u0d32\u0d1e\u0d4d\u0d1a\u0d4d \u0d28\u0d47\u0d1f\u0d41\u0d15",instructions_visual:"",instructions_audio:"\u0d15\u0d47\u0d7e\u0d15\u0d4d\u0d15\u0d41\u0d28\u0d4d\u0d28\u0d24\u0d4d \u0d1f\u0d48\u0d2a\u0d4d\u0d2a\u0d4d \u0d1a\u0d46\u0d2f\u0d4d\u0d2f\u0d42:",help_btn:"\u0d38\u0d39\u0d3e\u0d2f\u0d02",play_again:"\u0d36\u0d2c\u0d4d\u200c\u0d26\u0d02 \u0d35\u0d40\u0d23\u0d4d\u0d1f\u0d41\u0d02 \u0d2a\u0d4d\u0d32\u0d47 \u0d1a\u0d46\u0d2f\u0d4d\u0d2f\u0d41\u0d15",
+cant_hear_this:"\u0d36\u0d2c\u0d4d\u200c\u0d26\u0d02 MP3 \u0d06\u0d2f\u0d3f \u0d21\u0d57\u0d7a\u0d32\u0d4b\u0d21\u0d4d \u0d1a\u0d46\u0d2f\u0d4d\u0d2f\u0d41\u0d15",incorrect_try_again:"\u0d24\u0d46\u0d31\u0d4d\u0d31\u0d3e\u0d23\u0d4d. \u0d35\u0d40\u0d23\u0d4d\u0d1f\u0d41\u0d02 \u0d36\u0d4d\u0d30\u0d2e\u0d3f\u0d15\u0d4d\u0d15\u0d41\u0d15.",image_alt_text:"reCAPTCHA \u0d1a\u0d32\u0d1e\u0d4d\u0d1a\u0d4d \u0d07\u0d2e\u0d47\u0d1c\u0d4d",privacy_and_terms:"\u0d38\u0d4d\u0d35\u0d15\u0d3e\u0d30\u0d4d\u0d2f\u0d24\u0d2f\u0d41\u0d02 \u0d28\u0d3f\u0d2c\u0d28\u0d4d\u0d27\u0d28\u0d15\u0d33\u0d41\u0d02"},
+mr:{visual_challenge:"\u0926\u0943\u0936\u094d\u200d\u092f\u092e\u093e\u0928 \u0906\u0935\u094d\u0939\u093e\u0928 \u092a\u094d\u0930\u093e\u092a\u094d\u0924 \u0915\u0930\u093e",audio_challenge:"\u0911\u0921\u0940\u0913 \u0906\u0935\u094d\u0939\u093e\u0928 \u092a\u094d\u0930\u093e\u092a\u094d\u0924 \u0915\u0930\u093e",refresh_btn:"\u090f\u0915 \u0928\u0935\u0940\u0928 \u0906\u0935\u094d\u0939\u093e\u0928 \u092a\u094d\u0930\u093e\u092a\u094d\u0924 \u0915\u0930\u093e",instructions_visual:"",instructions_audio:"\u0906\u092a\u0932\u094d\u092f\u093e\u0932\u093e \u091c\u0947 \u0910\u0915\u0942 \u092f\u0947\u0908\u0932 \u0924\u0947 \u091f\u093e\u0907\u092a \u0915\u0930\u093e:",
+help_btn:"\u092e\u0926\u0924",play_again:"\u0927\u094d\u200d\u0935\u0928\u0940 \u092a\u0941\u0928\u094d\u0939\u093e \u092a\u094d\u200d\u0932\u0947 \u0915\u0930\u093e",cant_hear_this:"MP3 \u0930\u0941\u092a\u093e\u0924 \u0927\u094d\u200d\u0935\u0928\u0940 \u0921\u093e\u0909\u0928\u0932\u094b\u0921 \u0915\u0930\u093e",incorrect_try_again:"\u0905\u092f\u094b\u0917\u094d\u200d\u092f. \u092a\u0941\u0928\u094d\u200d\u0939\u093e \u092a\u094d\u0930\u092f\u0924\u094d\u200d\u0928 \u0915\u0930\u093e.",image_alt_text:"reCAPTCHA \u0906\u0935\u094d\u200d\u0939\u093e\u0928 \u092a\u094d\u0930\u0924\u093f\u092e\u093e",
+privacy_and_terms:"\u0917\u094b\u092a\u0928\u0940\u092f\u0924\u093e \u0906\u0923\u093f \u0905\u091f\u0940"},ms:{visual_challenge:"Dapatkan cabaran visual",audio_challenge:"Dapatkan cabaran audio",refresh_btn:"Dapatkan cabaran baru",instructions_visual:"Taipkan teksnya:",instructions_audio:"Taip apa yang didengari:",help_btn:"Bantuan",play_again:"Mainkan bunyi sekali lagi",cant_hear_this:"Muat turun bunyi sebagai MP3",incorrect_try_again:"Tidak betul. Cuba lagi.",image_alt_text:"Imej cabaran reCAPTCHA",
+privacy_and_terms:"Privasi & Syarat"},nl:{visual_challenge:"Een visuele uitdaging proberen",audio_challenge:"Een audio-uitdaging proberen",refresh_btn:"Een nieuwe uitdaging proberen",instructions_visual:"Typ de tekst:",instructions_audio:"Typ wat u hoort:",help_btn:"Help",play_again:"Geluid opnieuw afspelen",cant_hear_this:"Geluid downloaden als MP3",incorrect_try_again:"Onjuist. Probeer het opnieuw.",image_alt_text:"reCAPTCHA-uitdagingsafbeelding",privacy_and_terms:"Privacy en voorwaarden"},no:{visual_challenge:"F\u00e5 en bildeutfordring",
+audio_challenge:"F\u00e5 en lydutfordring",refresh_btn:"F\u00e5 en ny utfordring",instructions_visual:"Skriv inn teksten:",instructions_audio:"Skriv inn det du h\u00f8rer:",help_btn:"Hjelp",play_again:"Spill av lyd p\u00e5 nytt",cant_hear_this:"Last ned lyd som MP3",incorrect_try_again:"Feil. Pr\u00f8v p\u00e5 nytt.",image_alt_text:"reCAPTCHA-utfordringsbilde",privacy_and_terms:"Personvern og vilk\u00e5r"},pl:{visual_challenge:"Poka\u017c podpowied\u017a wizualn\u0105",audio_challenge:"Odtw\u00f3rz podpowied\u017a d\u017awi\u0119kow\u0105",
+refresh_btn:"Nowa podpowied\u017a",instructions_visual:"Przepisz tekst:",instructions_audio:"Wpisz us\u0142yszane s\u0142owa:",help_btn:"Pomoc",play_again:"Odtw\u00f3rz d\u017awi\u0119k ponownie",cant_hear_this:"Pobierz d\u017awi\u0119k jako plik MP3",incorrect_try_again:"Nieprawid\u0142owo. Spr\u00f3buj ponownie.",image_alt_text:"Zadanie obrazkowe reCAPTCHA",privacy_and_terms:"Prywatno\u015b\u0107 i warunki"},pt:pa,"pt-BR":pa,"pt-PT":{visual_challenge:"Obter um desafio visual",audio_challenge:"Obter um desafio de \u00e1udio",
+refresh_btn:"Obter um novo desafio",instructions_visual:"Introduza o texto:",instructions_audio:"Escreva o que ouvir:",help_btn:"Ajuda",play_again:"Reproduzir som novamente",cant_hear_this:"Transferir som como MP3",incorrect_try_again:"Incorreto. Tente novamente.",image_alt_text:"Imagem de teste reCAPTCHA",privacy_and_terms:"Privacidade e Termos de Utiliza\u00e7\u00e3o"},ro:qa,ru:{visual_challenge:"\u0412\u0438\u0437\u0443\u0430\u043b\u044c\u043d\u0430\u044f \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430",
+audio_challenge:"\u0417\u0432\u0443\u043a\u043e\u0432\u0430\u044f \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0430",refresh_btn:"\u041e\u0431\u043d\u043e\u0432\u0438\u0442\u044c",instructions_visual:"\u0412\u0432\u0435\u0434\u0438\u0442\u0435 \u0442\u0435\u043a\u0441\u0442:",instructions_audio:"\u0412\u0432\u0435\u0434\u0438\u0442\u0435 \u0442\u043e, \u0447\u0442\u043e \u0441\u043b\u044b\u0448\u0438\u0442\u0435:",help_btn:"\u0421\u043f\u0440\u0430\u0432\u043a\u0430",play_again:"\u041f\u0440\u043e\u0441\u043b\u0443\u0448\u0430\u0442\u044c \u0435\u0449\u0435 \u0440\u0430\u0437",
+cant_hear_this:"\u0417\u0430\u0433\u0440\u0443\u0437\u0438\u0442\u044c MP3-\u0444\u0430\u0439\u043b",incorrect_try_again:"\u041d\u0435\u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u043e. \u041f\u043e\u0432\u0442\u043e\u0440\u0438\u0442\u0435 \u043f\u043e\u043f\u044b\u0442\u043a\u0443.",image_alt_text:"\u041f\u0440\u043e\u0432\u0435\u0440\u043a\u0430 \u043f\u043e \u0441\u043b\u043e\u0432\u0443 reCAPTCHA",privacy_and_terms:"\u041f\u0440\u0430\u0432\u0438\u043b\u0430 \u0438 \u043f\u0440\u0438\u043d\u0446\u0438\u043f\u044b"},
+sk:{visual_challenge:"Zobrazi\u0165 vizu\u00e1lnu podobu",audio_challenge:"Prehra\u0165 zvukov\u00fa podobu",refresh_btn:"Zobrazi\u0165 nov\u00fd v\u00fdraz",instructions_visual:"Zadajte text:",instructions_audio:"Zadajte, \u010do po\u010dujete:",help_btn:"Pomocn\u00edk",play_again:"Znova prehra\u0165 zvuk",cant_hear_this:"Prevzia\u0165 zvuk v podobe s\u00faboru MP3",incorrect_try_again:"Nespr\u00e1vne. Sk\u00faste to znova.",image_alt_text:"Obr\u00e1zok zadania reCAPTCHA",privacy_and_terms:"Ochrana osobn\u00fdch \u00fadajov a Zmluvn\u00e9 podmienky"},
+sl:{visual_challenge:"Vizualni preskus",audio_challenge:"Zvo\u010dni preskus",refresh_btn:"Nov preskus",instructions_visual:"Vnesite besedilo:",instructions_audio:"Natipkajte, kaj sli\u0161ite:",help_btn:"Pomo\u010d",play_again:"Znova predvajaj zvok",cant_hear_this:"Prenesi zvok kot MP3",incorrect_try_again:"Napa\u010dno. Poskusite znova.",image_alt_text:"Slika izziva reCAPTCHA",privacy_and_terms:"Zasebnost in pogoji"},sr:{visual_challenge:"\u041f\u0440\u0438\u043c\u0438\u0442\u0435 \u0432\u0438\u0437\u0443\u0435\u043b\u043d\u0438 \u0443\u043f\u0438\u0442",
+audio_challenge:"\u041f\u0440\u0438\u043c\u0438\u0442\u0435 \u0430\u0443\u0434\u0438\u043e \u0443\u043f\u0438\u0442",refresh_btn:"\u041f\u0440\u0438\u043c\u0438\u0442\u0435 \u043d\u043e\u0432\u0438 \u0443\u043f\u0438\u0442",instructions_visual:"\u0423\u043d\u0435\u0441\u0438\u0442\u0435 \u0442\u0435\u043a\u0441\u0442:",instructions_audio:"\u041e\u0442\u043a\u0443\u0446\u0430\u0458\u0442\u0435 \u043e\u043d\u043e \u0448\u0442\u043e \u0447\u0443\u0458\u0435\u0442\u0435:",help_btn:"\u041f\u043e\u043c\u043e\u045b",
+play_again:"\u041f\u043e\u043d\u043e\u0432\u043e \u043f\u0443\u0441\u0442\u0438 \u0437\u0432\u0443\u043a",cant_hear_this:"\u041f\u0440\u0435\u0443\u0437\u043c\u0438 \u0437\u0432\u0443\u043a \u043a\u0430\u043e MP3 \u0441\u043d\u0438\u043c\u0430\u043a",incorrect_try_again:"\u041d\u0435\u0442\u0430\u0447\u043d\u043e. \u041f\u043e\u043a\u0443\u0448\u0430\u0458\u0442\u0435 \u043f\u043e\u043d\u043e\u0432\u043e.",image_alt_text:"\u0421\u043b\u0438\u043a\u0430 reCAPTCHA \u043f\u0440\u043e\u0432\u0435\u0440\u0435",
+privacy_and_terms:"\u041f\u0440\u0438\u0432\u0430\u0442\u043d\u043e\u0441\u0442 \u0438 \u0443\u0441\u043b\u043e\u0432\u0438"},sv:{visual_challenge:"H\u00e4mta captcha i bildformat",audio_challenge:"H\u00e4mta captcha i ljudformat",refresh_btn:"H\u00e4mta ny captcha",instructions_visual:"Skriv texten:",instructions_audio:"Skriv det du h\u00f6r:",help_btn:"Hj\u00e4lp",play_again:"Spela upp ljudet igen",cant_hear_this:"H\u00e4mta ljud som MP3",incorrect_try_again:"Fel. F\u00f6rs\u00f6k igen.",image_alt_text:"reCAPTCHA-bild",
+privacy_and_terms:"Sekretess och villkor"},sw:{visual_challenge:"Pata herufi za kusoma",audio_challenge:"Pata herufi za kusikiliza",refresh_btn:"Pata herufi mpya",instructions_visual:"",instructions_audio:"Charaza unachosikia:",help_btn:"Usaidizi",play_again:"Cheza sauti tena",cant_hear_this:"Pakua sauti kama MP3",incorrect_try_again:"Sio sahihi. Jaribu tena.",image_alt_text:"picha ya changamoto ya reCAPTCHA",privacy_and_terms:"Faragha & Masharti"},ta:{visual_challenge:"\u0baa\u0bbe\u0bb0\u0bcd\u0bb5\u0bc8 \u0b9a\u0bc7\u0bb2\u0b9e\u0bcd\u0b9a\u0bc8\u0baa\u0bcd \u0baa\u0bc6\u0bb1\u0bc1\u0b95",
+audio_challenge:"\u0b86\u0b9f\u0bbf\u0baf\u0bcb \u0b9a\u0bc7\u0bb2\u0b9e\u0bcd\u0b9a\u0bc8\u0baa\u0bcd \u0baa\u0bc6\u0bb1\u0bc1\u0b95",refresh_btn:"\u0baa\u0bc1\u0ba4\u0bbf\u0baf \u0b9a\u0bc7\u0bb2\u0b9e\u0bcd\u0b9a\u0bc8\u0baa\u0bcd \u0baa\u0bc6\u0bb1\u0bc1\u0b95",instructions_visual:"",instructions_audio:"\u0b95\u0bc7\u0b9f\u0bcd\u0baa\u0ba4\u0bc8 \u0b9f\u0bc8\u0baa\u0bcd \u0b9a\u0bc6\u0baf\u0bcd\u0b95:",help_btn:"\u0b89\u0ba4\u0bb5\u0bbf",play_again:"\u0b92\u0bb2\u0bbf\u0baf\u0bc8 \u0bae\u0bc0\u0ba3\u0bcd\u0b9f\u0bc1\u0bae\u0bcd \u0b87\u0baf\u0b95\u0bcd\u0b95\u0bc1",
+cant_hear_this:"\u0b92\u0bb2\u0bbf\u0baf\u0bc8 MP3 \u0b86\u0b95 \u0baa\u0ba4\u0bbf\u0bb5\u0bbf\u0bb1\u0b95\u0bcd\u0b95\u0bc1\u0b95",incorrect_try_again:"\u0ba4\u0bb5\u0bb1\u0bbe\u0ba9\u0ba4\u0bc1. \u0bae\u0bc0\u0ba3\u0bcd\u0b9f\u0bc1\u0bae\u0bcd \u0bae\u0bc1\u0baf\u0bb2\u0bb5\u0bc1\u0bae\u0bcd.",image_alt_text:"reCAPTCHA \u0b9a\u0bc7\u0bb2\u0b9e\u0bcd\u0b9a\u0bcd \u0baa\u0b9f\u0bae\u0bcd",privacy_and_terms:"\u0ba4\u0ba9\u0bbf\u0baf\u0bc1\u0bb0\u0bbf\u0bae\u0bc8 & \u0bb5\u0bbf\u0ba4\u0bbf\u0bae\u0bc1\u0bb1\u0bc8\u0b95\u0bb3\u0bcd"},
+te:{visual_challenge:"\u0c12\u0c15 \u0c26\u0c43\u0c36\u0c4d\u0c2f\u0c2e\u0c3e\u0c28 \u0c38\u0c35\u0c3e\u0c32\u0c41\u0c28\u0c41 \u0c38\u0c4d\u0c35\u0c40\u0c15\u0c30\u0c3f\u0c02\u0c1a\u0c02\u0c21\u0c3f",audio_challenge:"\u0c12\u0c15 \u0c06\u0c21\u0c3f\u0c2f\u0c4b \u0c38\u0c35\u0c3e\u0c32\u0c41\u0c28\u0c41 \u0c38\u0c4d\u0c35\u0c40\u0c15\u0c30\u0c3f\u0c02\u0c1a\u0c02\u0c21\u0c3f",refresh_btn:"\u0c15\u0c4d\u0c30\u0c4a\u0c24\u0c4d\u0c24 \u0c38\u0c35\u0c3e\u0c32\u0c41\u0c28\u0c41 \u0c38\u0c4d\u0c35\u0c40\u0c15\u0c30\u0c3f\u0c02\u0c1a\u0c02\u0c21\u0c3f",
+instructions_visual:"",instructions_audio:"\u0c2e\u0c40\u0c30\u0c41 \u0c35\u0c3f\u0c28\u0c4d\u0c28\u0c26\u0c3f \u0c1f\u0c48\u0c2a\u0c4d \u0c1a\u0c47\u0c2f\u0c02\u0c21\u0c3f:",help_btn:"\u0c38\u0c39\u0c3e\u0c2f\u0c02",play_again:"\u0c27\u0c4d\u0c35\u0c28\u0c3f\u0c28\u0c3f \u0c2e\u0c33\u0c4d\u0c32\u0c40 \u0c2a\u0c4d\u0c32\u0c47 \u0c1a\u0c47\u0c2f\u0c3f",cant_hear_this:"\u0c27\u0c4d\u0c35\u0c28\u0c3f\u0c28\u0c3f MP3 \u0c35\u0c32\u0c46 \u0c21\u0c4c\u0c28\u0c4d\u200c\u0c32\u0c4b\u0c21\u0c4d \u0c1a\u0c47\u0c2f\u0c3f",
+incorrect_try_again:"\u0c24\u0c2a\u0c4d\u0c2a\u0c41. \u0c2e\u0c33\u0c4d\u0c32\u0c40 \u0c2a\u0c4d\u0c30\u0c2f\u0c24\u0c4d\u0c28\u0c3f\u0c02\u0c1a\u0c02\u0c21\u0c3f.",image_alt_text:"reCAPTCHA \u0c38\u0c35\u0c3e\u0c32\u0c41 \u0c1a\u0c3f\u0c24\u0c4d\u0c30\u0c02",privacy_and_terms:"\u0c17\u0c4b\u0c2a\u0c4d\u0c2f\u0c24 & \u0c28\u0c3f\u0c2c\u0c02\u0c27\u0c28\u0c32\u0c41"},th:{visual_challenge:"\u0e23\u0e31\u0e1a\u0e04\u0e27\u0e32\u0e21\u0e17\u0e49\u0e32\u0e17\u0e32\u0e22\u0e14\u0e49\u0e32\u0e19\u0e20\u0e32\u0e1e",
+audio_challenge:"\u0e23\u0e31\u0e1a\u0e04\u0e27\u0e32\u0e21\u0e17\u0e49\u0e32\u0e17\u0e32\u0e22\u0e14\u0e49\u0e32\u0e19\u0e40\u0e2a\u0e35\u0e22\u0e07",refresh_btn:"\u0e23\u0e31\u0e1a\u0e04\u0e27\u0e32\u0e21\u0e17\u0e49\u0e32\u0e17\u0e32\u0e22\u0e43\u0e2b\u0e21\u0e48",instructions_visual:"\u0e1e\u0e34\u0e21\u0e1e\u0e4c\u0e02\u0e49\u0e2d\u0e04\u0e27\u0e32\u0e21\u0e19\u0e35\u0e49:",instructions_audio:"\u0e1e\u0e34\u0e21\u0e1e\u0e4c\u0e2a\u0e34\u0e48\u0e07\u0e17\u0e35\u0e48\u0e04\u0e38\u0e13\u0e44\u0e14\u0e49\u0e22\u0e34\u0e19:",
+help_btn:"\u0e04\u0e27\u0e32\u0e21\u0e0a\u0e48\u0e27\u0e22\u0e40\u0e2b\u0e25\u0e37\u0e2d",play_again:"\u0e40\u0e25\u0e48\u0e19\u0e40\u0e2a\u0e35\u0e22\u0e07\u0e2d\u0e35\u0e01\u0e04\u0e23\u0e31\u0e49\u0e07",cant_hear_this:"\u0e14\u0e32\u0e27\u0e42\u0e2b\u0e25\u0e14\u0e40\u0e2a\u0e35\u0e22\u0e07\u0e40\u0e1b\u0e47\u0e19 MP3",incorrect_try_again:"\u0e44\u0e21\u0e48\u0e16\u0e39\u0e01\u0e15\u0e49\u0e2d\u0e07 \u0e25\u0e2d\u0e07\u0e2d\u0e35\u0e01\u0e04\u0e23\u0e31\u0e49\u0e07",image_alt_text:"\u0e23\u0e2b\u0e31\u0e2a\u0e20\u0e32\u0e1e reCAPTCHA",
+privacy_and_terms:"\u0e19\u0e42\u0e22\u0e1a\u0e32\u0e22\u0e2a\u0e48\u0e27\u0e19\u0e1a\u0e38\u0e04\u0e04\u0e25\u0e41\u0e25\u0e30\u0e02\u0e49\u0e2d\u0e01\u0e33\u0e2b\u0e19\u0e14"},tr:{visual_challenge:"G\u00f6rsel sorgu al",audio_challenge:"Sesli sorgu al",refresh_btn:"Yeniden y\u00fckle",instructions_visual:"Metni yaz\u0131n:",instructions_audio:"Duydu\u011funuzu yaz\u0131n:",help_btn:"Yard\u0131m",play_again:"Sesi tekrar \u00e7al",cant_hear_this:"Sesi MP3 olarak indir",incorrect_try_again:"Yanl\u0131\u015f. Tekrar deneyin.",
+image_alt_text:"reCAPTCHA sorusu resmi",privacy_and_terms:"Gizlilik ve \u015eartlar"},uk:{visual_challenge:"\u041e\u0442\u0440\u0438\u043c\u0430\u0442\u0438 \u0432\u0456\u0437\u0443\u0430\u043b\u044c\u043d\u0438\u0439 \u0442\u0435\u043a\u0441\u0442",audio_challenge:"\u041e\u0442\u0440\u0438\u043c\u0430\u0442\u0438 \u0430\u0443\u0434\u0456\u043e\u0437\u0430\u043f\u0438\u0441",refresh_btn:"\u041e\u043d\u043e\u0432\u0438\u0442\u0438 \u0442\u0435\u043a\u0441\u0442",instructions_visual:"\u0412\u0432\u0435\u0434\u0456\u0442\u044c \u0442\u0435\u043a\u0441\u0442:",
+instructions_audio:"\u0412\u0432\u0435\u0434\u0456\u0442\u044c \u043f\u043e\u0447\u0443\u0442\u0435:",help_btn:"\u0414\u043e\u0432\u0456\u0434\u043a\u0430",play_again:"\u0412\u0456\u0434\u0442\u0432\u043e\u0440\u0438\u0442\u0438 \u0437\u0430\u043f\u0438\u0441 \u0449\u0435 \u0440\u0430\u0437",cant_hear_this:"\u0417\u0430\u0432\u0430\u043d\u0442\u0430\u0436\u0438\u0442\u0438 \u0437\u0430\u043f\u0438\u0441 \u044f\u043a MP3",incorrect_try_again:"\u041d\u0435\u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u043e. \u0421\u043f\u0440\u043e\u0431\u0443\u0439\u0442\u0435 \u0449\u0435 \u0440\u0430\u0437.",
+image_alt_text:"\u0417\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u043d\u044f \u0437\u0430\u0432\u0434\u0430\u043d\u043d\u044f reCAPTCHA",privacy_and_terms:"\u041a\u043e\u043d\u0444\u0456\u0434\u0435\u043d\u0446\u0456\u0439\u043d\u0456\u0441\u0442\u044c \u0456 \u0443\u043c\u043e\u0432\u0438"},ur:{visual_challenge:"\u0627\u06cc\u06a9 \u0645\u0631\u0626\u06cc \u0686\u06cc\u0644\u0646\u062c \u062d\u0627\u0635\u0644 \u06a9\u0631\u06cc\u06ba",audio_challenge:"\u0627\u06cc\u06a9 \u0622\u0688\u06cc\u0648 \u0686\u06cc\u0644\u0646\u062c \u062d\u0627\u0635\u0644 \u06a9\u0631\u06cc\u06ba",
+refresh_btn:"\u0627\u06cc\u06a9 \u0646\u06cc\u0627 \u0686\u06cc\u0644\u0646\u062c \u062d\u0627\u0635\u0644 \u06a9\u0631\u06cc\u06ba",instructions_visual:"",instructions_audio:"\u062c\u0648 \u0633\u0646\u0627\u0626\u06cc \u062f\u06cc\u062a\u0627 \u06c1\u06d2 \u0648\u06c1 \u0679\u0627\u0626\u067e \u06a9\u0631\u06cc\u06ba:",help_btn:"\u0645\u062f\u062f",play_again:"\u0622\u0648\u0627\u0632 \u062f\u0648\u0628\u0627\u0631\u06c1 \u0686\u0644\u0627\u0626\u06cc\u06ba",cant_hear_this:"\u0622\u0648\u0627\u0632 \u06a9\u0648 MP3 \u06a9\u06d2 \u0628\u0637\u0648\u0631 \u0688\u0627\u0624\u0646 \u0644\u0648\u0688 \u06a9\u0631\u06cc\u06ba",
+incorrect_try_again:"\u063a\u0644\u0637\u06d4 \u062f\u0648\u0628\u0627\u0631\u06c1 \u06a9\u0648\u0634\u0634 \u06a9\u0631\u06cc\u06ba\u06d4",image_alt_text:"reCAPTCHA \u0686\u06cc\u0644\u0646\u062c \u0648\u0627\u0644\u06cc \u0634\u0628\u06cc\u06c1",privacy_and_terms:"\u0631\u0627\u0632\u062f\u0627\u0631\u06cc \u0648 \u0634\u0631\u0627\u0626\u0637"},vi:{visual_challenge:"Nh\u1eadn th\u1eed th\u00e1ch h\u00ecnh \u1ea3nh",audio_challenge:"Nh\u1eadn th\u1eed th\u00e1ch \u00e2m thanh",refresh_btn:"Nh\u1eadn th\u1eed th\u00e1ch m\u1edbi",
+instructions_visual:"Nh\u1eadp v\u0103n b\u1ea3n:",instructions_audio:"Nh\u1eadp n\u1ed9i dung b\u1ea1n nghe th\u1ea5y:",help_btn:"Tr\u1ee3 gi\u00fap",play_again:"Ph\u00e1t l\u1ea1i \u00e2m thanh",cant_hear_this:"T\u1ea3i \u00e2m thanh xu\u1ed1ng d\u01b0\u1edbi d\u1ea1ng MP3",incorrect_try_again:"Kh\u00f4ng ch\u00ednh x\u00e1c. H\u00e3y th\u1eed l\u1ea1i.",image_alt_text:"H\u00ecnh x\u00e1c th\u1ef1c reCAPTCHA",privacy_and_terms:"B\u1ea3o m\u1eadt v\u00e0 \u0111i\u1ec1u kho\u1ea3n"},"zh-CN":ra,"zh-HK":{visual_challenge:"\u56de\u7b54\u5716\u50cf\u9a57\u8b49\u554f\u984c",
+audio_challenge:"\u53d6\u5f97\u8a9e\u97f3\u9a57\u8b49\u554f\u984c",refresh_btn:"\u63db\u4e00\u500b\u9a57\u8b49\u554f\u984c",instructions_visual:"\u8f38\u5165\u6587\u5b57\uff1a",instructions_audio:"\u9375\u5165\u60a8\u6240\u807d\u5230\u7684\uff1a",help_btn:"\u8aaa\u660e",play_again:"\u518d\u6b21\u64ad\u653e\u8072\u97f3",cant_hear_this:"\u5c07\u8072\u97f3\u4e0b\u8f09\u70ba MP3",incorrect_try_again:"\u4e0d\u6b63\u78ba\uff0c\u518d\u8a66\u4e00\u6b21\u3002",image_alt_text:"reCAPTCHA \u9a57\u8b49\u6587\u5b57\u5716\u7247",
+privacy_and_terms:"\u79c1\u96b1\u6b0a\u8207\u689d\u6b3e"},"zh-TW":{visual_challenge:"\u53d6\u5f97\u5716\u7247\u9a57\u8b49\u554f\u984c",audio_challenge:"\u53d6\u5f97\u8a9e\u97f3\u9a57\u8b49\u554f\u984c",refresh_btn:"\u53d6\u5f97\u65b0\u7684\u9a57\u8b49\u554f\u984c",instructions_visual:"\u8acb\u8f38\u5165\u5716\u7247\u4e2d\u7684\u6587\u5b57\uff1a",instructions_audio:"\u8acb\u8f38\u5165\u8a9e\u97f3\u5167\u5bb9\uff1a",help_btn:"\u8aaa\u660e",play_again:"\u518d\u6b21\u64ad\u653e",cant_hear_this:"\u4ee5 MP3 \u683c\u5f0f\u4e0b\u8f09\u8072\u97f3",
+incorrect_try_again:"\u9a57\u8b49\u78bc\u6709\u8aa4\uff0c\u8acb\u518d\u8a66\u4e00\u6b21\u3002",image_alt_text:"reCAPTCHA \u9a57\u8b49\u6587\u5b57\u5716\u7247",privacy_and_terms:"\u96b1\u79c1\u6b0a\u8207\u689d\u6b3e"},zu:{visual_challenge:"Thola inselelo ebonakalayo",audio_challenge:"Thola inselelo yokulalelwayo",refresh_btn:"Thola inselelo entsha",instructions_visual:"",instructions_audio:"Bhala okuzwayo:",help_btn:"Usizo",play_again:"Phinda udlale okulalelwayo futhi",cant_hear_this:"Layisha umsindo njenge-MP3",
+incorrect_try_again:"Akulungile. Zama futhi.",image_alt_text:"umfanekiso oyinselelo we-reCAPTCHA",privacy_and_terms:"Okwangasese kanye nemigomo"},tl:la,he:oa,"in":na,mo:qa,zh:ra};var x=function(a){if(Error.captureStackTrace)Error.captureStackTrace(this,x);else{var b=Error().stack;b&&(this.stack=b)}a&&(this.message=String(a))};u(x,Error);x.prototype.name="CustomError";var ta;var ua=function(a,b){for(var c=a.split("%s"),d="",e=Array.prototype.slice.call(arguments,1);e.length&&1<c.length;)d+=c.shift()+e.shift();return d+c.join("%s")},va=String.prototype.trim?function(a){return a.trim()}:function(a){return a.replace(/^[\s\xa0]+|[\s\xa0]+$/g,"")},Da=function(a){if(!wa.test(a))return a;-1!=a.indexOf("&")&&(a=a.replace(xa,"&amp;"));-1!=a.indexOf("<")&&(a=a.replace(ya,"&lt;"));-1!=a.indexOf(">")&&(a=a.replace(za,"&gt;"));-1!=a.indexOf('"')&&(a=a.replace(Aa,"&quot;"));-1!=a.indexOf("'")&&
+(a=a.replace(Ba,"&#39;"));-1!=a.indexOf("\x00")&&(a=a.replace(Ca,"&#0;"));return a},xa=/&/g,ya=/</g,za=/>/g,Aa=/"/g,Ba=/'/g,Ca=/\x00/g,wa=/[\x00&<>"']/,Ea=function(a,b){return a<b?-1:a>b?1:0},Fa=function(a){return String(a).replace(/\-([a-z])/g,function(a,c){return c.toUpperCase()})},Ga=function(a){var b=q(void 0)?"undefined".replace(/([-()\[\]{}+?*.$\^|,:#<!\\])/g,"\\$1").replace(/\x08/g,"\\x08"):"\\s";return a.replace(new RegExp("(^"+(b?"|["+b+"]+":"")+")([a-z])","g"),function(a,b,e){return b+e.toUpperCase()})};var Ha=function(a,b){b.unshift(a);x.call(this,ua.apply(null,b));b.shift()};u(Ha,x);Ha.prototype.name="AssertionError";
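+// Compiled Closure Library internals follow: Ia()/y() are goog.asserts-style helpers that throw Ha ("AssertionError") with printf-style messages, and the z/Ma/Na/Oa/Pa block below wraps the native Array indexOf/forEach/map/some with fallbacks for older engines.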
+var Ia=function(a,b,c,d){var e="Assertion failed";if(c)var e=e+(": "+c),g=d;else a&&(e+=": "+a,g=b);throw new Ha(""+e,g||[]);},y=function(a,b,c){a||Ia("",null,b,Array.prototype.slice.call(arguments,2))},Ja=function(a,b){throw new Ha("Failure"+(a?": "+a:""),Array.prototype.slice.call(arguments,1));},Ka=function(a,b,c){q(a)||Ia("Expected string but got %s: %s.",[n(a),a],b,Array.prototype.slice.call(arguments,2));return a},La=function(a,b,c){r(a)||Ia("Expected function but got %s: %s.",[n(a),a],b,Array.prototype.slice.call(arguments,
+2))};var z=Array.prototype,Ma=z.indexOf?function(a,b,c){y(null!=a.length);return z.indexOf.call(a,b,c)}:function(a,b,c){c=null==c?0:0>c?Math.max(0,a.length+c):c;if(q(a))return q(b)&&1==b.length?a.indexOf(b,c):-1;for(;c<a.length;c++)if(c in a&&a[c]===b)return c;return-1},Na=z.forEach?function(a,b,c){y(null!=a.length);z.forEach.call(a,b,c)}:function(a,b,c){for(var d=a.length,e=q(a)?a.split(""):a,g=0;g<d;g++)g in e&&b.call(c,e[g],g,a)},Oa=z.map?function(a,b,c){y(null!=a.length);return z.map.call(a,b,c)}:
+function(a,b,c){for(var d=a.length,e=Array(d),g=q(a)?a.split(""):a,f=0;f<d;f++)f in g&&(e[f]=b.call(c,g[f],f,a));return e},Pa=z.some?function(a,b,c){y(null!=a.length);return z.some.call(a,b,c)}:function(a,b,c){for(var d=a.length,e=q(a)?a.split(""):a,g=0;g<d;g++)if(g in e&&b.call(c,e[g],g,a))return!0;return!1},Qa=function(a,b){var c=Ma(a,b),d;if(d=0<=c)y(null!=a.length),z.splice.call(a,c,1);return d},Ra=function(a){var b=a.length;if(0<b){for(var c=Array(b),d=0;d<b;d++)c[d]=a[d];return c}return[]},
+Sa=function(a,b,c){y(null!=a.length);return 2>=arguments.length?z.slice.call(a,b):z.slice.call(a,b,c)};var Ta=function(a,b){for(var c in a)b.call(void 0,a[c],c,a)},Ua=function(a){var b=[],c=0,d;for(d in a)b[c++]=d;return b},Va=function(a){for(var b in a)return!1;return!0},Xa=function(){var a=Wa()?k.google_ad:null,b={},c;for(c in a)b[c]=a[c];return b},Ya="constructor hasOwnProperty isPrototypeOf propertyIsEnumerable toLocaleString toString valueOf".split(" "),Za=function(a,b){for(var c,d,e=1;e<arguments.length;e++){d=arguments[e];for(c in d)a[c]=d[c];for(var g=0;g<Ya.length;g++)c=Ya[g],Object.prototype.hasOwnProperty.call(d,
+c)&&(a[c]=d[c])}},$a=function(a){var b=arguments.length;if(1==b&&p(arguments[0]))return $a.apply(null,arguments[0]);for(var c={},d=0;d<b;d++)c[arguments[d]]=!0;return c};var A;t:{var ab=k.navigator;if(ab){var bb=ab.userAgent;if(bb){A=bb;break t}}A=""}var B=function(a){return-1!=A.indexOf(a)};var cb=B("Opera")||B("OPR"),C=B("Trident")||B("MSIE"),D=B("Gecko")&&-1==A.toLowerCase().indexOf("webkit")&&!(B("Trident")||B("MSIE")),E=-1!=A.toLowerCase().indexOf("webkit"),db=function(){var a=k.document;return a?a.documentMode:void 0},eb=function(){var a="",b;if(cb&&k.opera)return a=k.opera.version,r(a)?a():a;D?b=/rv\:([^\);]+)(\)|;)/:C?b=/\b(?:MSIE|rv)[: ]([^\);]+)(\)|;)/:E&&(b=/WebKit\/(\S+)/);b&&(a=(a=b.exec(A))?a[1]:"");return C&&(b=db(),b>parseFloat(a))?String(b):a}(),fb={},F=function(a){var b;
+if(!(b=fb[a])){b=0;for(var c=va(String(eb)).split("."),d=va(String(a)).split("."),e=Math.max(c.length,d.length),g=0;0==b&&g<e;g++){var f=c[g]||"",m=d[g]||"",$=RegExp("(\\d*)(\\D*)","g"),K=RegExp("(\\d*)(\\D*)","g");do{var G=$.exec(f)||["","",""],aa=K.exec(m)||["","",""];if(0==G[0].length&&0==aa[0].length)break;b=Ea(0==G[1].length?0:parseInt(G[1],10),0==aa[1].length?0:parseInt(aa[1],10))||Ea(0==G[2].length,0==aa[2].length)||Ea(G[2],aa[2])}while(0==b)}b=fb[a]=0<=b}return b},gb=k.document,hb=gb&&C?db()||
+("CSS1Compat"==gb.compatMode?parseInt(eb,10):5):void 0;var ib=function(a){if(8192>a.length)return String.fromCharCode.apply(null,a);for(var b="",c=0;c<a.length;c+=8192)var d=Sa(a,c,c+8192),b=b+String.fromCharCode.apply(null,d);return b},jb=function(a){return Oa(a,function(a){a=a.toString(16);return 1<a.length?a:"0"+a}).join("")};var kb=null,lb=null,mb=function(a){if(!kb){kb={};lb={};for(var b=0;65>b;b++)kb[b]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=".charAt(b),lb[kb[b]]=b,62<=b&&(lb["ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.".charAt(b)]=b)}for(var b=lb,c=[],d=0;d<a.length;){var e=b[a.charAt(d++)],g=d<a.length?b[a.charAt(d)]:0;++d;var f=d<a.length?b[a.charAt(d)]:64;++d;var m=d<a.length?b[a.charAt(d)]:64;++d;if(null==e||null==g||null==f||null==m)throw Error();c.push(e<<2|g>>
+4);64!=f&&(c.push(g<<4&240|f>>2),64!=m&&c.push(f<<6&192|m))}return c};var H=function(){this.disposed_=this.disposed_;this.onDisposeCallbacks_=this.onDisposeCallbacks_};H.prototype.disposed_=!1;H.prototype.dispose=function(){this.disposed_||(this.disposed_=!0,this.disposeInternal())};var nb=function(a,b){a.onDisposeCallbacks_||(a.onDisposeCallbacks_=[]);a.onDisposeCallbacks_.push(l(void 0)?s(b,void 0):b)};H.prototype.disposeInternal=function(){if(this.onDisposeCallbacks_)for(;this.onDisposeCallbacks_.length;)this.onDisposeCallbacks_.shift()()};
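+// H is the compiled goog.Disposable: dispose() runs disposeInternal() once and drains the onDisposeCallbacks_ queue; ob() below safely disposes any object exposing a dispose() method. DOM helpers (element creation, attribute mapping, removal) follow.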
+var ob=function(a){a&&"function"==typeof a.dispose&&a.dispose()};var pb=!C||C&&9<=hb;!D&&!C||C&&C&&9<=hb||D&&F("1.9.1");C&&F("9");var sb=function(a){return a?new qb(rb(a)):ta||(ta=new qb)},tb=function(a,b){return q(b)?a.getElementById(b):b},vb=function(a,b){Ta(b,function(b,d){"style"==d?a.style.cssText=b:"class"==d?a.className=b:"for"==d?a.htmlFor=b:d in ub?a.setAttribute(ub[d],b):0==d.lastIndexOf("aria-",0)||0==d.lastIndexOf("data-",0)?a.setAttribute(d,b):a[d]=b})},ub={cellpadding:"cellPadding",cellspacing:"cellSpacing",colspan:"colSpan",frameborder:"frameBorder",height:"height",maxlength:"maxLength",role:"role",rowspan:"rowSpan",
+type:"type",usemap:"useMap",valign:"vAlign",width:"width"},xb=function(a,b,c){return wb(document,arguments)},wb=function(a,b){var c=b[0],d=b[1];if(!pb&&d&&(d.name||d.type)){c=["<",c];d.name&&c.push(' name="',Da(d.name),'"');if(d.type){c.push(' type="',Da(d.type),'"');var e={};Za(e,d);delete e.type;d=e}c.push(">");c=c.join("")}c=a.createElement(c);d&&(q(d)?c.className=d:p(d)?c.className=d.join(" "):vb(c,d));2<b.length&&yb(a,c,b);return c},yb=function(a,b,c){function d(c){c&&b.appendChild(q(c)?a.createTextNode(c):
+c)}for(var e=2;e<c.length;e++){var g=c[e];!ca(g)||da(g)&&0<g.nodeType?d(g):Na(zb(g)?Ra(g):g,d)}},Ab=function(a){for(var b;b=a.firstChild;)a.removeChild(b)},Bb=function(a){a&&a.parentNode&&a.parentNode.removeChild(a)},rb=function(a){y(a,"Node cannot be null or undefined.");return 9==a.nodeType?a:a.ownerDocument||a.document},zb=function(a){if(a&&"number"==typeof a.length){if(da(a))return"function"==typeof a.item||"string"==typeof a.item;if(r(a))return"function"==typeof a.item}return!1},qb=function(a){this.document_=
+a||k.document||document};h=qb.prototype;h.getDomHelper=sb;h.getElement=function(a){return tb(this.document_,a)};h.$=qb.prototype.getElement;h.createDom=function(a,b,c){return wb(this.document_,arguments)};h.createElement=function(a){return this.document_.createElement(a)};h.createTextNode=function(a){return this.document_.createTextNode(String(a))};h.appendChild=function(a,b){a.appendChild(b)};var Cb=function(a){k.setTimeout(function(){throw a;},0)},Db,Eb=function(){var a=k.MessageChannel;"undefined"===typeof a&&"undefined"!==typeof window&&window.postMessage&&window.addEventListener&&(a=function(){var a=document.createElement("iframe");a.style.display="none";a.src="";document.documentElement.appendChild(a);var b=a.contentWindow,a=b.document;a.open();a.write("");a.close();var c="callImmediate"+Math.random(),d="file:"==b.location.protocol?"*":b.location.protocol+"//"+b.location.host,a=s(function(a){if(("*"==
+d||a.origin==d)&&a.data==c)this.port1.onmessage()},this);b.addEventListener("message",a,!1);this.port1={};this.port2={postMessage:function(){b.postMessage(c,d)}}});if("undefined"!==typeof a&&!B("Trident")&&!B("MSIE")){var b=new a,c={},d=c;b.port1.onmessage=function(){if(l(c.next)){c=c.next;var a=c.cb;c.cb=null;a()}};return function(a){d.next={cb:a};d=d.next;b.port2.postMessage(0)}}return"undefined"!==typeof document&&"onreadystatechange"in document.createElement("script")?function(a){var b=document.createElement("script");
+b.onreadystatechange=function(){b.onreadystatechange=null;b.parentNode.removeChild(b);b=null;a();a=null};document.documentElement.appendChild(b)}:function(a){k.setTimeout(a,0)}};var Kb=function(a,b){Fb||Gb();Hb||(Fb(),Hb=!0);Ib.push(new Jb(a,b))},Fb,Gb=function(){if(k.Promise&&k.Promise.resolve){var a=k.Promise.resolve();Fb=function(){a.then(Lb)}}else Fb=function(){var a=Lb;!r(k.setImmediate)||k.Window&&k.Window.prototype.setImmediate==k.setImmediate?(Db||(Db=Eb()),Db(a)):k.setImmediate(a)}},Hb=!1,Ib=[],Lb=function(){for(;Ib.length;){var a=Ib;Ib=[];for(var b=0;b<a.length;b++){var c=a[b];try{c.fn.call(c.scope)}catch(d){Cb(d)}}}Hb=!1},Jb=function(a,b){this.fn=a;this.scope=
+b};var Mb=function(a){a.prototype.then=a.prototype.then;a.prototype.$goog_Thenable=!0},Nb=function(a){if(!a)return!1;try{return!!a.$goog_Thenable}catch(b){return!1}};var L=function(a,b){this.state_=0;this.result_=void 0;this.callbackEntries_=this.parent_=null;this.hadUnhandledRejection_=this.executing_=!1;try{var c=this;a.call(b,function(a){I(c,2,a)},function(a){if(!(a instanceof J))try{if(a instanceof Error)throw a;throw Error("Promise rejected.");}catch(b){}I(c,3,a)})}catch(d){I(this,3,d)}};
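+// L is the compiled goog.Promise (state_: 0 pending, 1 blocked on a thenable, 2 fulfilled, 3 rejected). I() settles a promise, Ob() chains callback entries, and cancel() rejects with the J ("cancel") error type defined further down.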
+L.prototype.then=function(a,b,c){null!=a&&La(a,"opt_onFulfilled should be a function.");null!=b&&La(b,"opt_onRejected should be a function. Did you pass opt_context as the second argument instead of the third?");return Ob(this,r(a)?a:null,r(b)?b:null,c)};Mb(L);L.prototype.cancel=function(a){0==this.state_&&Kb(function(){var b=new J(a);Pb(this,b)},this)};
+var Pb=function(a,b){if(0==a.state_)if(a.parent_){var c=a.parent_;if(c.callbackEntries_){for(var d=0,e=-1,g=0,f;f=c.callbackEntries_[g];g++)if(f=f.child)if(d++,f==a&&(e=g),0<=e&&1<d)break;0<=e&&(0==c.state_&&1==d?Pb(c,b):(d=c.callbackEntries_.splice(e,1)[0],d.child&&Qb(c),d.onRejected(b)))}}else I(a,3,b)},Sb=function(a,b){a.callbackEntries_&&a.callbackEntries_.length||2!=a.state_&&3!=a.state_||Rb(a);a.callbackEntries_||(a.callbackEntries_=[]);a.callbackEntries_.push(b)},Ob=function(a,b,c,d){var e=
+{child:null,onFulfilled:null,onRejected:null};e.child=new L(function(a,f){e.onFulfilled=b?function(c){try{var e=b.call(d,c);a(e)}catch(K){f(K)}}:a;e.onRejected=c?function(b){try{var e=c.call(d,b);!l(e)&&b instanceof J?f(b):a(e)}catch(K){f(K)}}:f});e.child.parent_=a;Sb(a,e);return e.child};L.prototype.unblockAndFulfill_=function(a){y(1==this.state_);this.state_=0;I(this,2,a)};L.prototype.unblockAndReject_=function(a){y(1==this.state_);this.state_=0;I(this,3,a)};
+var I=function(a,b,c){if(0==a.state_){if(a==c)b=3,c=new TypeError("Promise cannot resolve to itself");else{if(Nb(c)){a.state_=1;c.then(a.unblockAndFulfill_,a.unblockAndReject_,a);return}if(da(c))try{var d=c.then;if(r(d)){Tb(a,c,d);return}}catch(e){b=3,c=e}}a.result_=c;a.state_=b;Rb(a);3!=b||c instanceof J||Ub(a,c)}},Tb=function(a,b,c){a.state_=1;var d=!1,e=function(b){d||(d=!0,a.unblockAndFulfill_(b))},g=function(b){d||(d=!0,a.unblockAndReject_(b))};try{c.call(b,e,g)}catch(f){g(f)}},Rb=function(a){a.executing_||
+(a.executing_=!0,Kb(a.executeCallbacks_,a))};L.prototype.executeCallbacks_=function(){for(;this.callbackEntries_&&this.callbackEntries_.length;){var a=this.callbackEntries_;this.callbackEntries_=[];for(var b=0;b<a.length;b++){var c=a[b],d=this.result_;if(2==this.state_)c.onFulfilled(d);else c.child&&Qb(this),c.onRejected(d)}}this.executing_=!1};
+var Qb=function(a){for(;a&&a.hadUnhandledRejection_;a=a.parent_)a.hadUnhandledRejection_=!1},Ub=function(a,b){a.hadUnhandledRejection_=!0;Kb(function(){a.hadUnhandledRejection_&&Vb.call(null,b)})},Vb=Cb,J=function(a){x.call(this,a)};u(J,x);J.prototype.name="cancel";/*
+ Portions of this code are from MochiKit, received by
+ The Closure Authors under the MIT license. All other code is Copyright
+ 2005-2009 The Closure Authors. All Rights Reserved.
+*/
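+// M is goog.async.Deferred, the MochiKit-derived API named in the license above: cc() queues [onFulfilled, onRejected, scope] triples in sequence_, and then() bridges a Deferred into a goog.Promise.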
+var M=function(a,b){this.sequence_=[];this.onCancelFunction_=a;this.defaultScope_=b||null;this.hadError_=this.fired_=!1;this.result_=void 0;this.silentlyCanceled_=this.blocking_=this.blocked_=!1;this.unhandledErrorId_=0;this.parent_=null;this.branches_=0};
+M.prototype.cancel=function(a){if(this.fired_)this.result_ instanceof M&&this.result_.cancel();else{if(this.parent_){var b=this.parent_;delete this.parent_;a?b.cancel(a):(b.branches_--,0>=b.branches_&&b.cancel())}this.onCancelFunction_?this.onCancelFunction_.call(this.defaultScope_,this):this.silentlyCanceled_=!0;this.fired_||Wb(this,new Xb)}};M.prototype.continue_=function(a,b){this.blocked_=!1;Yb(this,a,b)};
+var Yb=function(a,b,c){a.fired_=!0;a.result_=c;a.hadError_=!b;Zb(a)},ac=function(a){if(a.fired_){if(!a.silentlyCanceled_)throw new $b;a.silentlyCanceled_=!1}};M.prototype.callback=function(a){ac(this);bc(a);Yb(this,!0,a)};var Wb=function(a,b){ac(a);bc(b);Yb(a,!1,b)},bc=function(a){y(!(a instanceof M),"An execution sequence may not be initiated with a blocking Deferred.")},cc=function(a,b,c,d){y(!a.blocking_,"Blocking Deferreds can not be re-used");a.sequence_.push([b,c,d]);a.fired_&&Zb(a)};
+M.prototype.then=function(a,b,c){var d,e,g=new L(function(a,b){d=a;e=b});cc(this,d,function(a){a instanceof Xb?g.cancel():e(a)});return g.then(a,b,c)};Mb(M);
+var dc=function(a){return Pa(a.sequence_,function(a){return r(a[1])})},Zb=function(a){if(a.unhandledErrorId_&&a.fired_&&dc(a)){var b=a.unhandledErrorId_,c=ec[b];c&&(k.clearTimeout(c.id_),delete ec[b]);a.unhandledErrorId_=0}a.parent_&&(a.parent_.branches_--,delete a.parent_);for(var b=a.result_,d=c=!1;a.sequence_.length&&!a.blocked_;){var e=a.sequence_.shift(),g=e[0],f=e[1],e=e[2];if(g=a.hadError_?f:g)try{var m=g.call(e||a.defaultScope_,b);l(m)&&(a.hadError_=a.hadError_&&(m==b||m instanceof Error),
+a.result_=b=m);Nb(b)&&(d=!0,a.blocked_=!0)}catch($){b=$,a.hadError_=!0,dc(a)||(c=!0)}}a.result_=b;d&&(m=s(a.continue_,a,!0),d=s(a.continue_,a,!1),b instanceof M?(cc(b,m,d),b.blocking_=!0):b.then(m,d));c&&(b=new fc(b),ec[b.id_]=b,a.unhandledErrorId_=b.id_)},$b=function(){x.call(this)};u($b,x);$b.prototype.message="Deferred has already fired";$b.prototype.name="AlreadyCalledError";var Xb=function(){x.call(this)};u(Xb,x);Xb.prototype.message="Deferred was canceled";Xb.prototype.name="CanceledError";
+var fc=function(a){this.id_=k.setTimeout(s(this.throwError,this),0);this.error_=a};fc.prototype.throwError=function(){y(ec[this.id_],"Cannot throw an error that is not scheduled.");delete ec[this.id_];throw this.error_;};var ec={};var kc=function(a){var b={},c=b.document||document,d=document.createElement("SCRIPT"),e={script_:d,timeout_:void 0},g=new M(gc,e),f=null,m=null!=b.timeout?b.timeout:5E3;0<m&&(f=window.setTimeout(function(){hc(d,!0);Wb(g,new ic(1,"Timeout reached for loading script "+a))},m),e.timeout_=f);d.onload=d.onreadystatechange=function(){d.readyState&&"loaded"!=d.readyState&&"complete"!=d.readyState||(hc(d,b.cleanupWhenDone||!1,f),g.callback(null))};d.onerror=function(){hc(d,!0,f);Wb(g,new ic(0,"Error while loading script "+
+a))};vb(d,{type:"text/javascript",charset:"UTF-8",src:a});jc(c).appendChild(d);return g},jc=function(a){var b=a.getElementsByTagName("HEAD");return b&&0!=b.length?b[0]:a.documentElement},gc=function(){if(this&&this.script_){var a=this.script_;a&&"SCRIPT"==a.tagName&&hc(a,!0,this.timeout_)}},hc=function(a,b,c){null!=c&&k.clearTimeout(c);a.onload=ba;a.onerror=ba;a.onreadystatechange=ba;b&&window.setTimeout(function(){Bb(a)},0)},ic=function(a,b){var c="Jsloader error (code #"+a+")";b&&(c+=": "+b);x.call(this,
+c);this.code=a};u(ic,x);var lc=function(a){lc[" "](a);return a};lc[" "]=ba;var mc=!C||C&&9<=hb,nc=C&&!F("9");!E||F("528");D&&F("1.9b")||C&&F("8")||cb&&F("9.5")||E&&F("528");D&&!F("8")||C&&F("9");var N=function(a,b){this.type=a;this.currentTarget=this.target=b;this.defaultPrevented=this.propagationStopped_=!1;this.returnValue_=!0};N.prototype.disposeInternal=function(){};N.prototype.dispose=function(){};N.prototype.preventDefault=function(){this.defaultPrevented=!0;this.returnValue_=!1};var O=function(a,b){N.call(this,a?a.type:"");this.relatedTarget=this.currentTarget=this.target=null;this.charCode=this.keyCode=this.button=this.screenY=this.screenX=this.clientY=this.clientX=this.offsetY=this.offsetX=0;this.metaKey=this.shiftKey=this.altKey=this.ctrlKey=!1;this.event_=this.state=null;if(a){var c=this.type=a.type;this.target=a.target||a.srcElement;this.currentTarget=b;var d=a.relatedTarget;if(d){if(D){var e;t:{try{lc(d.nodeName);e=!0;break t}catch(g){}e=!1}e||(d=null)}}else"mouseover"==
+c?d=a.fromElement:"mouseout"==c&&(d=a.toElement);this.relatedTarget=d;this.offsetX=E||void 0!==a.offsetX?a.offsetX:a.layerX;this.offsetY=E||void 0!==a.offsetY?a.offsetY:a.layerY;this.clientX=void 0!==a.clientX?a.clientX:a.pageX;this.clientY=void 0!==a.clientY?a.clientY:a.pageY;this.screenX=a.screenX||0;this.screenY=a.screenY||0;this.button=a.button;this.keyCode=a.keyCode||0;this.charCode=a.charCode||("keypress"==c?a.keyCode:0);this.ctrlKey=a.ctrlKey;this.altKey=a.altKey;this.shiftKey=a.shiftKey;this.metaKey=
+a.metaKey;this.state=a.state;this.event_=a;a.defaultPrevented&&this.preventDefault()}};u(O,N);O.prototype.preventDefault=function(){O.superClass_.preventDefault.call(this);var a=this.event_;if(a.preventDefault)a.preventDefault();else if(a.returnValue=!1,nc)try{if(a.ctrlKey||112<=a.keyCode&&123>=a.keyCode)a.keyCode=-1}catch(b){}};O.prototype.disposeInternal=function(){};var oc="closure_listenable_"+(1E6*Math.random()|0),pc=0;var qc=function(a,b,c,d,e){this.listener=a;this.proxy=null;this.src=b;this.type=c;this.capture=!!d;this.handler=e;this.key=++pc;this.removed=this.callOnce=!1},rc=function(a){a.removed=!0;a.listener=null;a.proxy=null;a.src=null;a.handler=null};var P=function(a){this.src=a;this.listeners={};this.typeCount_=0};P.prototype.add=function(a,b,c,d,e){var g=a.toString();a=this.listeners[g];a||(a=this.listeners[g]=[],this.typeCount_++);var f=sc(a,b,d,e);-1<f?(b=a[f],c||(b.callOnce=!1)):(b=new qc(b,this.src,g,!!d,e),b.callOnce=c,a.push(b));return b};
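+// Compiled goog.events: N/O are Event/BrowserEvent (normalizing IE quirks such as fromElement/toElement, srcElement, and keyCode handling), qc is a single listener record, and P maps event types to listener arrays for one target.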
+P.prototype.remove=function(a,b,c,d){a=a.toString();if(!(a in this.listeners))return!1;var e=this.listeners[a];b=sc(e,b,c,d);return-1<b?(rc(e[b]),y(null!=e.length),z.splice.call(e,b,1),0==e.length&&(delete this.listeners[a],this.typeCount_--),!0):!1};var tc=function(a,b){var c=b.type;if(!(c in a.listeners))return!1;var d=Qa(a.listeners[c],b);d&&(rc(b),0==a.listeners[c].length&&(delete a.listeners[c],a.typeCount_--));return d};
+P.prototype.removeAll=function(a){a=a&&a.toString();var b=0,c;for(c in this.listeners)if(!a||c==a){for(var d=this.listeners[c],e=0;e<d.length;e++)++b,rc(d[e]);delete this.listeners[c];this.typeCount_--}return b};P.prototype.getListener=function(a,b,c,d){a=this.listeners[a.toString()];var e=-1;a&&(e=sc(a,b,c,d));return-1<e?a[e]:null};var sc=function(a,b,c,d){for(var e=0;e<a.length;++e){var g=a[e];if(!g.removed&&g.listener==b&&g.capture==!!c&&g.handler==d)return e}return-1};var uc="closure_lm_"+(1E6*Math.random()|0),vc={},wc=0,xc=function(a,b,c,d,e){if(p(b)){for(var g=0;g<b.length;g++)xc(a,b[g],c,d,e);return null}c=yc(c);if(a&&a[oc])a=a.listen(b,c,d,e);else{if(!b)throw Error("Invalid event type");var g=!!d,f=zc(a);f||(a[uc]=f=new P(a));c=f.add(b,c,!1,d,e);c.proxy||(d=Ac(),c.proxy=d,d.src=a,d.listener=c,a.addEventListener?a.addEventListener(b.toString(),d,g):a.attachEvent(Bc(b.toString()),d),wc++);a=c}return a},Ac=function(){var a=Cc,b=mc?function(c){return a.call(b.src,
+b.listener,c)}:function(c){c=a.call(b.src,b.listener,c);if(!c)return c};return b},Dc=function(a,b,c,d,e){if(p(b))for(var g=0;g<b.length;g++)Dc(a,b[g],c,d,e);else c=yc(c),a&&a[oc]?a.unlisten(b,c,d,e):a&&(a=zc(a))&&(b=a.getListener(b,c,!!d,e))&&Ec(b)},Ec=function(a){if("number"==typeof a||!a||a.removed)return!1;var b=a.src;if(b&&b[oc])return tc(b.eventTargetListeners_,a);var c=a.type,d=a.proxy;b.removeEventListener?b.removeEventListener(c,d,a.capture):b.detachEvent&&b.detachEvent(Bc(c),d);wc--;(c=zc(b))?
+(tc(c,a),0==c.typeCount_&&(c.src=null,b[uc]=null)):rc(a);return!0},Bc=function(a){return a in vc?vc[a]:vc[a]="on"+a},Gc=function(a,b,c,d){var e=1;if(a=zc(a))if(b=a.listeners[b.toString()])for(b=b.concat(),a=0;a<b.length;a++){var g=b[a];g&&g.capture==c&&!g.removed&&(e&=!1!==Fc(g,d))}return Boolean(e)},Fc=function(a,b){var c=a.listener,d=a.handler||a.src;a.callOnce&&Ec(a);return c.call(d,b)},Cc=function(a,b){if(a.removed)return!0;if(!mc){var c;if(!(c=b))t:{c=["window","event"];for(var d=k,e;e=c.shift();)if(null!=
+d[e])d=d[e];else{c=null;break t}c=d}e=c;c=new O(e,this);d=!0;if(!(0>e.keyCode||void 0!=e.returnValue)){t:{var g=!1;if(0==e.keyCode)try{e.keyCode=-1;break t}catch(f){g=!0}if(g||void 0==e.returnValue)e.returnValue=!0}e=[];for(g=c.currentTarget;g;g=g.parentNode)e.push(g);for(var g=a.type,m=e.length-1;!c.propagationStopped_&&0<=m;m--)c.currentTarget=e[m],d&=Gc(e[m],g,!0,c);for(m=0;!c.propagationStopped_&&m<e.length;m++)c.currentTarget=e[m],d&=Gc(e[m],g,!1,c)}return d}return Fc(a,new O(b,this))},zc=function(a){a=
+a[uc];return a instanceof P?a:null},Hc="__closure_events_fn_"+(1E9*Math.random()>>>0),yc=function(a){y(a,"Listener can not be null.");if(r(a))return a;y(a.handleEvent,"An object listener must have handleEvent method.");a[Hc]||(a[Hc]=function(b){return a.handleEvent(b)});return a[Hc]};var Q=function(a){H.call(this);this.handler_=a;this.keys_={}};u(Q,H);var Ic=[];h=Q.prototype;h.listen=function(a,b,c,d){p(b)||(b&&(Ic[0]=b.toString()),b=Ic);for(var e=0;e<b.length;e++){var g=xc(a,b[e],c||this.handleEvent,d||!1,this.handler_||this);if(!g)break;this.keys_[g.key]=g}return this};
+h.unlisten=function(a,b,c,d,e){if(p(b))for(var g=0;g<b.length;g++)this.unlisten(a,b[g],c,d,e);else c=c||this.handleEvent,e=e||this.handler_||this,c=yc(c),d=!!d,b=a&&a[oc]?a.getListener(b,c,d,e):a?(a=zc(a))?a.getListener(b,c,d,e):null:null,b&&(Ec(b),delete this.keys_[b.key]);return this};h.removeAll=function(){Ta(this.keys_,Ec);this.keys_={}};h.disposeInternal=function(){Q.superClass_.disposeInternal.call(this);this.removeAll()};
+h.handleEvent=function(){throw Error("EventHandler.handleEvent not implemented");};var R=function(){H.call(this);this.eventTargetListeners_=new P(this);this.actualEventTarget_=this;this.parentEventTarget_=null};u(R,H);R.prototype[oc]=!0;h=R.prototype;h.setParentEventTarget=function(a){this.parentEventTarget_=a};h.addEventListener=function(a,b,c,d){xc(this,a,b,c,d)};h.removeEventListener=function(a,b,c,d){Dc(this,a,b,c,d)};
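+// Q is goog.events.EventHandler, tracking the key of every listener it registers so removeAll()/dispose() can detach them in bulk; R is goog.events.EventTarget, whose dispatchEvent() below walks the ancestor chain for capture- then bubble-phase delivery.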
+h.dispatchEvent=function(a){Jc(this);var b,c=this.parentEventTarget_;if(c){b=[];for(var d=1;c;c=c.parentEventTarget_)b.push(c),y(1E3>++d,"infinite loop")}c=this.actualEventTarget_;d=a.type||a;if(q(a))a=new N(a,c);else if(a instanceof N)a.target=a.target||c;else{var e=a;a=new N(d,c);Za(a,e)}var e=!0,g;if(b)for(var f=b.length-1;!a.propagationStopped_&&0<=f;f--)g=a.currentTarget=b[f],e=Kc(g,d,!0,a)&&e;a.propagationStopped_||(g=a.currentTarget=c,e=Kc(g,d,!0,a)&&e,a.propagationStopped_||(e=Kc(g,d,!1,a)&&
+e));if(b)for(f=0;!a.propagationStopped_&&f<b.length;f++)g=a.currentTarget=b[f],e=Kc(g,d,!1,a)&&e;return e};h.disposeInternal=function(){R.superClass_.disposeInternal.call(this);this.eventTargetListeners_&&this.eventTargetListeners_.removeAll(void 0);this.parentEventTarget_=null};h.listen=function(a,b,c,d){Jc(this);return this.eventTargetListeners_.add(String(a),b,!1,c,d)};h.unlisten=function(a,b,c,d){return this.eventTargetListeners_.remove(String(a),b,c,d)};
+var Kc=function(a,b,c,d){b=a.eventTargetListeners_.listeners[String(b)];if(!b)return!0;b=b.concat();for(var e=!0,g=0;g<b.length;++g){var f=b[g];if(f&&!f.removed&&f.capture==c){var m=f.listener,$=f.handler||f.src;f.callOnce&&tc(a.eventTargetListeners_,f);e=!1!==m.call($,d)&&e}}return e&&0!=d.returnValue_};R.prototype.getListener=function(a,b,c,d){return this.eventTargetListeners_.getListener(String(a),b,c,d)};var Jc=function(a){y(a.eventTargetListeners_,"Event target is not initialized. Did you call the superclass (goog.events.EventTarget) constructor?")};var S=function(a){R.call(this);this.imageIdToRequestMap_={};this.imageIdToImageMap_={};this.handler_=new Q(this);this.parent_=a};u(S,R);var Lc=[C&&!F("11")?"readystatechange":"load","abort","error"],Mc=function(a,b,c){(c=q(c)?c:c.src)&&(a.imageIdToRequestMap_[b]={src:c,corsRequestType:l(void 0)?void 0:null})};
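+// S is an image preloader: start() creates an <img> per queued URL, re-dispatches load/error per image, and fires "complete" once both the request and image maps have drained.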
+S.prototype.start=function(){var a=this.imageIdToRequestMap_;Na(Ua(a),function(b){var c=a[b];if(c&&(delete a[b],!this.disposed_)){var d;d=this.parent_?sb(this.parent_).createDom("img"):new Image;c.corsRequestType&&(d.crossOrigin=c.corsRequestType);this.handler_.listen(d,Lc,this.onNetworkEvent_);this.imageIdToImageMap_[b]=d;d.id=b;d.src=c.src}},this)};
+S.prototype.onNetworkEvent_=function(a){var b=a.currentTarget;if(b){if("readystatechange"==a.type)if("complete"==b.readyState)a.type="load";else return;"undefined"==typeof b.naturalWidth&&("load"==a.type?(b.naturalWidth=b.width,b.naturalHeight=b.height):(b.naturalWidth=0,b.naturalHeight=0));this.dispatchEvent({type:a.type,target:b});!this.disposed_&&(a=b.id,delete this.imageIdToRequestMap_[a],b=this.imageIdToImageMap_[a])&&(delete this.imageIdToImageMap_[a],this.handler_.unlisten(b,Lc,this.onNetworkEvent_),
+Va(this.imageIdToImageMap_)&&Va(this.imageIdToRequestMap_)&&this.dispatchEvent("complete"))}};S.prototype.disposeInternal=function(){delete this.imageIdToRequestMap_;delete this.imageIdToImageMap_;ob(this.handler_);S.superClass_.disposeInternal.call(this)};var T=function(){};T.getInstance=function(){return T.instance_?T.instance_:T.instance_=new T};T.prototype.nextId_=0;var U=function(a){R.call(this);this.dom_=a||sb();this.id_=null;this.inDocument_=!1;this.element_=null;this.googUiComponentHandler_=void 0;this.childIndex_=this.children_=this.parent_=null;this.wasDecorated_=!1};u(U,R);h=U.prototype;h.idGenerator_=T.getInstance();h.getElement=function(){return this.element_};h.setParentEventTarget=function(a){if(this.parent_&&this.parent_!=a)throw Error("Method not supported");U.superClass_.setParentEventTarget.call(this,a)};h.getDomHelper=function(){return this.dom_};
+h.createDom=function(){this.element_=this.dom_.createElement("div")};
+var Oc=function(a,b){if(a.inDocument_)throw Error("Component already rendered");a.element_||a.createDom();b?b.insertBefore(a.element_,null):a.dom_.document_.body.appendChild(a.element_);a.parent_&&!a.parent_.inDocument_||Nc(a)},Nc=function(a){a.inDocument_=!0;Pc(a,function(a){!a.inDocument_&&a.getElement()&&Nc(a)})},Qc=function(a){Pc(a,function(a){a.inDocument_&&Qc(a)});a.googUiComponentHandler_&&a.googUiComponentHandler_.removeAll();a.inDocument_=!1};
+U.prototype.disposeInternal=function(){this.inDocument_&&Qc(this);this.googUiComponentHandler_&&(this.googUiComponentHandler_.dispose(),delete this.googUiComponentHandler_);Pc(this,function(a){a.dispose()});!this.wasDecorated_&&this.element_&&Bb(this.element_);this.parent_=this.element_=this.childIndex_=this.children_=null;U.superClass_.disposeInternal.call(this)};var Pc=function(a,b){a.children_&&Na(a.children_,b,void 0)};
+U.prototype.removeChild=function(a,b){if(a){var c=q(a)?a:a.id_||(a.id_=":"+(a.idGenerator_.nextId_++).toString(36)),d;this.childIndex_&&c?(d=this.childIndex_,d=(c in d?d[c]:void 0)||null):d=null;a=d;if(c&&a){d=this.childIndex_;c in d&&delete d[c];Qa(this.children_,a);b&&(Qc(a),a.element_&&Bb(a.element_));c=a;if(null==c)throw Error("Unable to set parent component");c.parent_=null;U.superClass_.setParentEventTarget.call(c,null)}}if(!a)throw Error("Child is not in parent component");return a};var V=function(a,b,c){U.call(this,c);this.captchaImage_=a;this.adImage_=b&&300==b.naturalWidth&&57==b.naturalHeight?b:null};u(V,U);V.prototype.createDom=function(){V.superClass_.createDom.call(this);var a=this.getElement();this.captchaImage_.alt=W.image_alt_text;this.getDomHelper().appendChild(a,this.captchaImage_);this.adImage_&&(this.adImage_.alt=W.image_alt_text,this.getDomHelper().appendChild(a,this.adImage_),this.adImage_&&Rc(this.adImage_)&&(a.innerHTML+='<div id="recaptcha-ad-choices"><div class="recaptcha-ad-choices-collapsed"><img height="15" width="30" alt="AdChoices" border="0" src="//www.gstatic.com/recaptcha/api/img/adicon.png"/></div><div class="recaptcha-ad-choices-expanded"><a href="https://support.google.com/adsense/troubleshooter/1631343" target="_blank"><img height="15" width="75" alt="AdChoices" border="0" src="//www.gstatic.com/recaptcha/api/img/adchoices.png"/></a></div></div>'))};
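+// V lays out the challenge: the CAPTCHA image plus an optional 300x57 ad image, appending the "recaptcha-ad-choices" AdChoices markup when the ad is actually visible (Rc below checks computed visibility/display).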
+var Rc=function(a){var b=Sc(a,"visibility");a=Sc(a,"display");return"hidden"!=b&&"none"!=a},Sc=function(a,b){var c;t:{c=rb(a);if(c.defaultView&&c.defaultView.getComputedStyle&&(c=c.defaultView.getComputedStyle(a,null))){c=c[b]||c.getPropertyValue(b)||"";break t}c=""}if(!(c=c||(a.currentStyle?a.currentStyle[b]:null))&&(c=a.style[Fa(b)],"undefined"===typeof c)){c=a.style;var d;t:if(d=Fa(b),void 0===a.style[d]){var e=(E?"Webkit":D?"Moz":C?"ms":cb?"O":null)+Ga(d);if(void 0!==a.style[e]){d=e;break t}}c=
+c[d]||""}return c};V.prototype.disposeInternal=function(){delete this.captchaImage_;delete this.adImage_;V.superClass_.disposeInternal.call(this)};var Tc=function(a,b,c){H.call(this);this.listener_=a;this.interval_=b||0;this.handler_=c;this.callback_=s(this.doAction_,this)};u(Tc,H);h=Tc.prototype;h.id_=0;h.disposeInternal=function(){Tc.superClass_.disposeInternal.call(this);this.stop();delete this.listener_;delete this.handler_};
+h.start=function(a){this.stop();var b=this.callback_;a=l(a)?a:this.interval_;if(!r(b))if(b&&"function"==typeof b.handleEvent)b=s(b.handleEvent,b);else throw Error("Invalid listener argument");this.id_=2147483647<a?-1:k.setTimeout(b,a||0)};h.stop=function(){this.isActive()&&k.clearTimeout(this.id_);this.id_=0};h.isActive=function(){return 0!=this.id_};h.doAction_=function(){this.id_=0;this.listener_&&this.listener_.call(this.handler_)};var Uc=function(a,b){H.call(this);this.listener_=a;this.handler_=b;this.delay_=new Tc(s(this.onTick_,this),0,this)};u(Uc,H);h=Uc.prototype;h.interval_=0;h.runUntil_=0;h.disposeInternal=function(){this.delay_.dispose();delete this.listener_;delete this.handler_;Uc.superClass_.disposeInternal.call(this)};h.start=function(a,b){this.stop();var c=b||0;this.interval_=Math.max(a||0,0);this.runUntil_=0>c?-1:ha()+c;this.delay_.start(0>c?this.interval_:Math.min(this.interval_,c))};h.stop=function(){this.delay_.stop()};
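+// Tc wraps a one-shot setTimeout (goog.async.Delay-style); Uc re-polls a listener at a fixed interval until it returns true (onSuccess) or the deadline given to start(interval, maxWait) expires (onFailure).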
+h.isActive=function(){return this.delay_.isActive()};h.onSuccess=function(){};h.onFailure=function(){};h.onTick_=function(){if(this.listener_.call(this.handler_))this.onSuccess();else if(0>this.runUntil_)this.delay_.start(this.interval_);else{var a=this.runUntil_-ha();if(0>=a)this.onFailure();else this.delay_.start(Math.min(this.interval_,a))}};$a("area base br col command embed hr img input keygen link meta param source track wbr".split(" "));$a("action","cite","data","formaction","href","manifest","poster","src");$a("link","script","style");var Vc={sanitizedContentKindHtml:!0},Wc={sanitizedContentKindText:!0},Xc=function(){throw Error("Do not instantiate directly");};Xc.prototype.contentDir=null;Xc.prototype.toString=function(){return this.content};var bd=function(a){var b=Yc;y(b,"Soy template may not be null.");var c=sb().createElement("DIV");a=Zc(b(a||$c,void 0,void 0));b=a.match(ad);y(!b,"This template starts with a %s, which cannot be a child of a <div>, as required by soy internals. Consider using goog.soy.renderElement instead.\nTemplate output: %s",b&&b[0],a);c.innerHTML=a;return 1==c.childNodes.length&&(a=c.firstChild,1==a.nodeType)?a:c},Zc=function(a){if(!da(a))return String(a);if(a instanceof Xc){if(a.contentKind===Vc)return Ka(a.content);
+if(a.contentKind===Wc)return Da(a.content)}Ja("Soy template output is unsafe for use as HTML: "+a);return"zSoyz"},ad=/^<(body|caption|col|colgroup|head|html|tr|td|tbody|thead|tfoot)>/i,$c={};C&&F(8);var cd=function(){Xc.call(this)};u(cd,Xc);cd.prototype.contentKind=Vc;var dd=function(a){function b(a){this.content=a}b.prototype=a.prototype;return function(a,d){var e=new b(String(a));void 0!==d&&(e.contentDir=d);return e}}(cd);(function(a){function b(a){this.content=a}b.prototype=a.prototype;return function(a,d){var e=String(a);if(!e)return"";e=new b(e);void 0!==d&&(e.contentDir=d);return e}})(cd);
+var ed={"\x00":"\\x00","\b":"\\x08","\t":"\\t","\n":"\\n","\x0B":"\\x0b","\f":"\\f","\r":"\\r",'"':"\\x22",$:"\\x24","&":"\\x26","'":"\\x27","(":"\\x28",")":"\\x29","*":"\\x2a","+":"\\x2b",",":"\\x2c","-":"\\x2d",".":"\\x2e","/":"\\/",":":"\\x3a","<":"\\x3c","=":"\\x3d",">":"\\x3e","?":"\\x3f","[":"\\x5b","\\":"\\\\","]":"\\x5d","^":"\\x5e","{":"\\x7b","|":"\\x7c","}":"\\x7d","\u0085":"\\x85","\u2028":"\\u2028","\u2029":"\\u2029"},fd=function(a){return ed[a]},gd=/[\x00\x08-\x0d\x22\x26\x27\/\x3c-\x3e\\\x85\u2028\u2029]/g;var Yc=function(a){return dd('<script type="text/javascript">var challenge = \''+String(a.challenge).replace(gd,fd)+"'; var publisherId = '"+String(a.publisherId).replace(gd,fd)+"';"+("ca-mongoogle"==a.publisherId?'google_page_url = "3pcerttesting.com/dab/recaptcha.html";':"")+"\n google_ad_client = publisherId;\n google_ad_type = 'html';\n google_ad_output = 'js';\n google_image_size = '300x57';\n google_captcha_token = challenge;\n google_ad_request_done = function(ad) {\n window.parent.recaptcha.ads.adutils.googleAdRequestDone(ad);\n };\n \x3c/script><script type=\"text/javascript\" src=\"//pagead2.googlesyndication.com/pagead/show_ads.js\">\x3c/script>")};
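+// Yc above is the iframe ads-loader Soy template: it emits a <script> that configures a 300x57 AdSense "html" ad request keyed by the CAPTCHA token, then pulls in show_ads.js; ed/fd/gd perform the JS-string escaping it relies on.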
+Yc.soyTemplateName="recaptcha.soy.ads.iframeAdsLoader.main";var Wa=function(){var a=k.google_ad;return!!(a&&a.token&&a.imageAdUrl&&a.hashedAnswer&&a.salt&&a.delayedImpressionUrl&&a.engagementUrl)},hd=function(){k.google_ad&&(k.google_ad=null)},id=function(a){a=a||document.body;var b=k.google_ad;b&&b.searchUpliftUrl&&(b=xb("iframe",{src:'data:text/html;charset=utf-8,<body><img src="https://'+b.searchUpliftUrl+'"></img></body>',style:"display:none"}),a.appendChild(b))},jd=0,kd=function(a){var b=new S;Mc(b,"recaptcha-url-"+jd++,a);b.start()},ld=function(a,b){var c=
+RecaptchaState.publisher_id;hd();var d=xb("iframe",{id:"recaptcha-loader-"+jd++,style:"display: none"});document.body.appendChild(d);var e=d.contentWindow?d.contentWindow.document:d.contentDocument;e.open("text/html","replace");e.write(bd({challenge:a,publisherId:c}).innerHTML);e.close();c=new Uc(function(){return!!k.google_ad});c.onSuccess=function(){Bb(d);b()};c.onFailure=function(){Bb(d);b()};c.start(50,2E3)};t("recaptcha.ads.adutils.googleAdRequestDone",function(a){k.google_ad=a});var md=function(){this.blockSize=-1};var nd=function(){this.blockSize=-1;this.blockSize=64;this.chain_=Array(4);this.block_=Array(this.blockSize);this.totalLength_=this.blockLength_=0;this.reset()};u(nd,md);nd.prototype.reset=function(){this.chain_[0]=1732584193;this.chain_[1]=4023233417;this.chain_[2]=2562383102;this.chain_[3]=271733878;this.totalLength_=this.blockLength_=0};
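+// nd is the compiled goog.crypt.Md5; its chain_ seeds are the standard MD5 IVs (0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476), and od below is the 64-step block transform, fully unrolled by the compiler.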
+var od=function(a,b,c){c||(c=0);var d=Array(16);if(q(b))for(var e=0;16>e;++e)d[e]=b.charCodeAt(c++)|b.charCodeAt(c++)<<8|b.charCodeAt(c++)<<16|b.charCodeAt(c++)<<24;else for(e=0;16>e;++e)d[e]=b[c++]|b[c++]<<8|b[c++]<<16|b[c++]<<24;b=a.chain_[0];c=a.chain_[1];var e=a.chain_[2],g=a.chain_[3],f=0,f=b+(g^c&(e^g))+d[0]+3614090360&4294967295;b=c+(f<<7&4294967295|f>>>25);f=g+(e^b&(c^e))+d[1]+3905402710&4294967295;g=b+(f<<12&4294967295|f>>>20);f=e+(c^g&(b^c))+d[2]+606105819&4294967295;e=g+(f<<17&4294967295|
+f>>>15);f=c+(b^e&(g^b))+d[3]+3250441966&4294967295;c=e+(f<<22&4294967295|f>>>10);f=b+(g^c&(e^g))+d[4]+4118548399&4294967295;b=c+(f<<7&4294967295|f>>>25);f=g+(e^b&(c^e))+d[5]+1200080426&4294967295;g=b+(f<<12&4294967295|f>>>20);f=e+(c^g&(b^c))+d[6]+2821735955&4294967295;e=g+(f<<17&4294967295|f>>>15);f=c+(b^e&(g^b))+d[7]+4249261313&4294967295;c=e+(f<<22&4294967295|f>>>10);f=b+(g^c&(e^g))+d[8]+1770035416&4294967295;b=c+(f<<7&4294967295|f>>>25);f=g+(e^b&(c^e))+d[9]+2336552879&4294967295;g=b+(f<<12&4294967295|
+f>>>20);f=e+(c^g&(b^c))+d[10]+4294925233&4294967295;e=g+(f<<17&4294967295|f>>>15);f=c+(b^e&(g^b))+d[11]+2304563134&4294967295;c=e+(f<<22&4294967295|f>>>10);f=b+(g^c&(e^g))+d[12]+1804603682&4294967295;b=c+(f<<7&4294967295|f>>>25);f=g+(e^b&(c^e))+d[13]+4254626195&4294967295;g=b+(f<<12&4294967295|f>>>20);f=e+(c^g&(b^c))+d[14]+2792965006&4294967295;e=g+(f<<17&4294967295|f>>>15);f=c+(b^e&(g^b))+d[15]+1236535329&4294967295;c=e+(f<<22&4294967295|f>>>10);f=b+(e^g&(c^e))+d[1]+4129170786&4294967295;b=c+(f<<
+5&4294967295|f>>>27);f=g+(c^e&(b^c))+d[6]+3225465664&4294967295;g=b+(f<<9&4294967295|f>>>23);f=e+(b^c&(g^b))+d[11]+643717713&4294967295;e=g+(f<<14&4294967295|f>>>18);f=c+(g^b&(e^g))+d[0]+3921069994&4294967295;c=e+(f<<20&4294967295|f>>>12);f=b+(e^g&(c^e))+d[5]+3593408605&4294967295;b=c+(f<<5&4294967295|f>>>27);f=g+(c^e&(b^c))+d[10]+38016083&4294967295;g=b+(f<<9&4294967295|f>>>23);f=e+(b^c&(g^b))+d[15]+3634488961&4294967295;e=g+(f<<14&4294967295|f>>>18);f=c+(g^b&(e^g))+d[4]+3889429448&4294967295;c=
+e+(f<<20&4294967295|f>>>12);f=b+(e^g&(c^e))+d[9]+568446438&4294967295;b=c+(f<<5&4294967295|f>>>27);f=g+(c^e&(b^c))+d[14]+3275163606&4294967295;g=b+(f<<9&4294967295|f>>>23);f=e+(b^c&(g^b))+d[3]+4107603335&4294967295;e=g+(f<<14&4294967295|f>>>18);f=c+(g^b&(e^g))+d[8]+1163531501&4294967295;c=e+(f<<20&4294967295|f>>>12);f=b+(e^g&(c^e))+d[13]+2850285829&4294967295;b=c+(f<<5&4294967295|f>>>27);f=g+(c^e&(b^c))+d[2]+4243563512&4294967295;g=b+(f<<9&4294967295|f>>>23);f=e+(b^c&(g^b))+d[7]+1735328473&4294967295;
+e=g+(f<<14&4294967295|f>>>18);f=c+(g^b&(e^g))+d[12]+2368359562&4294967295;c=e+(f<<20&4294967295|f>>>12);f=b+(c^e^g)+d[5]+4294588738&4294967295;b=c+(f<<4&4294967295|f>>>28);f=g+(b^c^e)+d[8]+2272392833&4294967295;g=b+(f<<11&4294967295|f>>>21);f=e+(g^b^c)+d[11]+1839030562&4294967295;e=g+(f<<16&4294967295|f>>>16);f=c+(e^g^b)+d[14]+4259657740&4294967295;c=e+(f<<23&4294967295|f>>>9);f=b+(c^e^g)+d[1]+2763975236&4294967295;b=c+(f<<4&4294967295|f>>>28);f=g+(b^c^e)+d[4]+1272893353&4294967295;g=b+(f<<11&4294967295|
+f>>>21);f=e+(g^b^c)+d[7]+4139469664&4294967295;e=g+(f<<16&4294967295|f>>>16);f=c+(e^g^b)+d[10]+3200236656&4294967295;c=e+(f<<23&4294967295|f>>>9);f=b+(c^e^g)+d[13]+681279174&4294967295;b=c+(f<<4&4294967295|f>>>28);f=g+(b^c^e)+d[0]+3936430074&4294967295;g=b+(f<<11&4294967295|f>>>21);f=e+(g^b^c)+d[3]+3572445317&4294967295;e=g+(f<<16&4294967295|f>>>16);f=c+(e^g^b)+d[6]+76029189&4294967295;c=e+(f<<23&4294967295|f>>>9);f=b+(c^e^g)+d[9]+3654602809&4294967295;b=c+(f<<4&4294967295|f>>>28);f=g+(b^c^e)+d[12]+
+3873151461&4294967295;g=b+(f<<11&4294967295|f>>>21);f=e+(g^b^c)+d[15]+530742520&4294967295;e=g+(f<<16&4294967295|f>>>16);f=c+(e^g^b)+d[2]+3299628645&4294967295;c=e+(f<<23&4294967295|f>>>9);f=b+(e^(c|~g))+d[0]+4096336452&4294967295;b=c+(f<<6&4294967295|f>>>26);f=g+(c^(b|~e))+d[7]+1126891415&4294967295;g=b+(f<<10&4294967295|f>>>22);f=e+(b^(g|~c))+d[14]+2878612391&4294967295;e=g+(f<<15&4294967295|f>>>17);f=c+(g^(e|~b))+d[5]+4237533241&4294967295;c=e+(f<<21&4294967295|f>>>11);f=b+(e^(c|~g))+d[12]+1700485571&
+4294967295;b=c+(f<<6&4294967295|f>>>26);f=g+(c^(b|~e))+d[3]+2399980690&4294967295;g=b+(f<<10&4294967295|f>>>22);f=e+(b^(g|~c))+d[10]+4293915773&4294967295;e=g+(f<<15&4294967295|f>>>17);f=c+(g^(e|~b))+d[1]+2240044497&4294967295;c=e+(f<<21&4294967295|f>>>11);f=b+(e^(c|~g))+d[8]+1873313359&4294967295;b=c+(f<<6&4294967295|f>>>26);f=g+(c^(b|~e))+d[15]+4264355552&4294967295;g=b+(f<<10&4294967295|f>>>22);f=e+(b^(g|~c))+d[6]+2734768916&4294967295;e=g+(f<<15&4294967295|f>>>17);f=c+(g^(e|~b))+d[13]+1309151649&
+4294967295;c=e+(f<<21&4294967295|f>>>11);f=b+(e^(c|~g))+d[4]+4149444226&4294967295;b=c+(f<<6&4294967295|f>>>26);f=g+(c^(b|~e))+d[11]+3174756917&4294967295;g=b+(f<<10&4294967295|f>>>22);f=e+(b^(g|~c))+d[2]+718787259&4294967295;e=g+(f<<15&4294967295|f>>>17);f=c+(g^(e|~b))+d[9]+3951481745&4294967295;a.chain_[0]=a.chain_[0]+b&4294967295;a.chain_[1]=a.chain_[1]+(e+(f<<21&4294967295|f>>>11))&4294967295;a.chain_[2]=a.chain_[2]+e&4294967295;a.chain_[3]=a.chain_[3]+g&4294967295};
+nd.prototype.update=function(a,b){l(b)||(b=a.length);for(var c=b-this.blockSize,d=this.block_,e=this.blockLength_,g=0;g<b;){if(0==e)for(;g<=c;)od(this,a,g),g+=this.blockSize;if(q(a))for(;g<b;){if(d[e++]=a.charCodeAt(g++),e==this.blockSize){od(this,d);e=0;break}}else for(;g<b;)if(d[e++]=a[g++],e==this.blockSize){od(this,d);e=0;break}}this.blockLength_=e;this.totalLength_+=b};var X=function(){Q.call(this);this.callback_=this.element_=null;this.md5_=new nd};u(X,Q);var pd=function(a,b,c,d,e){a.unwatch();a.element_=b;a.callback_=e;a.listen(b,"keyup",s(a.onChanged_,a,c,d))};X.prototype.unwatch=function(){this.element_&&this.callback_&&(this.removeAll(),this.callback_=this.element_=null)};
+X.prototype.onChanged_=function(a,b){var c;c=(c=this.element_.value)?c.replace(/[\s\xa0]+/g,"").toLowerCase():"";this.md5_.reset();this.md5_.update(c+"."+b);c=this.md5_;var d=Array((56>c.blockLength_?c.blockSize:2*c.blockSize)-c.blockLength_);d[0]=128;for(var e=1;e<d.length-8;++e)d[e]=0;for(var g=8*c.totalLength_,e=d.length-8;e<d.length;++e)d[e]=g&255,g/=256;c.update(d);d=Array(16);for(e=g=0;4>e;++e)for(var f=0;32>f;f+=8)d[g++]=c.chain_[e]>>>f&255;jb(d).toLowerCase()==a.toLowerCase()&&this.callback_()};
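+// X watches the response field on keyup: it strips whitespace from the typed answer, lowercases it, MD5-hashes "<answer>.<salt>", and fires callback_ (the ad-engagement ping) once the hex digest equals the ad's hashedAnswer.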
+X.prototype.disposeInternal=function(){this.element_=null;X.superClass_.disposeInternal.call(this)};var rd=function(a,b,c){this.adObject_=a;this.captchaImageUrl_=b;this.opt_successCallback_=c||null;qd(this)};u(rd,H);var qd=function(a){var b=new S;nb(a,ga(ob,b));Mc(b,"recaptcha_challenge_image",a.captchaImageUrl_);Mc(b,"recaptcha_ad_image",a.adObject_.imageAdUrl);var c={};xc(b,"load",s(function(a,b){a[b.target.id]=b.target},a,c));xc(b,"complete",s(a.handleImagesLoaded_,a,c));b.start()};
+rd.prototype.handleImagesLoaded_=function(a){a=new V(a.recaptcha_challenge_image,a.recaptcha_ad_image);nb(this,ga(ob,a));var b=tb(document,"recaptcha_image");Ab(b);Oc(a,b);a.adImage_&&Rc(a.adImage_)&&(kd(this.adObject_.delayedImpressionUrl),a=new X,nb(this,ga(ob,a)),pd(a,tb(document,"recaptcha_response_field"),this.adObject_.hashedAnswer,this.adObject_.salt,s(function(a,b){a.unwatch();kd(b)},this,a,this.adObject_.engagementUrl)),this.opt_successCallback_&&this.opt_successCallback_("04"+this.adObject_.token))};var W=w;t("RecaptchaStr",W);var Y=k.RecaptchaOptions;t("RecaptchaOptions",Y);var sd={tabindex:0,theme:"red",callback:null,lang:null,custom_theme_widget:null,custom_translations:null};t("RecaptchaDefaultOptions",sd);
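+// Z is the widget controller behind the public Recaptcha API: create()/destroy(), JSONP-style challenge loading via injected <script> tags (_add_script), theme and translation setup, throttled reload, image/audio switching, and ajax_verify().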
+var Z={widget:null,timer_id:-1,style_set:!1,theme:null,type:"image",ajax_verify_cb:null,th1:null,th2:null,th3:null,element:"",ad_captcha_plugin:null,reload_timeout:-1,force_reload:!1,$:function(a){return"string"==typeof a?document.getElementById(a):a},attachEvent:function(a,b,c){a&&a.addEventListener?a.addEventListener(b,c,!1):a&&a.attachEvent&&a.attachEvent("on"+b,c)},create:function(a,b,c){Z.destroy();b&&(Z.widget=Z.$(b),Z.element=b);Z._init_options(c);Z._call_challenge(a)},destroy:function(){var a=
+Z.$("recaptcha_challenge_field");a&&a.parentNode.removeChild(a);-1!=Z.timer_id&&clearInterval(Z.timer_id);Z.timer_id=-1;if(a=Z.$("recaptcha_image"))a.innerHTML="";Z.update_widget();Z.widget&&("custom"!=Z.theme?Z.widget.innerHTML="":Z.widget.style.display="none",Z.widget=null)},focus_response_field:function(){var a=Z.$("recaptcha_response_field");a&&a.focus()},get_challenge:function(){return"undefined"==typeof RecaptchaState?null:RecaptchaState.challenge},get_response:function(){var a=Z.$("recaptcha_response_field");
+return a?a.value:null},ajax_verify:function(a){Z.ajax_verify_cb=a;a=Z.get_challenge()||"";var b=Z.get_response()||"";a=Z._get_api_server()+"/ajaxverify?c="+encodeURIComponent(a)+"&response="+encodeURIComponent(b);Z._add_script(a)},_ajax_verify_callback:function(a){Z.ajax_verify_cb(a)},_get_overridable_url:function(a){var b=window.location.protocol;if("undefined"!=typeof _RecaptchaOverrideApiServer)a=_RecaptchaOverrideApiServer;else if("undefined"!=typeof RecaptchaState&&"string"==typeof RecaptchaState.server&&
+0<RecaptchaState.server.length)return RecaptchaState.server.replace(/\/+$/,"");return b+"//"+a},_get_api_server:function(){return Z._get_overridable_url("www.google.com/recaptcha/api")},_get_static_url_root:function(){return Z._get_overridable_url("www.gstatic.com/recaptcha/api")},_call_challenge:function(a){a=Z._get_api_server()+"/challenge?k="+a+"&ajax=1&cachestop="+Math.random();Z.getLang_()&&(a+="&lang="+Z.getLang_());"undefined"!=typeof Y.extra_challenge_params&&(a+="&"+Y.extra_challenge_params);
+Z._add_script(a)},_add_script:function(a){var b=document.createElement("script");b.type="text/javascript";b.src=a;Z._get_script_area().appendChild(b)},_get_script_area:function(){var a=document.getElementsByTagName("head");return a=!a||1>a.length?document.body:a[0]},_hash_merge:function(a){for(var b={},c=0;c<a.length;c++)for(var d in a[c])b[d]=a[c][d];return b},_init_options:function(a){Y=Z._hash_merge([sd,a||{}])},challenge_callback_internal:function(){Z.update_widget();Z._reset_timer();W=Z._hash_merge([w,
+sa[Z.getLang_()]||{},Y.custom_translations||{}]);window.addEventListener&&window.addEventListener("unload",function(){Z.destroy()},!1);Z._is_ie()&&window.attachEvent&&window.attachEvent("onbeforeunload",function(){});if(0<navigator.userAgent.indexOf("KHTML")){var a=document.createElement("iframe");a.src="about:blank";a.style.height="0px";a.style.width="0px";a.style.visibility="hidden";a.style.border="none";a.appendChild(document.createTextNode("This frame prevents back/forward cache problems in Safari."));
+document.body.appendChild(a)}Z._finish_widget()},_add_css:function(a){if(-1!=navigator.appVersion.indexOf("MSIE 5"))document.write('<style type="text/css">'+a+"</style>");else{var b=document.createElement("style");b.type="text/css";b.styleSheet?b.styleSheet.cssText=a:b.appendChild(document.createTextNode(a));Z._get_script_area().appendChild(b)}},_set_style:function(a){Z.style_set||(Z.style_set=!0,Z._add_css(a+"\n\n.recaptcha_is_showing_audio .recaptcha_only_if_image,.recaptcha_isnot_showing_audio .recaptcha_only_if_audio,.recaptcha_had_incorrect_sol .recaptcha_only_if_no_incorrect_sol,.recaptcha_nothad_incorrect_sol .recaptcha_only_if_incorrect_sol{display:none !important}"))},
+_init_builtin_theme:function(){var a=Z.$,b=Z._get_static_url_root(),c=v.VertCss,d=v.VertHtml,e=b+"/img/"+Z.theme,g="gif",b=Z.theme;"clean"==b&&(c=v.CleanCss,d=v.CleanHtml,g="png");c=c.replace(/IMGROOT/g,e);Z._set_style(c);Z.update_widget();Z.widget.innerHTML='<div id="recaptcha_area">'+d+"</div>";c=Z.getLang_();a("recaptcha_privacy")&&null!=c&&"en"==c.substring(0,2).toLowerCase()&&null!=W.privacy_and_terms&&0<W.privacy_and_terms.length&&(c=document.createElement("a"),c.href="http://www.google.com/intl/en/policies/",
+c.target="_blank",c.innerHTML=W.privacy_and_terms,a("recaptcha_privacy").appendChild(c));c=function(b,c,d,K){var G=a(b);G.src=e+"/"+c+"."+g;c=W[d];G.alt=c;b=a(b+"_btn");b.title=c;Z.attachEvent(b,"click",K)};c("recaptcha_reload","refresh","refresh_btn",function(){Z.reload_internal("r")});c("recaptcha_switch_audio","audio","audio_challenge",function(){Z.switch_type("audio")});c("recaptcha_switch_img","text","visual_challenge",function(){Z.switch_type("image")});c("recaptcha_whatsthis","help","help_btn",
+Z.showhelp);"clean"==b&&(a("recaptcha_logo").src=e+"/logo."+g);a("recaptcha_table").className="recaptchatable recaptcha_theme_"+Z.theme;b=function(b,c){var d=a(b);d&&(RecaptchaState.rtl&&"span"==d.tagName.toLowerCase()&&(d.dir="rtl"),d.appendChild(document.createTextNode(W[c])))};b("recaptcha_instructions_image","instructions_visual");b("recaptcha_instructions_audio","instructions_audio");b("recaptcha_instructions_error","incorrect_try_again");a("recaptcha_instructions_image")||a("recaptcha_instructions_audio")||
+(b="audio"==Z.type?W.instructions_audio:W.instructions_visual,b=b.replace(/:$/,""),a("recaptcha_response_field").setAttribute("placeholder",b))},_finish_widget:function(){var a=Z.$,b=Y,c=b.theme;c in{blackglass:1,clean:1,custom:1,red:1,white:1}||(c="red");Z.theme||(Z.theme=c);"custom"!=Z.theme?Z._init_builtin_theme():Z._set_style("");c=document.createElement("span");c.id="recaptcha_challenge_field_holder";c.style.display="none";a("recaptcha_response_field").parentNode.insertBefore(c,a("recaptcha_response_field"));
+a("recaptcha_response_field").setAttribute("autocomplete","off");a("recaptcha_image").style.width="300px";a("recaptcha_image").style.height="57px";a("recaptcha_challenge_field_holder").innerHTML='<input type="hidden" name="recaptcha_challenge_field" id="recaptcha_challenge_field" value=""/>';Z.th_init();Z.should_focus=!1;Z.th3||Z.force_reload?(Z._set_challenge(RecaptchaState.challenge,"image",!0),setTimeout(function(){Z.reload_internal("i")},100)):Z._set_challenge(RecaptchaState.challenge,"image",
+!1);Z.updateTabIndexes_();Z.update_widget();Z.widget&&(Z.widget.style.display="");b.callback&&b.callback()},updateTabIndexes_:function(){var a=Z.$,b=Y;b.tabindex&&(b=b.tabindex,a("recaptcha_response_field").tabIndex=b++,"audio"==Z.type&&a("recaptcha_audio_play_again")&&(a("recaptcha_audio_play_again").tabIndex=b++,a("recaptcha_audio_download"),a("recaptcha_audio_download").tabIndex=b++),"custom"!=Z.theme&&(a("recaptcha_reload_btn").tabIndex=b++,a("recaptcha_switch_audio_btn").tabIndex=b++,a("recaptcha_switch_img_btn").tabIndex=
+b++,a("recaptcha_whatsthis_btn").tabIndex=b,a("recaptcha_privacy").tabIndex=b++))},switch_type:function(a){if(!((new Date).getTime()<Z.reload_timeout)&&(Z.type=a,Z.reload_internal("audio"==Z.type?"a":"v"),"custom"!=Z.theme)){a=Z.$;var b="audio"==Z.type?W.instructions_audio:W.instructions_visual,b=b.replace(/:$/,"");a("recaptcha_response_field").setAttribute("placeholder",b)}},reload:function(){Z.reload_internal("r")},reload_internal:function(a){var b=Y,c=RecaptchaState,d=(new Date).getTime();d<Z.reload_timeout||
+(Z.reload_timeout=d+1E3,"undefined"==typeof a&&(a="r"),d=Z._get_api_server()+"/reload?c="+c.challenge+"&k="+c.site+"&reason="+a+"&type="+Z.type,Z.getLang_()&&(d+="&lang="+Z.getLang_()),"undefined"!=typeof b.extra_challenge_params&&(d+="&"+b.extra_challenge_params),Z.th_callback_invoke(),Z.th1&&(d+="&th="+Z.th1,Z.th1=""),"audio"==Z.type&&(d=b.audio_beta_12_08?d+"&audio_beta_12_08=1":d+"&new_audio_default=1"),Z.should_focus="t"!=a&&"i"!=a,Z._add_script(d),ob(Z.ad_captcha_plugin),c.publisher_id=null)},
+th_callback_invoke:function(){if(Z.th3)try{var a=Z.th3.exec();a&&1600>a.length&&(Z.th1=a)}catch(b){Z.th1=""}},finish_reload:function(a,b,c,d){RecaptchaState.payload_url=c;RecaptchaState.is_incorrect=!1;RecaptchaState.publisher_id=d;Z._set_challenge(a,b,!1);Z.updateTabIndexes_()},_set_challenge:function(a,b,c){"image"==b&&RecaptchaState.publisher_id?ld(a,function(){Z._set_challenge_internal(a,b,c)}):Z._set_challenge_internal(a,b,c)},_set_challenge_internal:function(a,b,c){var d=Z.$,e=RecaptchaState;
+e.challenge=a;Z.type=b;d("recaptcha_challenge_field").value=e.challenge;c||("audio"==b?(d("recaptcha_image").innerHTML=Z.getAudioCaptchaHtml(),Z._loop_playback()):"image"==b&&(a=e.payload_url,a||(a=Z._get_api_server()+"/image?c="+e.challenge,Z.th_callback_invoke(),Z.th1&&(a+="&th="+Z.th1,Z.th1="")),id(d("recaptcha_widget_div")),Wa()?Z.ad_captcha_plugin=new rd(Xa(),a,function(a){RecaptchaState.challenge=a;d("recaptcha_challenge_field").value=a}):d("recaptcha_image").innerHTML='<img id="recaptcha_challenge_image" alt="'+
+W.image_alt_text+'" height="57" width="300" src="'+a+'" />',hd()));Z._css_toggle("recaptcha_had_incorrect_sol","recaptcha_nothad_incorrect_sol",e.is_incorrect);Z._css_toggle("recaptcha_is_showing_audio","recaptcha_isnot_showing_audio","audio"==b);Z._clear_input();Z.should_focus&&Z.focus_response_field();Z._reset_timer()},_reset_timer:function(){clearInterval(Z.timer_id);var a=Math.max(1E3*(RecaptchaState.timeout-60),6E4);Z.timer_id=setInterval(function(){Z.reload_internal("t")},a);return a},showhelp:function(){window.open(Z._get_help_link(),
+"recaptcha_popup","width=460,height=580,location=no,menubar=no,status=no,toolbar=no,scrollbars=yes,resizable=yes")},_clear_input:function(){Z.$("recaptcha_response_field").value=""},_displayerror:function(a){var b=Z.$;b("recaptcha_image").innerHTML="";b("recaptcha_image").appendChild(document.createTextNode(a))},reloaderror:function(a){Z._displayerror(a)},_is_ie:function(){return 0<navigator.userAgent.indexOf("MSIE")&&!window.opera},_css_toggle:function(a,b,c){Z.update_widget();var d=Z.widget;d||
+(d=document.body);var e=d.className,e=e.replace(new RegExp("(^|\\s+)"+a+"(\\s+|$)")," "),e=e.replace(new RegExp("(^|\\s+)"+b+"(\\s+|$)")," ");d.className=e+(" "+(c?a:b))},_get_help_link:function(){var a="https://support.google.com/recaptcha/";Z.getLang_()&&(a+="?hl="+Z.getLang_());return a},playAgain:function(){Z.$("recaptcha_image").innerHTML=Z.getAudioCaptchaHtml();Z._loop_playback()},_loop_playback:function(){var a=Z.$("recaptcha_audio_play_again");a&&Z.attachEvent(a,"click",function(){Z.playAgain();
+return!1})},getAudioCaptchaHtml:function(){var a=RecaptchaState.payload_url;a||(a=Z._get_api_server()+"/audio.mp3?c="+RecaptchaState.challenge,Z.th_callback_invoke(),Z.th1&&(a+="&th="+Z.th1,Z.th1=""));var b=Z._get_api_server()+"/swf/audiocaptcha.swf?v2",b=Z._is_ie()?'<object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" id="audiocaptcha" width="0" height="0" codebase="https://fpdownload.macromedia.com/get/flashplayer/current/swflash.cab"><param name="movie" value="'+b+'" /><param name="quality" value="high" /><param name="bgcolor" value="#869ca7" /><param name="allowScriptAccess" value="always" /></object><br/>':
+'<embed src="'+b+'" quality="high" bgcolor="#869ca7" width="0" height="0" name="audiocaptcha" align="middle" play="true" loop="false" quality="high" allowScriptAccess="always" type="application/x-shockwave-flash" pluginspage="http://www.adobe.com/go/getflashplayer" /></embed>',c="";Z.checkFlashVer()&&(c="<br/>"+Z.getSpan_('<a id="recaptcha_audio_play_again" class="recaptcha_audio_cant_hear_link">'+W.play_again+"</a>"));c+="<br/>"+Z.getSpan_('<a id="recaptcha_audio_download" class="recaptcha_audio_cant_hear_link" target="_blank" href="'+
+a+'">'+W.cant_hear_this+"</a>");return b+c},getSpan_:function(a){return"<span"+(RecaptchaState&&RecaptchaState.rtl?' dir="rtl"':"")+">"+a+"</span>"},gethttpwavurl:function(){if("audio"!=Z.type)return"";var a=RecaptchaState.payload_url;a||(a=Z._get_api_server()+"/image?c="+RecaptchaState.challenge,Z.th_callback_invoke(),Z.th1&&(a+="&th="+Z.th1,Z.th1=""));return a},checkFlashVer:function(){var a=-1!=navigator.appVersion.indexOf("MSIE"),b=-1!=navigator.appVersion.toLowerCase().indexOf("win"),c=-1!=navigator.userAgent.indexOf("Opera"),
+d=-1;if(null!=navigator.plugins&&0<navigator.plugins.length){if(navigator.plugins["Shockwave Flash 2.0"]||navigator.plugins["Shockwave Flash"])d=navigator.plugins["Shockwave Flash"+(navigator.plugins["Shockwave Flash 2.0"]?" 2.0":"")].description.split(" ")[2].split(".")[0]}else if(a&&b&&!c)try{d=(new ActiveXObject("ShockwaveFlash.ShockwaveFlash.7")).GetVariable("$version").split(" ")[1].split(",")[0]}catch(e){}return 9<=d},getLang_:function(){return Y.lang?Y.lang:"undefined"!=typeof RecaptchaState&&
+RecaptchaState.lang?RecaptchaState.lang:null},challenge_callback:function(){Z.force_reload=!!RecaptchaState.force_reload;if(RecaptchaState.t3){var a=RecaptchaState.t1?ib(mb(RecaptchaState.t1)):"",b=RecaptchaState.t2?ib(mb(RecaptchaState.t2)):"",c=RecaptchaState.t3?ib(mb(RecaptchaState.t3)):"";Z.th2=c;if(a)b=kc(a),cc(b,Z.challenge_callback_internal,null,void 0),cc(b,null,Z.challenge_callback_internal,void 0);else{if(k.execScript)k.execScript(b,"JavaScript");else if(k.eval)null==ia&&(k.eval("var _et_ = 1;"),
+"undefined"!=typeof k._et_?(delete k._et_,ia=!0):ia=!1),ia?k.eval(b):(a=k.document,c=a.createElement("script"),c.type="text/javascript",c.defer=!1,c.appendChild(a.createTextNode(b)),a.body.appendChild(c),a.body.removeChild(c));else throw Error("goog.globalEval not available");Z.challenge_callback_internal()}}else Z.challenge_callback_internal()},th_init:function(){try{k.thintinel&&k.thintinel.th&&(Z.th3=new k.thintinel.th(Z.th2),Z.th2="")}catch(a){}},update_widget:function(){Z.element&&(Z.widget=
+Z.$(Z.element))}};t("Recaptcha",Z);})()
diff --git a/synapse/static/client/register/js/register.js b/synapse/static/client/register/js/register.js
new file mode 100644
index 00000000..b62763a2
--- /dev/null
+++ b/synapse/static/client/register/js/register.js
@@ -0,0 +1,117 @@
+window.matrixRegistration = {
+ endpoint: location.origin + "/_matrix/client/api/v1/register"
+};
+
+var setupCaptcha = function() {
+ if (!window.matrixRegistrationConfig) {
+ return;
+ }
+ $.get(matrixRegistration.endpoint, function(response) {
+ var serverExpectsCaptcha = false;
+ for (var i=0; i<response.flows.length; i++) {
+ var flow = response.flows[i];
+ if ("m.login.recaptcha" === flow.type) {
+ serverExpectsCaptcha = true;
+ break;
+ }
+ }
+ if (!serverExpectsCaptcha) {
+ console.log("This server does not require a captcha.");
+ return;
+ }
+ console.log("Setting up ReCaptcha for "+matrixRegistration.endpoint);
+ var public_key = window.matrixRegistrationConfig.recaptcha_public_key;
+ if (public_key === undefined) {
+ console.error("No public key defined for captcha!");
+ setFeedbackString("Misconfigured captcha for server. Contact server admin.");
+ return;
+ }
+ Recaptcha.create(public_key,
+ "regcaptcha",
+ {
+ theme: "red",
+ callback: Recaptcha.focus_response_field
+ });
+ window.matrixRegistration.isUsingRecaptcha = true;
+ }).error(errorFunc);
+
+};
+
+var submitCaptcha = function(user, pwd) {
+ var challengeToken = Recaptcha.get_challenge();
+ var captchaEntry = Recaptcha.get_response();
+ var data = {
+ type: "m.login.recaptcha",
+ challenge: challengeToken,
+ response: captchaEntry
+ };
+ console.log("Submitting captcha");
+ $.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) {
+ console.log("Success -> "+JSON.stringify(response));
+ submitPassword(user, pwd, response.session);
+ }).error(function(err) {
+ Recaptcha.reload();
+ errorFunc(err);
+ });
+};
+
+var submitPassword = function(user, pwd, session) {
+ console.log("Registering...");
+ var data = {
+ type: "m.login.password",
+ user: user,
+ password: pwd,
+ session: session
+ };
+ $.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) {
+ matrixRegistration.onRegistered(
+ response.home_server, response.user_id, response.access_token
+ );
+ }).error(errorFunc);
+};
+
+var errorFunc = function(err) {
+ if (err.responseJSON && err.responseJSON.error) {
+ setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")");
+ }
+ else {
+ setFeedbackString("Request failed: " + err.status);
+ }
+};
+
+var setFeedbackString = function(text) {
+ $("#feedback").text(text);
+};
+
+matrixRegistration.onLoad = function() {
+ setupCaptcha();
+};
+
+matrixRegistration.signUp = function() {
+ var user = $("#desired_user_id").val();
+ if (user.length == 0) {
+ setFeedbackString("Must specify a username.");
+ return;
+ }
+ var pwd1 = $("#pwd1").val();
+ var pwd2 = $("#pwd2").val();
+ if (pwd1.length < 6) {
+ setFeedbackString("Password: min. 6 characters.");
+ return;
+ }
+ if (pwd1 != pwd2) {
+ setFeedbackString("Passwords do not match.");
+ return;
+ }
+ if (window.matrixRegistration.isUsingRecaptcha) {
+ submitCaptcha(user, pwd1);
+ }
+ else {
+ submitPassword(user, pwd1);
+ }
+};
+
+matrixRegistration.onRegistered = function(hs_url, user_id, access_token) {
+ // clobber this function
+ console.log("onRegistered - This function should be replaced to proceed.");
+};
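The script above drives a two-stage flow against the v1 register endpoint: an optional m.login.recaptcha POST that yields a session token, followed by an m.login.password POST carrying that session. A minimal sketch of the same password stage from Python, assuming the third-party requests library and an illustrative local homeserver URL (neither is part of this diff):

    import json
    import requests

    # Assumed local homeserver; mirrors the endpoint construction above.
    ENDPOINT = "http://localhost:8008/_matrix/client/api/v1/register"

    def register_with_password(user, password, session=None):
        # Mirrors submitPassword(): one m.login.password POST, optionally
        # carrying the session returned by an earlier captcha stage.
        body = {"type": "m.login.password", "user": user, "password": password}
        if session is not None:
            body["session"] = session
        resp = requests.post(ENDPOINT, data=json.dumps(body))
        resp.raise_for_status()
        reply = resp.json()
        return reply["user_id"], reply["access_token"], reply["home_server"]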
diff --git a/synapse/static/client/register/register_config.sample.js b/synapse/static/client/register/register_config.sample.js
new file mode 100644
index 00000000..c7ea180d
--- /dev/null
+++ b/synapse/static/client/register/register_config.sample.js
@@ -0,0 +1,3 @@
+window.matrixRegistrationConfig = {
+ recaptcha_public_key: "YOUR_PUBLIC_KEY"
+};
diff --git a/synapse/static/client/register/style.css b/synapse/static/client/register/style.css
new file mode 100644
index 00000000..5a7b6eeb
--- /dev/null
+++ b/synapse/static/client/register/style.css
@@ -0,0 +1,60 @@
+html {
+ height: 100%;
+}
+
+body {
+ height: 100%;
+ font-family: "Myriad Pro", "Myriad", Helvetica, Arial, sans-serif;
+ font-size: 12pt;
+ margin: 0px;
+}
+
+h1 {
+ font-size: 20pt;
+}
+
+a:link { color: #666; }
+a:visited { color: #666; }
+a:hover { color: #000; }
+a:active { color: #000; }
+
+input {
+  width: 100%;
+}
+
+textarea, input {
+ font-family: inherit;
+ font-size: inherit;
+}
+
+.smallPrint {
+ color: #888;
+  font-size: 9pt !important;
+  font-style: italic !important;
+}
+
+#recaptcha_area {
+  margin: auto;
+}
+
+.g-recaptcha div {
+ margin: auto;
+}
+
+#registrationForm {
+ text-align: left;
+ padding: 5px;
+ margin-bottom: 40px;
+ display: inline-block;
+
+ -webkit-border-radius: 10px;
+ -moz-border-radius: 10px;
+ border-radius: 10px;
+
+ -webkit-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
+ -moz-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
+ box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
+
+ background-color: #f8f8f8;
+ border: 1px #ccc solid;
+} \ No newline at end of file
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
new file mode 100644
index 00000000..e7443f28
--- /dev/null
+++ b/synapse/storage/__init__.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+from .appservice import (
+ ApplicationServiceStore, ApplicationServiceTransactionStore
+)
+from ._base import Cache
+from .directory import DirectoryStore
+from .events import EventsStore
+from .presence import PresenceStore
+from .profile import ProfileStore
+from .registration import RegistrationStore
+from .room import RoomStore
+from .roommember import RoomMemberStore
+from .stream import StreamStore
+from .transactions import TransactionStore
+from .keys import KeyStore
+from .event_federation import EventFederationStore
+from .pusher import PusherStore
+from .push_rule import PushRuleStore
+from .media_repository import MediaRepositoryStore
+from .rejections import RejectionsStore
+
+from .state import StateStore
+from .signatures import SignatureStore
+from .filtering import FilteringStore
+from .end_to_end_keys import EndToEndKeyStore
+
+from .receipts import ReceiptsStore
+from .search import SearchStore
+from .tags import TagsStore
+
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+# Number of msec of granularity to store the user IP 'last seen' time. Smaller
+# values give more inserts into the database even for read-only API hits.
+# 120 seconds == 2 minutes
+LAST_SEEN_GRANULARITY = 120 * 1000
+
+
+class DataStore(RoomMemberStore, RoomStore,
+ RegistrationStore, StreamStore, ProfileStore,
+ PresenceStore, TransactionStore,
+ DirectoryStore, KeyStore, StateStore, SignatureStore,
+ ApplicationServiceStore,
+ EventFederationStore,
+ MediaRepositoryStore,
+ RejectionsStore,
+ FilteringStore,
+ PusherStore,
+ PushRuleStore,
+ ApplicationServiceTransactionStore,
+ EventsStore,
+ ReceiptsStore,
+ EndToEndKeyStore,
+ SearchStore,
+ TagsStore,
+ ):
+
+ def __init__(self, hs):
+ super(DataStore, self).__init__(hs)
+ self.hs = hs
+
+ self.min_token_deferred = self._get_min_token()
+ self.min_token = None
+
+ self.client_ip_last_seen = Cache(
+ name="client_ip_last_seen",
+ keylen=4,
+ )
+
+ @defer.inlineCallbacks
+ def insert_client_ip(self, user, access_token, ip, user_agent):
+ now = int(self._clock.time_msec())
+ key = (user.to_string(), access_token, ip)
+
+ try:
+ last_seen = self.client_ip_last_seen.get(key)
+ except KeyError:
+ last_seen = None
+
+ # Rate-limited inserts
+ if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
+ defer.returnValue(None)
+
+ self.client_ip_last_seen.prefill(key, now)
+
+ # It's safe not to lock here: a) no unique constraint,
+ # b) LAST_SEEN_GRANULARITY makes concurrent updates incredibly unlikely
+ yield self._simple_upsert(
+ "user_ips",
+ keyvalues={
+ "user_id": user.to_string(),
+ "access_token": access_token,
+ "ip": ip,
+ "user_agent": user_agent,
+ },
+ values={
+ "last_seen": now,
+ },
+ desc="insert_client_ip",
+ lock=False,
+ )
+
+ @defer.inlineCallbacks
+ def count_daily_users(self):
+ """
+ Counts the number of users who used this homeserver in the last 24 hours.
+ """
+ def _count_users(txn):
+ txn.execute(
+ "SELECT COUNT(DISTINCT user_id) AS users"
+ " FROM user_ips"
+ " WHERE last_seen > ?",
+ # This is close enough to a day for our purposes.
+ (int(self._clock.time_msec()) - (1000 * 60 * 60 * 24),)
+ )
+ rows = self.cursor_to_dict(txn)
+ if rows:
+ return rows[0]["users"]
+ return 0
+
+ ret = yield self.runInteraction("count_users", _count_users)
+ defer.returnValue(ret)
+
+ def get_user_ip_and_agents(self, user):
+ return self._simple_select_list(
+ table="user_ips",
+ keyvalues={"user_id": user.to_string()},
+ retcols=[
+ "access_token", "ip", "user_agent", "last_seen"
+ ],
+ desc="get_user_ip_and_agents",
+ )
+
+
+def are_all_users_on_domain(txn, database_engine, domain):
+ sql = database_engine.convert_param_style(
+ "SELECT COUNT(*) FROM users WHERE name NOT LIKE ?"
+ )
+ pat = "%:" + domain
+ txn.execute(sql, (pat,))
+    num_not_matching = txn.fetchall()[0][0]
+    return num_not_matching == 0
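insert_client_ip() above rate-limits its writes: an in-memory cache records when each (user, token, ip) tuple was last stored, and the upsert is skipped when that was within LAST_SEEN_GRANULARITY. A standalone sketch of just that guard, using a plain dict in place of the Cache class (names hypothetical):

    import time

    LAST_SEEN_GRANULARITY = 120 * 1000  # msec, as above

    _last_seen = {}  # stand-in for the client_ip_last_seen Cache

    def should_write(key, now_msec):
        # Only hit the database if this key has not been written recently.
        last = _last_seen.get(key)
        if last is not None and (now_msec - last) < LAST_SEEN_GRANULARITY:
            return False
        _last_seen[key] = now_msec
        return True

    now = int(time.time() * 1000)
    assert should_write(("@u:example.org", "token", "1.2.3.4"), now)
    assert not should_write(("@u:example.org", "token", "1.2.3.4"), now + 1)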
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
new file mode 100644
index 00000000..218e7080
--- /dev/null
+++ b/synapse/storage/_base.py
@@ -0,0 +1,729 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from synapse.api.errors import StoreError
+from synapse.util.logutils import log_function
+from synapse.util.logcontext import preserve_context_over_fn, LoggingContext
+from synapse.util.caches.dictionary_cache import DictionaryCache
+from synapse.util.caches.descriptors import Cache
+import synapse.metrics
+
+from .util.id_generators import IdGenerator, StreamIdGenerator
+
+from twisted.internet import defer
+
+import sys
+import time
+import threading
+
+
+logger = logging.getLogger(__name__)
+
+sql_logger = logging.getLogger("synapse.storage.SQL")
+transaction_logger = logging.getLogger("synapse.storage.txn")
+perf_logger = logging.getLogger("synapse.storage.TIME")
+
+
+metrics = synapse.metrics.get_metrics_for("synapse.storage")
+
+sql_scheduling_timer = metrics.register_distribution("schedule_time")
+
+sql_query_timer = metrics.register_distribution("query_time", labels=["verb"])
+sql_txn_timer = metrics.register_distribution("transaction_time", labels=["desc"])
+
+
+class LoggingTransaction(object):
+ """An object that almost-transparently proxies for the 'txn' object
+ passed to the constructor. Adds logging and metrics to the .execute()
+ method."""
+ __slots__ = ["txn", "name", "database_engine", "after_callbacks"]
+
+ def __init__(self, txn, name, database_engine, after_callbacks):
+ object.__setattr__(self, "txn", txn)
+ object.__setattr__(self, "name", name)
+ object.__setattr__(self, "database_engine", database_engine)
+ object.__setattr__(self, "after_callbacks", after_callbacks)
+
+ def call_after(self, callback, *args):
+ """Call the given callback on the main twisted thread after the
+ transaction has finished. Used to invalidate the caches on the
+ correct thread.
+ """
+ self.after_callbacks.append((callback, args))
+
+ def __getattr__(self, name):
+ return getattr(self.txn, name)
+
+ def __setattr__(self, name, value):
+ setattr(self.txn, name, value)
+
+ def execute(self, sql, *args):
+ self._do_execute(self.txn.execute, sql, *args)
+
+ def executemany(self, sql, *args):
+ self._do_execute(self.txn.executemany, sql, *args)
+
+ def _do_execute(self, func, sql, *args):
+ # TODO(paul): Maybe use 'info' and 'debug' for values?
+ sql_logger.debug("[SQL] {%s} %s", self.name, sql)
+
+ sql = self.database_engine.convert_param_style(sql)
+
+ if args:
+ try:
+ sql_logger.debug(
+ "[SQL values] {%s} %r",
+ self.name, args[0]
+ )
+            except Exception:
+ # Don't let logging failures stop SQL from working
+ pass
+
+ start = time.time() * 1000
+
+ try:
+ return func(
+ sql, *args
+ )
+ except Exception as e:
+ logger.debug("[SQL FAIL] {%s} %s", self.name, e)
+ raise
+ finally:
+ msecs = (time.time() * 1000) - start
+ sql_logger.debug("[SQL time] {%s} %f", self.name, msecs)
+ sql_query_timer.inc_by(msecs, sql.split()[0])
+
+
+class PerformanceCounters(object):
+ def __init__(self):
+ self.current_counters = {}
+ self.previous_counters = {}
+
+ def update(self, key, start_time, end_time=None):
+ if end_time is None:
+ end_time = time.time() * 1000
+ duration = end_time - start_time
+ count, cum_time = self.current_counters.get(key, (0, 0))
+ count += 1
+ cum_time += duration
+ self.current_counters[key] = (count, cum_time)
+ return end_time
+
+ def interval(self, interval_duration, limit=3):
+ counters = []
+ for name, (count, cum_time) in self.current_counters.items():
+ prev_count, prev_time = self.previous_counters.get(name, (0, 0))
+ counters.append((
+ (cum_time - prev_time) / interval_duration,
+ count - prev_count,
+ name
+ ))
+
+ self.previous_counters = dict(self.current_counters)
+
+ counters.sort(reverse=True)
+
+ top_n_counters = ", ".join(
+ "%s(%d): %.3f%%" % (name, count, 100 * ratio)
+ for ratio, count, name in counters[:limit]
+ )
+
+ return top_n_counters
+
+
+class SQLBaseStore(object):
+ _TXN_ID = 0
+
+ def __init__(self, hs):
+ self.hs = hs
+ self._db_pool = hs.get_db_pool()
+ self._clock = hs.get_clock()
+
+ self._previous_txn_total_time = 0
+ self._current_txn_total_time = 0
+ self._previous_loop_ts = 0
+
+ # TODO(paul): These can eventually be removed once the metrics code
+ # is running in mainline, and we have some nice monitoring frontends
+ # to watch it
+ self._txn_perf_counters = PerformanceCounters()
+ self._get_event_counters = PerformanceCounters()
+
+ self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True,
+ max_entries=hs.config.event_cache_size)
+
+ self._state_group_cache = DictionaryCache("*stateGroupCache*", 2000)
+
+ self._event_fetch_lock = threading.Condition()
+ self._event_fetch_list = []
+ self._event_fetch_ongoing = 0
+
+ self._pending_ds = []
+
+ self.database_engine = hs.database_engine
+
+ self._stream_id_gen = StreamIdGenerator("events", "stream_ordering")
+ self._transaction_id_gen = IdGenerator("sent_transactions", "id", self)
+ self._state_groups_id_gen = IdGenerator("state_groups", "id", self)
+ self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self)
+ self._refresh_tokens_id_gen = IdGenerator("refresh_tokens", "id", self)
+ self._pushers_id_gen = IdGenerator("pushers", "id", self)
+ self._push_rule_id_gen = IdGenerator("push_rules", "id", self)
+ self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self)
+        self._receipts_id_gen = StreamIdGenerator("receipts_linearized", "stream_id")
+
+        # Used by get_next_stream_id() below.
+        self._next_stream_id_lock = threading.Lock()
+        self._next_stream_id = int(self._clock.time_msec()) * 1000
+
+ def start_profiling(self):
+ self._previous_loop_ts = self._clock.time_msec()
+
+ def loop():
+ curr = self._current_txn_total_time
+ prev = self._previous_txn_total_time
+ self._previous_txn_total_time = curr
+
+ time_now = self._clock.time_msec()
+ time_then = self._previous_loop_ts
+ self._previous_loop_ts = time_now
+
+ ratio = (curr - prev)/(time_now - time_then)
+
+ top_three_counters = self._txn_perf_counters.interval(
+ time_now - time_then, limit=3
+ )
+
+ top_3_event_counters = self._get_event_counters.interval(
+ time_now - time_then, limit=3
+ )
+
+ perf_logger.info(
+ "Total database time: %.3f%% {%s} {%s}",
+ ratio * 100, top_three_counters, top_3_event_counters
+ )
+
+ self._clock.looping_call(loop, 10000)
+
+ def _new_transaction(self, conn, desc, after_callbacks, func, *args, **kwargs):
+ start = time.time() * 1000
+ txn_id = self._TXN_ID
+
+        # We don't really need these to be unique, so let's stop it from
+ # growing really large.
+ self._TXN_ID = (self._TXN_ID + 1) % (sys.maxint - 1)
+
+ name = "%s-%x" % (desc, txn_id, )
+
+ transaction_logger.debug("[TXN START] {%s}", name)
+
+ try:
+ i = 0
+ N = 5
+ while True:
+ try:
+ txn = conn.cursor()
+ txn = LoggingTransaction(
+ txn, name, self.database_engine, after_callbacks
+ )
+ r = func(txn, *args, **kwargs)
+ conn.commit()
+ return r
+ except self.database_engine.module.OperationalError as e:
+ # This can happen if the database disappears mid
+ # transaction.
+ logger.warn(
+ "[TXN OPERROR] {%s} %s %d/%d",
+ name, e, i, N
+ )
+ if i < N:
+ i += 1
+ try:
+ conn.rollback()
+ except self.database_engine.module.Error as e1:
+ logger.warn(
+ "[TXN EROLL] {%s} %s",
+ name, e1,
+ )
+ continue
+ raise
+ except self.database_engine.module.DatabaseError as e:
+ if self.database_engine.is_deadlock(e):
+ logger.warn("[TXN DEADLOCK] {%s} %d/%d", name, i, N)
+ if i < N:
+ i += 1
+ try:
+ conn.rollback()
+ except self.database_engine.module.Error as e1:
+ logger.warn(
+ "[TXN EROLL] {%s} %s",
+ name, e1,
+ )
+ continue
+ raise
+ except Exception as e:
+ logger.debug("[TXN FAIL] {%s} %s", name, e)
+ raise
+ finally:
+ end = time.time() * 1000
+ duration = end - start
+
+ transaction_logger.debug("[TXN END] {%s} %f", name, duration)
+
+ self._current_txn_total_time += duration
+ self._txn_perf_counters.update(desc, start, end)
+ sql_txn_timer.inc_by(duration, desc)
+
+ @defer.inlineCallbacks
+ def runInteraction(self, desc, func, *args, **kwargs):
+ """Wraps the .runInteraction() method on the underlying db_pool."""
+ current_context = LoggingContext.current_context()
+
+ start_time = time.time() * 1000
+
+ after_callbacks = []
+
+ def inner_func(conn, *args, **kwargs):
+ with LoggingContext("runInteraction") as context:
+ sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)
+
+ if self.database_engine.is_connection_closed(conn):
+ logger.debug("Reconnecting closed database connection")
+ conn.reconnect()
+
+ current_context.copy_to(context)
+ return self._new_transaction(
+ conn, desc, after_callbacks, func, *args, **kwargs
+ )
+
+ result = yield preserve_context_over_fn(
+ self._db_pool.runWithConnection,
+ inner_func, *args, **kwargs
+ )
+
+ for after_callback, after_args in after_callbacks:
+ after_callback(*after_args)
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def runWithConnection(self, func, *args, **kwargs):
+ """Wraps the .runInteraction() method on the underlying db_pool."""
+ current_context = LoggingContext.current_context()
+
+ start_time = time.time() * 1000
+
+ def inner_func(conn, *args, **kwargs):
+ with LoggingContext("runWithConnection") as context:
+ sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)
+
+ if self.database_engine.is_connection_closed(conn):
+ logger.debug("Reconnecting closed database connection")
+ conn.reconnect()
+
+ current_context.copy_to(context)
+
+ return func(conn, *args, **kwargs)
+
+ result = yield preserve_context_over_fn(
+ self._db_pool.runWithConnection,
+ inner_func, *args, **kwargs
+ )
+
+ defer.returnValue(result)
+
+ def cursor_to_dict(self, cursor):
+ """Converts a SQL cursor into an list of dicts.
+
+ Args:
+ cursor : The DBAPI cursor which has executed a query.
+ Returns:
+ A list of dicts where the key is the column header.
+ """
+ col_headers = list(column[0] for column in cursor.description)
+ results = list(
+ dict(zip(col_headers, row)) for row in cursor.fetchall()
+ )
+ return results
+
+ def _execute(self, desc, decoder, query, *args):
+ """Runs a single query for a result set.
+
+ Args:
+ decoder - The function which can resolve the cursor results to
+ something meaningful.
+ query - The query string to execute
+ *args - Query args.
+ Returns:
+ The result of decoder(results)
+ """
+ def interaction(txn):
+ txn.execute(query, args)
+ if decoder:
+ return decoder(txn)
+ else:
+ return txn.fetchall()
+
+ return self.runInteraction(desc, interaction)
+
+ # "Simple" SQL API methods that operate on a single table with no JOINs,
+ # no complex WHERE clauses, just a dict of values for columns.
+
+ @defer.inlineCallbacks
+ def _simple_insert(self, table, values, or_ignore=False,
+ desc="_simple_insert"):
+ """Executes an INSERT query on the named table.
+
+ Args:
+ table : string giving the table name
+ values : dict of new column names and values for them
+ """
+ try:
+ yield self.runInteraction(
+ desc,
+ self._simple_insert_txn, table, values,
+ )
+ except self.database_engine.module.IntegrityError:
+            # We have to handle the or_ignore flag at this layer, since we
+            # can't reuse a cursor after we receive an error from the db.
+ if not or_ignore:
+ raise
+
+ @log_function
+ def _simple_insert_txn(self, txn, table, values):
+ keys, vals = zip(*values.items())
+
+ sql = "INSERT INTO %s (%s) VALUES(%s)" % (
+ table,
+ ", ".join(k for k in keys),
+ ", ".join("?" for _ in keys)
+ )
+
+ txn.execute(sql, vals)
+
+ def _simple_insert_many_txn(self, txn, table, values):
+ if not values:
+ return
+
+ # This is a *slight* abomination to get a list of tuples of key names
+ # and a list of tuples of value names.
+ #
+ # i.e. [{"a": 1, "b": 2}, {"c": 3, "d": 4}]
+ # => [("a", "b",), ("c", "d",)] and [(1, 2,), (3, 4,)]
+ #
+ # The sort is to ensure that we don't rely on dictionary iteration
+ # order.
+ keys, vals = zip(*[
+ zip(
+ *(sorted(i.items(), key=lambda kv: kv[0]))
+ )
+ for i in values
+ if i
+ ])
+
+ for k in keys:
+ if k != keys[0]:
+ raise RuntimeError(
+ "All items must have the same keys"
+ )
+
+ sql = "INSERT INTO %s (%s) VALUES(%s)" % (
+ table,
+ ", ".join(k for k in keys[0]),
+ ", ".join("?" for _ in keys[0])
+ )
+
+ txn.executemany(sql, vals)
+
+ def _simple_upsert(self, table, keyvalues, values,
+ insertion_values={}, desc="_simple_upsert", lock=True):
+ """
+ Args:
+ table (str): The table to upsert into
+            keyvalues (dict): The unique key columns and their new values
+ values (dict): The nonunique columns and their new values
+ insertion_values (dict): key/values to use when inserting
+ Returns: A deferred
+ """
+ return self.runInteraction(
+ desc,
+ self._simple_upsert_txn, table, keyvalues, values, insertion_values,
+ lock
+ )
+
+ def _simple_upsert_txn(self, txn, table, keyvalues, values, insertion_values={},
+ lock=True):
+ # We need to lock the table :(, unless we're *really* careful
+ if lock:
+ self.database_engine.lock_table(txn, table)
+
+ # Try to update
+ sql = "UPDATE %s SET %s WHERE %s" % (
+ table,
+ ", ".join("%s = ?" % (k,) for k in values),
+ " AND ".join("%s = ?" % (k,) for k in keyvalues)
+ )
+ sqlargs = values.values() + keyvalues.values()
+ logger.debug(
+ "[SQL] %s Args=%s",
+ sql, sqlargs,
+ )
+
+ txn.execute(sql, sqlargs)
+ if txn.rowcount == 0:
+            # We didn't update any rows so insert a new one
+ allvalues = {}
+ allvalues.update(keyvalues)
+ allvalues.update(values)
+ allvalues.update(insertion_values)
+
+ sql = "INSERT INTO %s (%s) VALUES (%s)" % (
+ table,
+ ", ".join(k for k in allvalues),
+ ", ".join("?" for _ in allvalues)
+ )
+ logger.debug(
+ "[SQL] %s Args=%s",
+                sql, allvalues.values(),
+ )
+ txn.execute(sql, allvalues.values())
+
+ def _simple_select_one(self, table, keyvalues, retcols,
+ allow_none=False, desc="_simple_select_one"):
+ """Executes a SELECT query on the named table, which is expected to
+        return a single row, returning the requested columns from it.
+
+ Args:
+ table : string giving the table name
+ keyvalues : dict of column names and values to select the row with
+ retcols : list of strings giving the names of the columns to return
+
+ allow_none : If true, return None instead of failing if the SELECT
+ statement returns no rows
+ """
+ return self.runInteraction(
+ desc,
+ self._simple_select_one_txn,
+ table, keyvalues, retcols, allow_none,
+ )
+
+ def _simple_select_one_onecol(self, table, keyvalues, retcol,
+ allow_none=False,
+ desc="_simple_select_one_onecol"):
+ """Executes a SELECT query on the named table, which is expected to
+ return a single row, returning a single column from it.
+
+ Args:
+ table : string giving the table name
+ keyvalues : dict of column names and values to select the row with
+ retcol : string giving the name of the column to return
+ """
+ return self.runInteraction(
+ desc,
+ self._simple_select_one_onecol_txn,
+ table, keyvalues, retcol, allow_none=allow_none,
+ )
+
+ def _simple_select_one_onecol_txn(self, txn, table, keyvalues, retcol,
+ allow_none=False):
+ ret = self._simple_select_onecol_txn(
+ txn,
+ table=table,
+ keyvalues=keyvalues,
+ retcol=retcol,
+ )
+
+ if ret:
+ return ret[0]
+ else:
+ if allow_none:
+ return None
+ else:
+ raise StoreError(404, "No row found")
+
+ def _simple_select_onecol_txn(self, txn, table, keyvalues, retcol):
+ sql = (
+ "SELECT %(retcol)s FROM %(table)s WHERE %(where)s"
+ ) % {
+ "retcol": retcol,
+ "table": table,
+ "where": " AND ".join("%s = ?" % k for k in keyvalues.keys()),
+ }
+
+ txn.execute(sql, keyvalues.values())
+
+ return [r[0] for r in txn.fetchall()]
+
+ def _simple_select_onecol(self, table, keyvalues, retcol,
+ desc="_simple_select_onecol"):
+ """Executes a SELECT query on the named table, which returns a list
+ comprising of the values of the named column from the selected rows.
+
+ Args:
+ table (str): table name
+ keyvalues (dict): column names and values to select the rows with
+            retcol (str): column whose value we wish to retrieve.
+
+ Returns:
+ Deferred: Results in a list
+ """
+ return self.runInteraction(
+ desc,
+ self._simple_select_onecol_txn,
+ table, keyvalues, retcol
+ )
+
+ def _simple_select_list(self, table, keyvalues, retcols,
+ desc="_simple_select_list"):
+ """Executes a SELECT query on the named table, which may return zero or
+ more rows, returning the result as a list of dicts.
+
+ Args:
+ table : string giving the table name
+ keyvalues : dict of column names and values to select the rows with,
+ or None to not apply a WHERE clause.
+ retcols : list of strings giving the names of the columns to return
+ """
+ return self.runInteraction(
+ desc,
+ self._simple_select_list_txn,
+ table, keyvalues, retcols
+ )
+
+ def _simple_select_list_txn(self, txn, table, keyvalues, retcols):
+ """Executes a SELECT query on the named table, which may return zero or
+ more rows, returning the result as a list of dicts.
+
+ Args:
+ txn : Transaction object
+ table : string giving the table name
+ keyvalues : dict of column names and values to select the rows with
+ retcols : list of strings giving the names of the columns to return
+ """
+ if keyvalues:
+ sql = "SELECT %s FROM %s WHERE %s" % (
+ ", ".join(retcols),
+ table,
+ " AND ".join("%s = ?" % (k, ) for k in keyvalues)
+ )
+ txn.execute(sql, keyvalues.values())
+ else:
+ sql = "SELECT %s FROM %s" % (
+ ", ".join(retcols),
+ table
+ )
+ txn.execute(sql)
+
+ return self.cursor_to_dict(txn)
+
+ def _simple_update_one(self, table, keyvalues, updatevalues,
+ desc="_simple_update_one"):
+ """Executes an UPDATE query on the named table, setting new values for
+ columns in a row matching the key values.
+
+ Args:
+ table : string giving the table name
+ keyvalues : dict of column names and values to select the row with
+ updatevalues : dict giving column names and values to update
+
+        The update is performed in a single transaction. Compare-and-set can
+        be implemented by putting the expected current value of the column
+        being updated into the 'keyvalues' dict as well.
+ """
+ return self.runInteraction(
+ desc,
+ self._simple_update_one_txn,
+ table, keyvalues, updatevalues,
+ )
+
+ def _simple_update_one_txn(self, txn, table, keyvalues, updatevalues):
+ update_sql = "UPDATE %s SET %s WHERE %s" % (
+ table,
+ ", ".join("%s = ?" % (k,) for k in updatevalues),
+ " AND ".join("%s = ?" % (k,) for k in keyvalues)
+ )
+
+ txn.execute(
+ update_sql,
+ updatevalues.values() + keyvalues.values()
+ )
+
+ if txn.rowcount == 0:
+ raise StoreError(404, "No row found")
+ if txn.rowcount > 1:
+ raise StoreError(500, "More than one row matched")
+
+ def _simple_select_one_txn(self, txn, table, keyvalues, retcols,
+ allow_none=False):
+ select_sql = "SELECT %s FROM %s WHERE %s" % (
+ ", ".join(retcols),
+ table,
+ " AND ".join("%s = ?" % (k,) for k in keyvalues)
+ )
+
+ txn.execute(select_sql, keyvalues.values())
+
+ row = txn.fetchone()
+ if not row:
+ if allow_none:
+ return None
+ raise StoreError(404, "No row found")
+ if txn.rowcount > 1:
+ raise StoreError(500, "More than one row matched")
+
+ return dict(zip(retcols, row))
+
+ def _simple_delete_one(self, table, keyvalues, desc="_simple_delete_one"):
+ """Executes a DELETE query on the named table, expecting to delete a
+ single row.
+
+ Args:
+ table : string giving the table name
+ keyvalues : dict of column names and values to select the row with
+ """
+ sql = "DELETE FROM %s WHERE %s" % (
+ table,
+ " AND ".join("%s = ?" % (k, ) for k in keyvalues)
+ )
+
+ def func(txn):
+ txn.execute(sql, keyvalues.values())
+ if txn.rowcount == 0:
+ raise StoreError(404, "No row found")
+ if txn.rowcount > 1:
+ raise StoreError(500, "more than one row matched")
+ return self.runInteraction(desc, func)
+
+ def _simple_delete_txn(self, txn, table, keyvalues):
+ sql = "DELETE FROM %s WHERE %s" % (
+ table,
+ " AND ".join("%s = ?" % (k, ) for k in keyvalues)
+ )
+
+ return txn.execute(sql, keyvalues.values())
+
+ def get_next_stream_id(self):
+ with self._next_stream_id_lock:
+ i = self._next_stream_id
+ self._next_stream_id += 1
+ return i
+
+
+class _RollbackButIsFineException(Exception):
+ """ This exception is used to rollback a transaction without implying
+ something went wrong.
+ """
+ pass
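_simple_upsert_txn() above emulates an upsert portably: try an UPDATE first and fall back to an INSERT when no row matched, optionally taking a table lock (the SQLite and Postgres versions of the time had no native UPSERT). A self-contained sketch of the same shape against sqlite3 (table and columns illustrative; not concurrency-safe without the lock):

    import sqlite3

    def simple_upsert(conn, table, keyvalues, values):
        # UPDATE first; INSERT only if no row matched, as in _simple_upsert_txn().
        txn = conn.cursor()
        sql = "UPDATE %s SET %s WHERE %s" % (
            table,
            ", ".join("%s = ?" % k for k in values),
            " AND ".join("%s = ?" % k for k in keyvalues),
        )
        txn.execute(sql, list(values.values()) + list(keyvalues.values()))
        if txn.rowcount == 0:
            allvalues = dict(keyvalues, **values)
            sql = "INSERT INTO %s (%s) VALUES (%s)" % (
                table, ", ".join(allvalues), ", ".join("?" for _ in allvalues),
            )
            txn.execute(sql, list(allvalues.values()))

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE user_ips (user_id TEXT, ip TEXT, last_seen INTEGER)")
    simple_upsert(conn, "user_ips", {"user_id": "@u:hs", "ip": "1.2.3.4"}, {"last_seen": 42})
    simple_upsert(conn, "user_ips", {"user_id": "@u:hs", "ip": "1.2.3.4"}, {"last_seen": 43})
    assert conn.execute("SELECT COUNT(*) FROM user_ips").fetchone()[0] == 1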
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
new file mode 100644
index 00000000..39b7881c
--- /dev/null
+++ b/synapse/storage/appservice.py
@@ -0,0 +1,471 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import urllib
+import yaml
+from simplejson import JSONDecodeError
+import simplejson as json
+from twisted.internet import defer
+
+from synapse.api.constants import Membership
+from synapse.appservice import ApplicationService, AppServiceTransaction
+from synapse.storage.roommember import RoomsForUser
+from synapse.types import UserID
+from ._base import SQLBaseStore
+
+
+logger = logging.getLogger(__name__)
+
+
+class ApplicationServiceStore(SQLBaseStore):
+
+ def __init__(self, hs):
+ super(ApplicationServiceStore, self).__init__(hs)
+ self.hostname = hs.hostname
+ self.services_cache = []
+ self._populate_appservice_cache(
+ hs.config.app_service_config_files
+ )
+
+ def get_app_services(self):
+ return defer.succeed(self.services_cache)
+
+ def get_app_service_by_user_id(self, user_id):
+ """Retrieve an application service from their user ID.
+
+        Each application service has a particular user ID associated with it.
+        Nothing about the user ID itself marks it as belonging to an
+        application service, so this function provides the mapping from a
+        user ID to its application service, if any.
+
+ Args:
+ user_id(str): The user ID to see if it is an application service.
+ Returns:
+ synapse.appservice.ApplicationService or None.
+ """
+ for service in self.services_cache:
+ if service.sender == user_id:
+ return defer.succeed(service)
+ return defer.succeed(None)
+
+ def get_app_service_by_token(self, token):
+ """Get the application service with the given appservice token.
+
+ Args:
+ token (str): The application service token.
+ Returns:
+ synapse.appservice.ApplicationService or None.
+ """
+ for service in self.services_cache:
+ if service.token == token:
+ return defer.succeed(service)
+ return defer.succeed(None)
+
+ def get_app_service_rooms(self, service):
+ """Get a list of RoomsForUser for this application service.
+
+ Application services may be "interested" in lots of rooms depending on
+ the room ID, the room aliases, or the members in the room. This function
+ takes all of these into account and returns a list of RoomsForUser which
+ represent the entire list of room IDs that this application service
+ wants to know about.
+
+ Args:
+ service: The application service to get a room list for.
+ Returns:
+ A list of RoomsForUser.
+ """
+ return self.runInteraction(
+ "get_app_service_rooms",
+ self._get_app_service_rooms_txn,
+ service,
+ )
+
+ def _get_app_service_rooms_txn(self, txn, service):
+ # get all rooms matching the room ID regex.
+ room_entries = self._simple_select_list_txn(
+ txn=txn, table="rooms", keyvalues=None, retcols=["room_id"]
+ )
+ matching_room_list = set([
+ r["room_id"] for r in room_entries if
+ service.is_interested_in_room(r["room_id"])
+ ])
+
+ # resolve room IDs for matching room alias regex.
+ room_alias_mappings = self._simple_select_list_txn(
+ txn=txn, table="room_aliases", keyvalues=None,
+ retcols=["room_id", "room_alias"]
+ )
+ matching_room_list |= set([
+ r["room_id"] for r in room_alias_mappings if
+ service.is_interested_in_alias(r["room_alias"])
+ ])
+
+ # get all rooms for every user for this AS. This is scoped to users on
+ # this HS only.
+ user_list = self._simple_select_list_txn(
+ txn=txn, table="users", keyvalues=None, retcols=["name"]
+ )
+ user_list = [
+ u["name"] for u in user_list if
+ service.is_interested_in_user(u["name"])
+ ]
+ rooms_for_user_matching_user_id = set() # RoomsForUser list
+ for user_id in user_list:
+ # FIXME: This assumes this store is linked with RoomMemberStore :(
+ rooms_for_user = self._get_rooms_for_user_where_membership_is_txn(
+ txn=txn,
+ user_id=user_id,
+ membership_list=[Membership.JOIN]
+ )
+ rooms_for_user_matching_user_id |= set(rooms_for_user)
+
+ # make RoomsForUser tuples for room ids and aliases which are not in the
+ # main rooms_for_user_list - e.g. they are rooms which do not have AS
+ # registered users in it.
+ known_room_ids = [r.room_id for r in rooms_for_user_matching_user_id]
+ missing_rooms_for_user = [
+ RoomsForUser(r, service.sender, "join") for r in
+ matching_room_list if r not in known_room_ids
+ ]
+ rooms_for_user_matching_user_id |= set(missing_rooms_for_user)
+
+ return rooms_for_user_matching_user_id
+
+ def _parse_services_dict(self, results):
+ # SQL results in the form:
+ # [
+ # {
+ # 'regex': "something",
+ # 'url': "something",
+ # 'namespace': enum,
+ # 'as_id': 0,
+ # 'token': "something",
+ # 'hs_token': "otherthing",
+ # 'id': 0
+ # }
+ # ]
+ services = {}
+ for res in results:
+ as_token = res["token"]
+ if as_token is None:
+ continue
+ if as_token not in services:
+ # add the service
+ services[as_token] = {
+ "id": res["id"],
+ "url": res["url"],
+ "token": as_token,
+ "hs_token": res["hs_token"],
+ "sender": res["sender"],
+ "namespaces": {
+ ApplicationService.NS_USERS: [],
+ ApplicationService.NS_ALIASES: [],
+ ApplicationService.NS_ROOMS: []
+ }
+ }
+ # add the namespace regex if one exists
+ ns_int = res["namespace"]
+ if ns_int is None:
+ continue
+ try:
+ services[as_token]["namespaces"][
+ ApplicationService.NS_LIST[ns_int]].append(
+ json.loads(res["regex"])
+ )
+ except IndexError:
+ logger.error("Bad namespace enum '%s'. %s", ns_int, res)
+ except JSONDecodeError:
+ logger.error("Bad regex object '%s'", res["regex"])
+
+ service_list = []
+ for service in services.values():
+ service_list.append(ApplicationService(
+ token=service["token"],
+ url=service["url"],
+ namespaces=service["namespaces"],
+ hs_token=service["hs_token"],
+ sender=service["sender"],
+ id=service["id"]
+ ))
+ return service_list
+
+ def _load_appservice(self, as_info):
+ required_string_fields = [
+ "url", "as_token", "hs_token", "sender_localpart"
+ ]
+ for field in required_string_fields:
+ if not isinstance(as_info.get(field), basestring):
+ raise KeyError("Required string field: '%s'", field)
+
+ localpart = as_info["sender_localpart"]
+ if urllib.quote(localpart) != localpart:
+ raise ValueError(
+ "sender_localpart needs characters which are not URL encoded."
+ )
+ user = UserID(localpart, self.hostname)
+ user_id = user.to_string()
+
+ # namespace checks
+ if not isinstance(as_info.get("namespaces"), dict):
+ raise KeyError("Requires 'namespaces' object.")
+ for ns in ApplicationService.NS_LIST:
+ # specific namespaces are optional
+ if ns in as_info["namespaces"]:
+ # expect a list of dicts with exclusive and regex keys
+ for regex_obj in as_info["namespaces"][ns]:
+ if not isinstance(regex_obj, dict):
+                        raise ValueError(
+                            "Expected namespace entry in %s to be an object,"
+                            " but got %s" % (ns, regex_obj)
+                        )
+                    if not isinstance(regex_obj.get("regex"), basestring):
+                        raise ValueError(
+                            "Missing/bad type 'regex' key in %s" % (regex_obj,)
+                        )
+                    if not isinstance(regex_obj.get("exclusive"), bool):
+                        raise ValueError(
+                            "Missing/bad type 'exclusive' key in %s" % (regex_obj,)
+                        )
+ return ApplicationService(
+ token=as_info["as_token"],
+ url=as_info["url"],
+ namespaces=as_info["namespaces"],
+ hs_token=as_info["hs_token"],
+ sender=user_id,
+ id=as_info["as_token"] # the token is the only unique thing here
+ )
+
+ def _populate_appservice_cache(self, config_files):
+ """Populates a cache of Application Services from the config files."""
+ if not isinstance(config_files, list):
+ logger.warning(
+ "Expected %s to be a list of AS config files.", config_files
+ )
+ return
+
+ for config_file in config_files:
+ try:
+ with open(config_file, 'r') as f:
+                    appservice = self._load_appservice(yaml.safe_load(f))
+ logger.info("Loaded application service: %s", appservice)
+ self.services_cache.append(appservice)
+ except Exception as e:
+ logger.error("Failed to load appservice from '%s'", config_file)
+ logger.exception(e)
+
+
+class ApplicationServiceTransactionStore(SQLBaseStore):
+
+ def __init__(self, hs):
+ super(ApplicationServiceTransactionStore, self).__init__(hs)
+
+ @defer.inlineCallbacks
+ def get_appservices_by_state(self, state):
+ """Get a list of application services based on their state.
+
+ Args:
+ state(ApplicationServiceState): The state to filter on.
+ Returns:
+ A Deferred which resolves to a list of ApplicationServices, which
+ may be empty.
+ """
+ results = yield self._simple_select_list(
+ "application_services_state",
+ dict(state=state),
+ ["as_id"]
+ )
+ # NB: This assumes this class is linked with ApplicationServiceStore
+ as_list = yield self.get_app_services()
+ services = []
+
+ for res in results:
+ for service in as_list:
+ if service.id == res["as_id"]:
+ services.append(service)
+ defer.returnValue(services)
+
+ @defer.inlineCallbacks
+ def get_appservice_state(self, service):
+ """Get the application service state.
+
+ Args:
+            service(ApplicationService): The service whose state to get.
+ Returns:
+ A Deferred which resolves to ApplicationServiceState.
+ """
+ result = yield self._simple_select_one(
+ "application_services_state",
+ dict(as_id=service.id),
+ ["state"],
+ allow_none=True
+ )
+ if result:
+ defer.returnValue(result.get("state"))
+ return
+ defer.returnValue(None)
+
+ def set_appservice_state(self, service, state):
+ """Set the application service state.
+
+ Args:
+ service(ApplicationService): The service whose state to set.
+ state(ApplicationServiceState): The connectivity state to apply.
+ Returns:
+ A Deferred which resolves when the state was set successfully.
+ """
+ return self._simple_upsert(
+ "application_services_state",
+ dict(as_id=service.id),
+ dict(state=state)
+ )
+
+ def create_appservice_txn(self, service, events):
+ """Atomically creates a new transaction for this application service
+ with the given list of events.
+
+ Args:
+ service(ApplicationService): The service who the transaction is for.
+ events(list<Event>): A list of events to put in the transaction.
+ Returns:
+ AppServiceTransaction: A new transaction.
+ """
+ return self.runInteraction(
+ "create_appservice_txn",
+ self._create_appservice_txn,
+ service, events
+ )
+
+ def _create_appservice_txn(self, txn, service, events):
+ # work out new txn id (highest txn id for this service += 1)
+ # The highest id may be the last one sent (in which case it is last_txn)
+        # or it may be the highest in the txns list (which are waiting to be
+        # sent or are currently being sent)
+ last_txn_id = self._get_last_txn(txn, service.id)
+
+ txn.execute(
+ "SELECT MAX(txn_id) FROM application_services_txns WHERE as_id=?",
+ (service.id,)
+ )
+ highest_txn_id = txn.fetchone()[0]
+ if highest_txn_id is None:
+ highest_txn_id = 0
+
+ new_txn_id = max(highest_txn_id, last_txn_id) + 1
+
+ # Insert new txn into txn table
+ event_ids = json.dumps([e.event_id for e in events])
+ txn.execute(
+ "INSERT INTO application_services_txns(as_id, txn_id, event_ids) "
+ "VALUES(?,?,?)",
+ (service.id, new_txn_id, event_ids)
+ )
+ return AppServiceTransaction(
+ service=service, id=new_txn_id, events=events
+ )
+
+ def complete_appservice_txn(self, txn_id, service):
+ """Completes an application service transaction.
+
+ Args:
+ txn_id(str): The transaction ID being completed.
+ service(ApplicationService): The application service which was sent
+ this transaction.
+ Returns:
+ A Deferred which resolves if this transaction was stored
+ successfully.
+ """
+ return self.runInteraction(
+ "complete_appservice_txn",
+ self._complete_appservice_txn,
+ txn_id, service
+ )
+
+ def _complete_appservice_txn(self, txn, txn_id, service):
+ txn_id = int(txn_id)
+
+ # Debugging query: Make sure the txn being completed is EXACTLY +1 from
+ # what was there before. If it isn't, we've got problems (e.g. the AS
+ # has probably missed some events), so whine loudly but still continue,
+ # since it shouldn't fail completion of the transaction.
+ last_txn_id = self._get_last_txn(txn, service.id)
+ if (last_txn_id + 1) != txn_id:
+ logger.error(
+ "appservice: Completing a transaction which has an ID > 1 from "
+ "the last ID sent to this AS. We've either dropped events or "
+ "sent it to the AS out of order. FIX ME. last_txn=%s "
+ "completing_txn=%s service_id=%s", last_txn_id, txn_id,
+ service.id
+ )
+
+ # Set current txn_id for AS to 'txn_id'
+ self._simple_upsert_txn(
+ txn, "application_services_state", dict(as_id=service.id),
+ dict(last_txn=txn_id)
+ )
+
+ # Delete txn
+ self._simple_delete_txn(
+ txn, "application_services_txns",
+ dict(txn_id=txn_id, as_id=service.id)
+ )
+
+ def get_oldest_unsent_txn(self, service):
+ """Get the oldest transaction which has not been sent for this
+ service.
+
+ Args:
+ service(ApplicationService): The app service to get the oldest txn.
+ Returns:
+ A Deferred which resolves to an AppServiceTransaction or
+ None.
+ """
+ return self.runInteraction(
+ "get_oldest_unsent_appservice_txn",
+ self._get_oldest_unsent_txn,
+ service
+ )
+
+ def _get_oldest_unsent_txn(self, txn, service):
+ # Monotonically increasing txn ids, so just select the smallest
+ # one in the txns table (we delete them when they are sent)
+ txn.execute(
+ "SELECT * FROM application_services_txns WHERE as_id=?"
+ " ORDER BY txn_id ASC LIMIT 1",
+ (service.id,)
+ )
+ rows = self.cursor_to_dict(txn)
+ if not rows:
+ return None
+
+ entry = rows[0]
+
+ event_ids = json.loads(entry["event_ids"])
+ events = self._get_events_txn(txn, event_ids)
+
+ return AppServiceTransaction(
+ service=service, id=entry["txn_id"], events=events
+ )
+
+ def _get_last_txn(self, txn, service_id):
+ txn.execute(
+ "SELECT last_txn FROM application_services_state WHERE as_id=?",
+ (service_id,)
+ )
+ last_txn_id = txn.fetchone()
+ if last_txn_id is None or last_txn_id[0] is None: # no row exists
+ return 0
+ else:
+ return int(last_txn_id[0]) # select 'last_txn' col
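_create_appservice_txn() above allocates transaction IDs so that they stay monotonic whether the highest ID is still queued in application_services_txns or has already been sent and recorded as last_txn. A small sketch of just that allocation rule (function name hypothetical):

    def next_txn_id(highest_queued, last_sent):
        # SELECT MAX(txn_id) yields None when the queue is empty.
        highest_queued = highest_queued or 0
        return max(highest_queued, last_sent) + 1

    assert next_txn_id(None, 0) == 1   # first ever transaction
    assert next_txn_id(None, 7) == 8   # queue drained; resume after last_txn
    assert next_txn_id(9, 7) == 10     # pending txns outrank last_txn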
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
new file mode 100644
index 00000000..45fccc2e
--- /dev/null
+++ b/synapse/storage/background_updates.py
@@ -0,0 +1,256 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+
+from twisted.internet import defer
+
+import ujson as json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class BackgroundUpdatePerformance(object):
+ """Tracks the how long a background update is taking to update its items"""
+
+ def __init__(self, name):
+ self.name = name
+ self.total_item_count = 0
+ self.total_duration_ms = 0
+ self.avg_item_count = 0
+ self.avg_duration_ms = 0
+
+ def update(self, item_count, duration_ms):
+ """Update the stats after doing an update"""
+ self.total_item_count += item_count
+ self.total_duration_ms += duration_ms
+
+ # Exponential moving averages for the number of items updated and
+ # the duration.
+ self.avg_item_count += 0.1 * (item_count - self.avg_item_count)
+ self.avg_duration_ms += 0.1 * (duration_ms - self.avg_duration_ms)
+
+ def average_items_per_ms(self):
+ """An estimate of how long it takes to do a single update.
+ Returns:
+ A duration in ms as a float
+ """
+ if self.total_item_count == 0:
+ return None
+ else:
+ # Use the exponential moving average so that we can adapt to
+ # changes in how long the update process takes.
+ return float(self.avg_item_count) / float(self.avg_duration_ms)
+
+ def total_items_per_ms(self):
+ """An estimate of how long it takes to do a single update.
+ Returns:
+ A duration in ms as a float
+ """
+ if self.total_item_count == 0:
+ return None
+ else:
+ return float(self.total_item_count) / float(self.total_duration_ms)
+
+
+class BackgroundUpdateStore(SQLBaseStore):
+ """ Background updates are updates to the database that run in the
+ background. Each update processes a batch of data at once. We attempt to
+ limit the impact of each update by monitoring how long each batch takes to
+ process and autotuning the batch size.
+ """
+
+ MINIMUM_BACKGROUND_BATCH_SIZE = 100
+ DEFAULT_BACKGROUND_BATCH_SIZE = 100
+ BACKGROUND_UPDATE_INTERVAL_MS = 1000
+ BACKGROUND_UPDATE_DURATION_MS = 100
+
+ def __init__(self, hs):
+ super(BackgroundUpdateStore, self).__init__(hs)
+ self._background_update_performance = {}
+ self._background_update_queue = []
+ self._background_update_handlers = {}
+ self._background_update_timer = None
+
+ @defer.inlineCallbacks
+ def start_doing_background_updates(self):
+ while True:
+ if self._background_update_timer is not None:
+ return
+
+ sleep = defer.Deferred()
+ self._background_update_timer = self._clock.call_later(
+ self.BACKGROUND_UPDATE_INTERVAL_MS / 1000., sleep.callback, None
+ )
+ try:
+ yield sleep
+ finally:
+ self._background_update_timer = None
+
+ try:
+ result = yield self.do_background_update(
+ self.BACKGROUND_UPDATE_DURATION_MS
+ )
+            except Exception:
+                logger.exception("Error doing update")
+                continue
+
+ if result is None:
+ logger.info(
+ "No more background updates to do."
+ " Unscheduling background update task."
+ )
+ return
+
+ @defer.inlineCallbacks
+ def do_background_update(self, desired_duration_ms):
+ """Does some amount of work on a background update
+ Args:
+ desired_duration_ms(float): How long we want to spend
+ updating.
+ Returns:
+ A deferred that completes once some amount of work is done.
+ The deferred will have a value of None if there is currently
+ no more work to do.
+ """
+ if not self._background_update_queue:
+ updates = yield self._simple_select_list(
+ "background_updates",
+ keyvalues=None,
+ retcols=("update_name",),
+ )
+ for update in updates:
+ self._background_update_queue.append(update['update_name'])
+
+ if not self._background_update_queue:
+ defer.returnValue(None)
+
+ update_name = self._background_update_queue.pop(0)
+ self._background_update_queue.append(update_name)
+
+ update_handler = self._background_update_handlers[update_name]
+
+ performance = self._background_update_performance.get(update_name)
+
+ if performance is None:
+ performance = BackgroundUpdatePerformance(update_name)
+ self._background_update_performance[update_name] = performance
+
+ items_per_ms = performance.average_items_per_ms()
+
+ if items_per_ms is not None:
+ batch_size = int(desired_duration_ms * items_per_ms)
+ # Clamp the batch size so that we always make progress
+ batch_size = max(batch_size, self.MINIMUM_BACKGROUND_BATCH_SIZE)
+ else:
+ batch_size = self.DEFAULT_BACKGROUND_BATCH_SIZE
+
+ progress_json = yield self._simple_select_one_onecol(
+ "background_updates",
+ keyvalues={"update_name": update_name},
+ retcol="progress_json"
+ )
+
+ progress = json.loads(progress_json)
+
+ time_start = self._clock.time_msec()
+ items_updated = yield update_handler(progress, batch_size)
+ time_stop = self._clock.time_msec()
+
+ duration_ms = time_stop - time_start
+
+ logger.info(
+ "Updating %r. Updated %r items in %rms."
+ " (total_rate=%r/ms, current_rate=%r/ms, total_updated=%r)",
+ update_name, items_updated, duration_ms,
+ performance.total_items_per_ms(),
+ performance.average_items_per_ms(),
+ performance.total_item_count,
+ )
+
+ performance.update(items_updated, duration_ms)
+
+ defer.returnValue(len(self._background_update_performance))
+
+ def register_background_update_handler(self, update_name, update_handler):
+ """Register a handler for doing a background update.
+
+ The handler should take two arguments:
+
+ * A dict of the current progress
+ * An integer count of the number of items to update in this batch.
+
+ The handler should return a deferred integer count of items updated.
+        The handler is responsible for updating the progress of the update.
+
+ Args:
+ update_name(str): The name of the update that this code handles.
+ update_handler(function): The function that does the update.
+ """
+ self._background_update_handlers[update_name] = update_handler
+
+ def start_background_update(self, update_name, progress):
+ """Starts a background update running.
+
+ Args:
+ update_name: The update to set running.
+ progress: The initial state of the progress of the update.
+
+ Returns:
+ A deferred that completes once the task has been added to the
+ queue.
+ """
+ # Clear the background update queue so that we will pick up the new
+ # task on the next iteration of do_background_update.
+ self._background_update_queue = []
+ progress_json = json.dumps(progress)
+
+ return self._simple_insert(
+ "background_updates",
+ {"update_name": update_name, "progress_json": progress_json}
+ )
+
+ def _end_background_update(self, update_name):
+ """Removes a completed background update task from the queue.
+
+ Args:
+ update_name(str): The name of the completed task to remove
+ Returns:
+ A deferred that completes once the task is removed.
+ """
+ self._background_update_queue = [
+ name for name in self._background_update_queue if name != update_name
+ ]
+ return self._simple_delete_one(
+ "background_updates", keyvalues={"update_name": update_name}
+ )
+
+ def _background_update_progress_txn(self, txn, update_name, progress):
+ """Update the progress of a background update
+
+ Args:
+ txn(cursor): The transaction.
+ update_name(str): The name of the background update task
+ progress(dict): The progress of the update.
+ """
+
+ progress_json = json.dumps(progress)
+
+ self._simple_update_one_txn(
+ txn,
+ "background_updates",
+ keyvalues={"update_name": update_name},
+ updatevalues={"progress_json": progress_json},
+ )
diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py
new file mode 100644
index 00000000..d92028ea
--- /dev/null
+++ b/synapse/storage/directory.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached
+
+from synapse.api.errors import SynapseError
+
+from twisted.internet import defer
+
+from collections import namedtuple
+
+
+RoomAliasMapping = namedtuple(
+ "RoomAliasMapping",
+ ("room_id", "room_alias", "servers",)
+)
+
+
+class DirectoryStore(SQLBaseStore):
+
+ @defer.inlineCallbacks
+ def get_association_from_room_alias(self, room_alias):
+ """ Get's the room_id and server list for a given room_alias
+
+ Args:
+ room_alias (RoomAlias)
+
+ Returns:
+ Deferred: results in namedtuple with keys "room_id" and
+ "servers" or None if no association can be found
+ """
+ room_id = yield self._simple_select_one_onecol(
+ "room_aliases",
+ {"room_alias": room_alias.to_string()},
+ "room_id",
+ allow_none=True,
+ desc="get_association_from_room_alias",
+ )
+
+ if not room_id:
+ defer.returnValue(None)
+ return
+
+ servers = yield self._simple_select_onecol(
+ "room_alias_servers",
+ {"room_alias": room_alias.to_string()},
+ "server",
+ desc="get_association_from_room_alias",
+ )
+
+ if not servers:
+ defer.returnValue(None)
+ return
+
+ defer.returnValue(
+ RoomAliasMapping(room_id, room_alias.to_string(), servers)
+ )
+
+ @defer.inlineCallbacks
+ def create_room_alias_association(self, room_alias, room_id, servers):
+ """ Creates an associatin between a room alias and room_id/servers
+
+ Args:
+ room_alias (RoomAlias)
+ room_id (str)
+ servers (list)
+
+ Returns:
+ Deferred
+ """
+ try:
+ yield self._simple_insert(
+ "room_aliases",
+ {
+ "room_alias": room_alias.to_string(),
+ "room_id": room_id,
+ },
+ desc="create_room_alias_association",
+ )
+ except self.database_engine.module.IntegrityError:
+ raise SynapseError(
+ 409, "Room alias %s already exists" % room_alias.to_string()
+ )
+
+ for server in servers:
+ # TODO(erikj): Fix this to bulk insert
+ yield self._simple_insert(
+ "room_alias_servers",
+ {
+ "room_alias": room_alias.to_string(),
+ "server": server,
+ },
+ desc="create_room_alias_association",
+ )
+ self.get_aliases_for_room.invalidate((room_id,))
+
+ @defer.inlineCallbacks
+ def delete_room_alias(self, room_alias):
+ room_id = yield self.runInteraction(
+ "delete_room_alias",
+ self._delete_room_alias_txn,
+ room_alias,
+ )
+
+ self.get_aliases_for_room.invalidate((room_id,))
+ defer.returnValue(room_id)
+
+ def _delete_room_alias_txn(self, txn, room_alias):
+ txn.execute(
+ "SELECT room_id FROM room_aliases WHERE room_alias = ?",
+ (room_alias.to_string(),)
+ )
+
+ res = txn.fetchone()
+ if res:
+ room_id = res[0]
+ else:
+ return None
+
+ txn.execute(
+ "DELETE FROM room_aliases WHERE room_alias = ?",
+ (room_alias.to_string(),)
+ )
+
+ txn.execute(
+ "DELETE FROM room_alias_servers WHERE room_alias = ?",
+ (room_alias.to_string(),)
+ )
+
+ return room_id
+
+ @cached()
+ def get_aliases_for_room(self, room_id):
+ return self._simple_select_onecol(
+ "room_aliases",
+ {"room_id": room_id},
+ "room_alias",
+ desc="get_aliases_for_room",
+ )
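+
+
+# Illustrative usage sketch (the alias is hypothetical; RoomAlias is the
+# wrapper type from synapse.types):
+#
+# mapping = yield store.get_association_from_room_alias(
+# RoomAlias.from_string("#example:example.com")
+# )
+# if mapping:
+# room_id, servers = mapping.room_id, mapping.servers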
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
new file mode 100644
index 00000000..325740d7
--- /dev/null
+++ b/synapse/storage/end_to_end_keys.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+
+
+class EndToEndKeyStore(SQLBaseStore):
+ def set_e2e_device_keys(self, user_id, device_id, time_now, json_bytes):
+ return self._simple_upsert(
+ table="e2e_device_keys_json",
+ keyvalues={
+ "user_id": user_id,
+ "device_id": device_id,
+ },
+ values={
+ "ts_added_ms": time_now,
+ "key_json": json_bytes,
+ }
+ )
+
+ def get_e2e_device_keys(self, query_list):
+ """Fetch a list of device keys.
+ Args:
+ query_list(list): List of pairs of user_ids and device_ids.
+ Returns:
+ Dict mapping from user-id to dict mapping from device_id to
+ key json byte strings.
+ """
+ def _get_e2e_device_keys(txn):
+ result = {}
+ for user_id, device_id in query_list:
+ user_result = result.setdefault(user_id, {})
+ keyvalues = {"user_id": user_id}
+ if device_id:
+ keyvalues["device_id"] = device_id
+ rows = self._simple_select_list_txn(
+ txn, table="e2e_device_keys_json",
+ keyvalues=keyvalues,
+ retcols=["device_id", "key_json"]
+ )
+ for row in rows:
+ user_result[row["device_id"]] = row["key_json"]
+ return result
+ return self.runInteraction("get_e2e_device_keys", _get_e2e_device_keys)
+
+ def add_e2e_one_time_keys(self, user_id, device_id, time_now, key_list):
+ def _add_e2e_one_time_keys(txn):
+ for (algorithm, key_id, json_bytes) in key_list:
+ self._simple_upsert_txn(
+ txn, table="e2e_one_time_keys_json",
+ keyvalues={
+ "user_id": user_id,
+ "device_id": device_id,
+ "algorithm": algorithm,
+ "key_id": key_id,
+ },
+ values={
+ "ts_added_ms": time_now,
+ "key_json": json_bytes,
+ }
+ )
+ return self.runInteraction(
+ "add_e2e_one_time_keys", _add_e2e_one_time_keys
+ )
+
+ def count_e2e_one_time_keys(self, user_id, device_id):
+ """ Count the number of one time keys the server has for a device
+ Returns:
+ Dict mapping from algorithm to number of keys for that algorithm.
+ """
+ def _count_e2e_one_time_keys(txn):
+ sql = (
+ "SELECT algorithm, COUNT(key_id) FROM e2e_one_time_keys_json"
+ " WHERE user_id = ? AND device_id = ?"
+ " GROUP BY algorithm"
+ )
+ txn.execute(sql, (user_id, device_id))
+ result = {}
+ for algorithm, key_count in txn.fetchall():
+ result[algorithm] = key_count
+ return result
+ return self.runInteraction(
+ "count_e2e_one_time_keys", _count_e2e_one_time_keys
+ )
+
+ def claim_e2e_one_time_keys(self, query_list):
+ """Take a list of one time keys out of the database"""
+ def _claim_e2e_one_time_keys(txn):
+ sql = (
+ "SELECT key_id, key_json FROM e2e_one_time_keys_json"
+ " WHERE user_id = ? AND device_id = ? AND algorithm = ?"
+ " LIMIT 1"
+ )
+ result = {}
+ delete = []
+ for user_id, device_id, algorithm in query_list:
+ user_result = result.setdefault(user_id, {})
+ device_result = user_result.setdefault(device_id, {})
+ txn.execute(sql, (user_id, device_id, algorithm))
+ for key_id, key_json in txn.fetchall():
+ device_result[algorithm + ":" + key_id] = key_json
+ delete.append((user_id, device_id, algorithm, key_id))
+ sql = (
+ "DELETE FROM e2e_one_time_keys_json"
+ " WHERE user_id = ? AND device_id = ? AND algorithm = ?"
+ " AND key_id = ?"
+ )
+ for user_id, device_id, algorithm, key_id in delete:
+ txn.execute(sql, (user_id, device_id, algorithm, key_id))
+ return result
+ return self.runInteraction(
+ "claim_e2e_one_time_keys", _claim_e2e_one_time_keys
+ )
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
new file mode 100644
index 00000000..bd3c8f94
--- /dev/null
+++ b/synapse/storage/engines/__init__.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import IncorrectDatabaseSetup
+from .postgres import PostgresEngine
+from .sqlite3 import Sqlite3Engine
+
+import importlib
+
+
+SUPPORTED_MODULE = {
+ "sqlite3": Sqlite3Engine,
+ "psycopg2": PostgresEngine,
+}
+
+
+def create_engine(name):
+ engine_class = SUPPORTED_MODULE.get(name, None)
+
+ if engine_class:
+ module = importlib.import_module(name)
+ return engine_class(module)
+
+ raise RuntimeError(
+ "Unsupported database engine '%s'" % (name,)
+ )
+
+
+__all__ = ["create_engine", "IncorrectDatabaseSetup"]
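+
+# Illustrative usage sketch (module names are the keys of SUPPORTED_MODULE):
+#
+# engine = create_engine("psycopg2") # or "sqlite3"
+# engine.check_database(txn) # may raise IncorrectDatabaseSetup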
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
new file mode 100644
index 00000000..0b549d31
--- /dev/null
+++ b/synapse/storage/engines/_base.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class IncorrectDatabaseSetup(RuntimeError):
+ pass
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
new file mode 100644
index 00000000..98d66e0a
--- /dev/null
+++ b/synapse/storage/engines/postgres.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.storage.prepare_database import prepare_database
+
+from ._base import IncorrectDatabaseSetup
+
+
+class PostgresEngine(object):
+ single_threaded = False
+
+ def __init__(self, database_module):
+ self.module = database_module
+ self.module.extensions.register_type(self.module.extensions.UNICODE)
+
+ def check_database(self, txn):
+ txn.execute("SHOW SERVER_ENCODING")
+ rows = txn.fetchall()
+ if rows and rows[0][0] != "UTF8":
+ raise IncorrectDatabaseSetup(
+ "Database has incorrect encoding: '%s' instead of 'UTF8'\n"
+ "See docs/postgres.rst for more information."
+ % (rows[0][0],)
+ )
+
+ def convert_param_style(self, sql):
+ return sql.replace("?", "%s")
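+ # e.g. "SELECT name FROM users WHERE id = ?" becomes
+ # "SELECT name FROM users WHERE id = %s", psycopg2's paramstyle.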
+
+ def on_new_connection(self, db_conn):
+ db_conn.set_isolation_level(
+ self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
+ )
+
+ def prepare_database(self, db_conn):
+ prepare_database(db_conn, self)
+
+ def is_deadlock(self, error):
+ if isinstance(error, self.module.DatabaseError):
+ return error.pgcode in ["40001", "40P01"]
+ return False
+
+ def is_connection_closed(self, conn):
+ return bool(conn.closed)
+
+ def lock_table(self, txn, table):
+ txn.execute("LOCK TABLE %s in EXCLUSIVE MODE" % (table,))
diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py
new file mode 100644
index 00000000..a5a54ec0
--- /dev/null
+++ b/synapse/storage/engines/sqlite3.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.storage.prepare_database import (
+ prepare_database, prepare_sqlite3_database
+)
+
+import struct
+
+
+class Sqlite3Engine(object):
+ single_threaded = True
+
+ def __init__(self, database_module):
+ self.module = database_module
+
+ def check_database(self, txn):
+ pass
+
+ def convert_param_style(self, sql):
+ return sql
+
+ def on_new_connection(self, db_conn):
+ self.prepare_database(db_conn)
+ db_conn.create_function("rank", 1, _rank)
+
+ def prepare_database(self, db_conn):
+ prepare_sqlite3_database(db_conn)
+ prepare_database(db_conn, self)
+
+ def is_deadlock(self, error):
+ return False
+
+ def is_connection_closed(self, conn):
+ return False
+
+ def lock_table(self, txn, table):
+ return
+
+
+# Following functions taken from: https://github.com/coleifer/peewee
+
+def _parse_match_info(buf):
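+ # matchinfo() returns a blob of native-endian 32-bit unsigned ints; with
+ # the default 'pcx' arguments that is [p, c] followed by three ints per
+ # (phrase, column) pair -- see the SQLite FTS3 docs.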
+ bufsize = len(buf)
+ return [struct.unpack('@I', buf[i:i+4])[0] for i in range(0, bufsize, 4)]
+
+
+def _rank(raw_match_info):
+ """Handle match_info called w/default args 'pcx' - based on the example rank
+ function http://sqlite.org/fts3.html#appendix_a
+ """
+ match_info = _parse_match_info(raw_match_info)
+ score = 0.0
+ p, c = match_info[:2]
+ for phrase_num in range(p):
+ phrase_info_idx = 2 + (phrase_num * c * 3)
+ for col_num in range(c):
+ col_idx = phrase_info_idx + (col_num * 3)
+ x1, x2 = match_info[col_idx:col_idx + 2]
+ if x1 > 0:
+ score += float(x1) / x2
+ return score
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
new file mode 100644
index 00000000..6d4421dd
--- /dev/null
+++ b/synapse/storage/event_federation.py
@@ -0,0 +1,432 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached
+from unpaddedbase64 import encode_base64
+
+import logging
+from Queue import PriorityQueue, Empty
+
+
+logger = logging.getLogger(__name__)
+
+
+class EventFederationStore(SQLBaseStore):
+ """ Responsible for storing and serving up the various graphs associated
+ with an event. Including the main event graph and the auth chains for an
+ event.
+
+ Also has methods for getting the front (latest) and back (oldest) edges
+ of the event graphs. These are used to generate the parents for new events
+ and backfilling from another server respectively.
+ """
+
+ def get_auth_chain(self, event_ids):
+ return self.get_auth_chain_ids(event_ids).addCallback(self._get_events)
+
+ def get_auth_chain_ids(self, event_ids):
+ return self.runInteraction(
+ "get_auth_chain_ids",
+ self._get_auth_chain_ids_txn,
+ event_ids
+ )
+
+ def _get_auth_chain_ids_txn(self, txn, event_ids):
+ results = set()
+
+ base_sql = (
+ "SELECT auth_id FROM event_auth WHERE event_id IN (%s)"
+ )
+
+ front = set(event_ids)
+ while front:
+ new_front = set()
+ front_list = list(front)
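+ # Chunk the IN clause so we never bind an unbounded number of
+ # parameters in a single query.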
+ chunks = [
+ front_list[x:x+100]
+ for x in xrange(0, len(front), 100)
+ ]
+ for chunk in chunks:
+ txn.execute(
+ base_sql % (",".join(["?"] * len(chunk)),),
+ chunk
+ )
+ new_front.update([r[0] for r in txn.fetchall()])
+
+ new_front -= results
+
+ front = new_front
+ results.update(front)
+
+ return list(results)
+
+ def get_oldest_events_in_room(self, room_id):
+ return self.runInteraction(
+ "get_oldest_events_in_room",
+ self._get_oldest_events_in_room_txn,
+ room_id,
+ )
+
+ def get_oldest_events_with_depth_in_room(self, room_id):
+ return self.runInteraction(
+ "get_oldest_events_with_depth_in_room",
+ self.get_oldest_events_with_depth_in_room_txn,
+ room_id,
+ )
+
+ def get_oldest_events_with_depth_in_room_txn(self, txn, room_id):
+ sql = (
+ "SELECT b.event_id, MAX(e.depth) FROM events as e"
+ " INNER JOIN event_edges as g"
+ " ON g.event_id = e.event_id AND g.room_id = e.room_id"
+ " INNER JOIN event_backward_extremities as b"
+ " ON g.prev_event_id = b.event_id AND g.room_id = b.room_id"
+ " WHERE b.room_id = ? AND g.is_state is ?"
+ " GROUP BY b.event_id"
+ )
+
+ txn.execute(sql, (room_id, False,))
+
+ return dict(txn.fetchall())
+
+ def _get_oldest_events_in_room_txn(self, txn, room_id):
+ return self._simple_select_onecol_txn(
+ txn,
+ table="event_backward_extremities",
+ keyvalues={
+ "room_id": room_id,
+ },
+ retcol="event_id",
+ )
+
+ def get_latest_events_in_room(self, room_id):
+ return self.runInteraction(
+ "get_latest_events_in_room",
+ self._get_latest_events_in_room,
+ room_id,
+ )
+
+ @cached()
+ def get_latest_event_ids_in_room(self, room_id):
+ return self._simple_select_onecol(
+ table="event_forward_extremities",
+ keyvalues={
+ "room_id": room_id,
+ },
+ retcol="event_id",
+ desc="get_latest_event_ids_in_room",
+ )
+
+ def _get_latest_events_in_room(self, txn, room_id):
+ sql = (
+ "SELECT e.event_id, e.depth FROM events as e "
+ "INNER JOIN event_forward_extremities as f "
+ "ON e.event_id = f.event_id "
+ "AND e.room_id = f.room_id "
+ "WHERE f.room_id = ?"
+ )
+
+ txn.execute(sql, (room_id, ))
+
+ results = []
+ for event_id, depth in txn.fetchall():
+ hashes = self._get_event_reference_hashes_txn(txn, event_id)
+ prev_hashes = {
+ k: encode_base64(v) for k, v in hashes.items()
+ if k == "sha256"
+ }
+ results.append((event_id, prev_hashes, depth))
+
+ return results
+
+ def get_min_depth(self, room_id):
+ """ For hte given room, get the minimum depth we have seen for it.
+ """
+ return self.runInteraction(
+ "get_min_depth",
+ self._get_min_depth_interaction,
+ room_id,
+ )
+
+ def _get_min_depth_interaction(self, txn, room_id):
+ min_depth = self._simple_select_one_onecol_txn(
+ txn,
+ table="room_depth",
+ keyvalues={"room_id": room_id},
+ retcol="min_depth",
+ allow_none=True,
+ )
+
+ return int(min_depth) if min_depth is not None else None
+
+ def _update_min_depth_for_room_txn(self, txn, room_id, depth):
+ min_depth = self._get_min_depth_interaction(txn, room_id)
+
+ do_insert = depth < min_depth if min_depth is not None else True
+
+ if do_insert:
+ self._simple_upsert_txn(
+ txn,
+ table="room_depth",
+ keyvalues={
+ "room_id": room_id,
+ },
+ values={
+ "min_depth": depth,
+ },
+ )
+
+ def _handle_mult_prev_events(self, txn, events):
+ """
+ For the given events, update the event edges table and the forward and
+ backward extremities tables.
+ """
+ self._simple_insert_many_txn(
+ txn,
+ table="event_edges",
+ values=[
+ {
+ "event_id": ev.event_id,
+ "prev_event_id": e_id,
+ "room_id": ev.room_id,
+ "is_state": False,
+ }
+ for ev in events
+ for e_id, _ in ev.prev_events
+ ],
+ )
+
+ self._update_extremeties(txn, events)
+
+ def _update_extremeties(self, txn, events):
+ """Updates the event_*_extremities tables based on the new/updated
+ events being persisted.
+
+ This is called for new events *and* for events that were outliers, but
+ are now being persisted as non-outliers.
+ """
+ events_by_room = {}
+ for ev in events:
+ events_by_room.setdefault(ev.room_id, []).append(ev)
+
+ for room_id, room_events in events_by_room.items():
+ prevs = [
+ e_id for ev in room_events for e_id, _ in ev.prev_events
+ if not ev.internal_metadata.is_outlier()
+ ]
+ if prevs:
+ txn.execute(
+ "DELETE FROM event_forward_extremities"
+ " WHERE room_id = ?"
+ " AND event_id in (%s)" % (
+ ",".join(["?"] * len(prevs)),
+ ),
+ [room_id] + prevs,
+ )
+
+ query = (
+ "INSERT INTO event_forward_extremities (event_id, room_id)"
+ " SELECT ?, ? WHERE NOT EXISTS ("
+ " SELECT 1 FROM event_edges WHERE prev_event_id = ?"
+ " )"
+ )
+
+ txn.executemany(
+ query,
+ [
+ (ev.event_id, ev.room_id, ev.event_id) for ev in events
+ if not ev.internal_metadata.is_outlier()
+ ]
+ )
+
+ query = (
+ "INSERT INTO event_backward_extremities (event_id, room_id)"
+ " SELECT ?, ? WHERE NOT EXISTS ("
+ " SELECT 1 FROM event_backward_extremities"
+ " WHERE event_id = ? AND room_id = ?"
+ " )"
+ " AND NOT EXISTS ("
+ " SELECT 1 FROM events WHERE event_id = ? AND room_id = ? "
+ " AND outlier = ?"
+ " )"
+ )
+
+ txn.executemany(query, [
+ (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False)
+ for ev in events for e_id, _ in ev.prev_events
+ if not ev.internal_metadata.is_outlier()
+ ])
+
+ query = (
+ "DELETE FROM event_backward_extremities"
+ " WHERE event_id = ? AND room_id = ?"
+ )
+ txn.executemany(
+ query,
+ [
+ (ev.event_id, ev.room_id) for ev in events
+ if not ev.internal_metadata.is_outlier()
+ ]
+ )
+
+ for room_id in events_by_room:
+ txn.call_after(
+ self.get_latest_event_ids_in_room.invalidate, (room_id,)
+ )
+
+ def get_backfill_events(self, room_id, event_list, limit):
+ """Get a list of Events for a given topic that occurred before (and
+ including) the events in event_list. Return a list of max size `limit`
+
+ Args:
+ txn
+ room_id (str)
+ event_list (list)
+ limit (int)
+ """
+ return self.runInteraction(
+ "get_backfill_events",
+ self._get_backfill_events, room_id, event_list, limit
+ ).addCallback(
+ self._get_events
+ ).addCallback(
+ lambda l: sorted(l, key=lambda e: -e.depth)
+ )
+
+ def _get_backfill_events(self, txn, room_id, event_list, limit):
+ logger.debug(
+ "_get_backfill_events: %s, %s, %s",
+ room_id, repr(event_list), limit
+ )
+
+ event_results = set()
+
+ # We want to make sure that we do a breadth-first, "depth" ordered
+ # search.
+
+ query = (
+ "SELECT depth, prev_event_id FROM event_edges"
+ " INNER JOIN events"
+ " ON prev_event_id = events.event_id"
+ " AND event_edges.room_id = events.room_id"
+ " WHERE event_edges.room_id = ? AND event_edges.event_id = ?"
+ " AND event_edges.is_state = ?"
+ " LIMIT ?"
+ )
+
+ queue = PriorityQueue()
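+ # PriorityQueue pops the smallest entry first, so depths are negated
+ # below to visit the deepest (most recent) events first.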
+
+ for event_id in event_list:
+ depth = self._simple_select_one_onecol_txn(
+ txn,
+ table="events",
+ keyvalues={
+ "event_id": event_id,
+ },
+ retcol="depth",
+ allow_none=True,
+ )
+
+ if depth:
+ queue.put((-depth, event_id))
+
+ while not queue.empty() and len(event_results) < limit:
+ try:
+ _, event_id = queue.get_nowait()
+ except Empty:
+ break
+
+ if event_id in event_results:
+ continue
+
+ event_results.add(event_id)
+
+ txn.execute(
+ query,
+ (room_id, event_id, False, limit - len(event_results))
+ )
+
+ for row in txn.fetchall():
+ if row[1] not in event_results:
+ queue.put((-row[0], row[1]))
+
+ return event_results
+
+ @defer.inlineCallbacks
+ def get_missing_events(self, room_id, earliest_events, latest_events,
+ limit, min_depth):
+ ids = yield self.runInteraction(
+ "get_missing_events",
+ self._get_missing_events,
+ room_id, earliest_events, latest_events, limit, min_depth
+ )
+
+ events = yield self._get_events(ids)
+
+ events = sorted(
+ [ev for ev in events if ev.depth >= min_depth],
+ key=lambda e: e.depth,
+ )
+
+ defer.returnValue(events[:limit])
+
+ def _get_missing_events(self, txn, room_id, earliest_events, latest_events,
+ limit, min_depth):
+
+ earliest_events = set(earliest_events)
+ front = set(latest_events) - earliest_events
+
+ event_results = set()
+
+ query = (
+ "SELECT prev_event_id FROM event_edges "
+ "WHERE room_id = ? AND event_id = ? AND is_state = ? "
+ "LIMIT ?"
+ )
+
+ while front and len(event_results) < limit:
+ new_front = set()
+ for event_id in front:
+ txn.execute(
+ query,
+ (room_id, event_id, False, limit - len(event_results))
+ )
+
+ for e_id, in txn.fetchall():
+ new_front.add(e_id)
+
+ new_front -= earliest_events
+ new_front -= event_results
+
+ front = new_front
+ event_results |= new_front
+
+ return event_results
+
+ def clean_room_for_join(self, room_id):
+ return self.runInteraction(
+ "clean_room_for_join",
+ self._clean_room_for_join_txn,
+ room_id,
+ )
+
+ def _clean_room_for_join_txn(self, txn, room_id):
+ query = "DELETE FROM event_forward_extremities WHERE room_id = ?"
+
+ txn.execute(query, (room_id,))
+ txn.call_after(self.get_latest_event_ids_in_room.invalidate, (room_id,))
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
new file mode 100644
index 00000000..5d35ca90
--- /dev/null
+++ b/synapse/storage/events.py
@@ -0,0 +1,966 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from ._base import SQLBaseStore, _RollbackButIsFineException
+
+from twisted.internet import defer, reactor
+
+from synapse.events import FrozenEvent, USE_FROZEN_DICTS
+from synapse.events.utils import prune_event
+
+from synapse.util.logcontext import preserve_context_over_deferred
+from synapse.util.logutils import log_function
+from synapse.api.constants import EventTypes
+
+from canonicaljson import encode_canonical_json
+from contextlib import contextmanager
+
+import logging
+import math
+import ujson as json
+
+logger = logging.getLogger(__name__)
+
+
+def encode_json(json_object):
+ if USE_FROZEN_DICTS:
+ # ujson doesn't like frozen_dicts
+ return encode_canonical_json(json_object)
+ else:
+ return json.dumps(json_object, ensure_ascii=False)
+
+# These values are used in the `_enqueue_events` and `_do_fetch` methods to
+# control how we batch/bulk fetch events from the database.
+# The values are plucked out of thin air to make initial sync run faster
+# on jki.re
+# TODO: Make these configurable.
+EVENT_QUEUE_THREADS = 3 # Max number of threads that will fetch events
+EVENT_QUEUE_ITERATIONS = 3 # No. times we block waiting for requests for events
+EVENT_QUEUE_TIMEOUT_S = 0.1 # Timeout when waiting for requests for events
+
+
+class EventsStore(SQLBaseStore):
+ @defer.inlineCallbacks
+ def persist_events(self, events_and_contexts, backfilled=False,
+ is_new_state=True):
+ if not events_and_contexts:
+ return
+
+ if backfilled:
+ if not self.min_token_deferred.called:
+ yield self.min_token_deferred
+ start = self.min_token - 1
+ self.min_token -= len(events_and_contexts) + 1
+ stream_orderings = range(start, self.min_token, -1)
+
+ @contextmanager
+ def stream_ordering_manager():
+ yield stream_orderings
+ stream_ordering_manager = stream_ordering_manager()
+ else:
+ stream_ordering_manager = yield self._stream_id_gen.get_next_mult(
+ self, len(events_and_contexts)
+ )
+
+ with stream_ordering_manager as stream_orderings:
+ for (event, _), stream in zip(events_and_contexts, stream_orderings):
+ event.internal_metadata.stream_ordering = stream
+
+ chunks = [
+ events_and_contexts[x:x+100]
+ for x in xrange(0, len(events_and_contexts), 100)
+ ]
+
+ for chunk in chunks:
+ # We can't easily parallelize these since different chunks
+ # might contain the same event. :(
+ yield self.runInteraction(
+ "persist_events",
+ self._persist_events_txn,
+ events_and_contexts=chunk,
+ backfilled=backfilled,
+ is_new_state=is_new_state,
+ )
+
+ @defer.inlineCallbacks
+ @log_function
+ def persist_event(self, event, context, backfilled=False,
+ is_new_state=True, current_state=None):
+ stream_ordering = None
+ if backfilled:
+ if not self.min_token_deferred.called:
+ yield self.min_token_deferred
+ self.min_token -= 1
+ stream_ordering = self.min_token
+
+ if stream_ordering is None:
+ stream_ordering_manager = yield self._stream_id_gen.get_next(self)
+ else:
+ @contextmanager
+ def stream_ordering_manager():
+ yield stream_ordering
+ stream_ordering_manager = stream_ordering_manager()
+
+ try:
+ with stream_ordering_manager as stream_ordering:
+ event.internal_metadata.stream_ordering = stream_ordering
+ yield self.runInteraction(
+ "persist_event",
+ self._persist_event_txn,
+ event=event,
+ context=context,
+ backfilled=backfilled,
+ is_new_state=is_new_state,
+ current_state=current_state,
+ )
+ except _RollbackButIsFineException:
+ pass
+
+ max_persisted_id = yield self._stream_id_gen.get_max_token(self)
+ defer.returnValue((stream_ordering, max_persisted_id))
+
+ @defer.inlineCallbacks
+ def get_event(self, event_id, check_redacted=True,
+ get_prev_content=False, allow_rejected=False,
+ allow_none=False):
+ """Get an event from the database by event_id.
+
+ Args:
+ event_id (str): The event_id of the event to fetch
+ check_redacted (bool): If True, check if event has been redacted
+ and redact it.
+ get_prev_content (bool): If True and event is a state event,
+ include the previous states content in the unsigned field.
+ allow_rejected (bool): If True return rejected events.
+ allow_none (bool): If True, return None if no event found, if
+ False throw an exception.
+
+ Returns:
+ Deferred : A FrozenEvent.
+ """
+ events = yield self._get_events(
+ [event_id],
+ check_redacted=check_redacted,
+ get_prev_content=get_prev_content,
+ allow_rejected=allow_rejected,
+ )
+
+ if not events and not allow_none:
+ raise RuntimeError("Could not find event %s" % (event_id,))
+
+ defer.returnValue(events[0] if events else None)
+
+ @log_function
+ def _persist_event_txn(self, txn, event, context, backfilled,
+ is_new_state=True, current_state=None):
+ # We purposefully do this first since if we include a `current_state`
+ # key, we *want* to update the `current_state_events` table
+ if current_state:
+ txn.call_after(self.get_current_state_for_key.invalidate_all)
+ txn.call_after(self.get_rooms_for_user.invalidate_all)
+ txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
+ txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,))
+ txn.call_after(self.get_room_name_and_aliases.invalidate, (event.room_id,))
+
+ self._simple_delete_txn(
+ txn,
+ table="current_state_events",
+ keyvalues={"room_id": event.room_id},
+ )
+
+ for s in current_state:
+ self._simple_insert_txn(
+ txn,
+ "current_state_events",
+ {
+ "event_id": s.event_id,
+ "room_id": s.room_id,
+ "type": s.type,
+ "state_key": s.state_key,
+ }
+ )
+
+ return self._persist_events_txn(
+ txn,
+ [(event, context)],
+ backfilled=backfilled,
+ is_new_state=is_new_state,
+ )
+
+ @log_function
+ def _persist_events_txn(self, txn, events_and_contexts, backfilled,
+ is_new_state=True):
+
+ # Remove any existing cache entries for the event_ids
+ for event, _ in events_and_contexts:
+ txn.call_after(self._invalidate_get_event_cache, event.event_id)
+
+ depth_updates = {}
+ for event, _ in events_and_contexts:
+ if event.internal_metadata.is_outlier():
+ continue
+ depth_updates[event.room_id] = max(
+ event.depth, depth_updates.get(event.room_id, event.depth)
+ )
+
+ for room_id, depth in depth_updates.items():
+ self._update_min_depth_for_room_txn(txn, room_id, depth)
+
+ txn.execute(
+ "SELECT event_id, outlier FROM events WHERE event_id in (%s)" % (
+ ",".join(["?"] * len(events_and_contexts)),
+ ),
+ [event.event_id for event, _ in events_and_contexts]
+ )
+ have_persisted = {
+ event_id: outlier
+ for event_id, outlier in txn.fetchall()
+ }
+
+ event_map = {}
+ to_remove = set()
+ for event, context in events_and_contexts:
+ # Handle the case of the list including the same event multiple
+ # times. The tricky thing here is when they differ by whether
+ # they are an outlier.
+ if event.event_id in event_map:
+ other = event_map[event.event_id]
+
+ if not other.internal_metadata.is_outlier():
+ to_remove.add(event)
+ continue
+ elif not event.internal_metadata.is_outlier():
+ to_remove.add(event)
+ continue
+ else:
+ to_remove.add(other)
+
+ event_map[event.event_id] = event
+
+ if event.event_id not in have_persisted:
+ continue
+
+ to_remove.add(event)
+
+ outlier_persisted = have_persisted[event.event_id]
+ if not event.internal_metadata.is_outlier() and outlier_persisted:
+ self._store_state_groups_txn(
+ txn, event, context,
+ )
+
+ metadata_json = encode_json(
+ event.internal_metadata.get_dict()
+ ).decode("UTF-8")
+
+ sql = (
+ "UPDATE event_json SET internal_metadata = ?"
+ " WHERE event_id = ?"
+ )
+ txn.execute(
+ sql,
+ (metadata_json, event.event_id,)
+ )
+
+ sql = (
+ "UPDATE events SET outlier = ?"
+ " WHERE event_id = ?"
+ )
+ txn.execute(
+ sql,
+ (False, event.event_id,)
+ )
+
+ self._update_extremeties(txn, [event])
+
+ events_and_contexts = filter(
+ lambda ec: ec[0] not in to_remove,
+ events_and_contexts
+ )
+
+ if not events_and_contexts:
+ return
+
+ self._store_mult_state_groups_txn(txn, [
+ (event, context)
+ for event, context in events_and_contexts
+ if not event.internal_metadata.is_outlier()
+ ])
+
+ self._handle_mult_prev_events(
+ txn,
+ events=[event for event, _ in events_and_contexts],
+ )
+
+ for event, _ in events_and_contexts:
+ if event.type == EventTypes.Name:
+ self._store_room_name_txn(txn, event)
+ elif event.type == EventTypes.Topic:
+ self._store_room_topic_txn(txn, event)
+ elif event.type == EventTypes.Message:
+ self._store_room_message_txn(txn, event)
+ elif event.type == EventTypes.Redaction:
+ self._store_redaction(txn, event)
+ elif event.type == EventTypes.RoomHistoryVisibility:
+ self._store_history_visibility_txn(txn, event)
+ elif event.type == EventTypes.GuestAccess:
+ self._store_guest_access_txn(txn, event)
+
+ self._store_room_members_txn(
+ txn,
+ [
+ event
+ for event, _ in events_and_contexts
+ if event.type == EventTypes.Member
+ ]
+ )
+
+ def event_dict(event):
+ return {
+ k: v
+ for k, v in event.get_dict().items()
+ if k not in [
+ "redacted",
+ "redacted_because",
+ ]
+ }
+
+ self._simple_insert_many_txn(
+ txn,
+ table="event_json",
+ values=[
+ {
+ "event_id": event.event_id,
+ "room_id": event.room_id,
+ "internal_metadata": encode_json(
+ event.internal_metadata.get_dict()
+ ).decode("UTF-8"),
+ "json": encode_json(event_dict(event)).decode("UTF-8"),
+ }
+ for event, _ in events_and_contexts
+ ],
+ )
+
+ self._simple_insert_many_txn(
+ txn,
+ table="events",
+ values=[
+ {
+ "stream_ordering": event.internal_metadata.stream_ordering,
+ "topological_ordering": event.depth,
+ "depth": event.depth,
+ "event_id": event.event_id,
+ "room_id": event.room_id,
+ "type": event.type,
+ "processed": True,
+ "outlier": event.internal_metadata.is_outlier(),
+ "content": encode_json(event.content).decode("UTF-8"),
+ }
+ for event, _ in events_and_contexts
+ ],
+ )
+
+ # Store rejections for every rejected event, not just whichever
+ # (event, context) pair the loop above happened to finish on.
+ for event, context in events_and_contexts:
+ if context.rejected:
+ self._store_rejections_txn(
+ txn, event.event_id, context.rejected
+ )
+
+ self._simple_insert_many_txn(
+ txn,
+ table="event_auth",
+ values=[
+ {
+ "event_id": event.event_id,
+ "room_id": event.room_id,
+ "auth_id": auth_id,
+ }
+ for event, _ in events_and_contexts
+ for auth_id, _ in event.auth_events
+ ],
+ )
+
+ self._store_event_reference_hashes_txn(
+ txn, [event for event, _ in events_and_contexts]
+ )
+
+ state_events_and_contexts = filter(
+ lambda i: i[0].is_state(),
+ events_and_contexts,
+ )
+
+ state_values = []
+ for event, context in state_events_and_contexts:
+ vals = {
+ "event_id": event.event_id,
+ "room_id": event.room_id,
+ "type": event.type,
+ "state_key": event.state_key,
+ }
+
+ # TODO: How does this work with backfilling?
+ if hasattr(event, "replaces_state"):
+ vals["prev_state"] = event.replaces_state
+
+ state_values.append(vals)
+
+ self._simple_insert_many_txn(
+ txn,
+ table="state_events",
+ values=state_values,
+ )
+
+ self._simple_insert_many_txn(
+ txn,
+ table="event_edges",
+ values=[
+ {
+ "event_id": event.event_id,
+ "prev_event_id": prev_id,
+ "room_id": event.room_id,
+ "is_state": True,
+ }
+ for event, _ in state_events_and_contexts
+ for prev_id, _ in event.prev_state
+ ],
+ )
+
+ if is_new_state:
+ for event, context in state_events_and_contexts:
+ if not context.rejected:
+ txn.call_after(
+ self.get_current_state_for_key.invalidate,
+ (event.room_id, event.type, event.state_key,)
+ )
+
+ if event.type in [EventTypes.Name, EventTypes.Aliases]:
+ txn.call_after(
+ self.get_room_name_and_aliases.invalidate,
+ (event.room_id,)
+ )
+
+ self._simple_upsert_txn(
+ txn,
+ "current_state_events",
+ keyvalues={
+ "room_id": event.room_id,
+ "type": event.type,
+ "state_key": event.state_key,
+ },
+ values={
+ "event_id": event.event_id,
+ }
+ )
+
+ return
+
+ def _store_redaction(self, txn, event):
+ # invalidate the cache for the redacted event
+ txn.call_after(self._invalidate_get_event_cache, event.redacts)
+ txn.execute(
+ "INSERT INTO redactions (event_id, redacts) VALUES (?,?)",
+ (event.event_id, event.redacts)
+ )
+
+ def have_events(self, event_ids):
+ """Given a list of event ids, check if we have already processed them.
+
+ Returns:
+ dict: Has an entry for each event id we already have seen. Maps to
+ the rejected reason string if we rejected the event, else maps to
+ None.
+ """
+ if not event_ids:
+ return defer.succeed({})
+
+ def f(txn):
+ sql = (
+ "SELECT e.event_id, reason FROM events as e "
+ "LEFT JOIN rejections as r ON e.event_id = r.event_id "
+ "WHERE e.event_id = ?"
+ )
+
+ res = {}
+ for event_id in event_ids:
+ txn.execute(sql, (event_id,))
+ row = txn.fetchone()
+ if row:
+ _, rejected = row
+ res[event_id] = rejected
+
+ return res
+
+ return self.runInteraction(
+ "have_events", f,
+ )
+
+ @defer.inlineCallbacks
+ def _get_events(self, event_ids, check_redacted=True,
+ get_prev_content=False, allow_rejected=False):
+ if not event_ids:
+ defer.returnValue([])
+
+ event_map = self._get_events_from_cache(
+ event_ids,
+ check_redacted=check_redacted,
+ get_prev_content=get_prev_content,
+ allow_rejected=allow_rejected,
+ )
+
+ missing_events_ids = [e for e in event_ids if e not in event_map]
+
+ if not missing_events_ids:
+ defer.returnValue([
+ event_map[e_id] for e_id in event_ids
+ if e_id in event_map and event_map[e_id]
+ ])
+
+ missing_events = yield self._enqueue_events(
+ missing_events_ids,
+ check_redacted=check_redacted,
+ get_prev_content=get_prev_content,
+ allow_rejected=allow_rejected,
+ )
+
+ event_map.update(missing_events)
+
+ defer.returnValue([
+ event_map[e_id] for e_id in event_ids
+ if e_id in event_map and event_map[e_id]
+ ])
+
+ def _get_events_txn(self, txn, event_ids, check_redacted=True,
+ get_prev_content=False, allow_rejected=False):
+ if not event_ids:
+ return []
+
+ event_map = self._get_events_from_cache(
+ event_ids,
+ check_redacted=check_redacted,
+ get_prev_content=get_prev_content,
+ allow_rejected=allow_rejected,
+ )
+
+ missing_events_ids = [e for e in event_ids if e not in event_map]
+
+ if not missing_events_ids:
+ return [
+ event_map[e_id] for e_id in event_ids
+ if e_id in event_map and event_map[e_id]
+ ]
+
+ missing_events = self._fetch_events_txn(
+ txn,
+ missing_events_ids,
+ check_redacted=check_redacted,
+ get_prev_content=get_prev_content,
+ allow_rejected=allow_rejected,
+ )
+
+ event_map.update(missing_events)
+
+ return [
+ event_map[e_id] for e_id in event_ids
+ if e_id in event_map and event_map[e_id]
+ ]
+
+ def _invalidate_get_event_cache(self, event_id):
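+ # The event cache is keyed on (event_id, check_redacted,
+ # get_prev_content), so all four flag combinations must be dropped.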
+ for check_redacted in (False, True):
+ for get_prev_content in (False, True):
+ self._get_event_cache.invalidate(
+ (event_id, check_redacted, get_prev_content)
+ )
+
+ def _get_event_txn(self, txn, event_id, check_redacted=True,
+ get_prev_content=False, allow_rejected=False):
+
+ events = self._get_events_txn(
+ txn, [event_id],
+ check_redacted=check_redacted,
+ get_prev_content=get_prev_content,
+ allow_rejected=allow_rejected,
+ )
+
+ return events[0] if events else None
+
+ def _get_events_from_cache(self, events, check_redacted, get_prev_content,
+ allow_rejected):
+ event_map = {}
+
+ for event_id in events:
+ try:
+ ret = self._get_event_cache.get(
+ (event_id, check_redacted, get_prev_content,)
+ )
+
+ if allow_rejected or not ret.rejected_reason:
+ event_map[event_id] = ret
+ else:
+ event_map[event_id] = None
+ except KeyError:
+ pass
+
+ return event_map
+
+ def _do_fetch(self, conn):
+ """Takes a database connection and waits for requests for events from
+ the _event_fetch_list queue.
+ """
+ event_list = []
+ i = 0
+ while True:
+ try:
+ with self._event_fetch_lock:
+ event_list = self._event_fetch_list
+ self._event_fetch_list = []
+
+ if not event_list:
+ single_threaded = self.database_engine.single_threaded
+ if single_threaded or i > EVENT_QUEUE_ITERATIONS:
+ self._event_fetch_ongoing -= 1
+ return
+ else:
+ self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)
+ i += 1
+ continue
+ i = 0
+
+ event_id_lists = zip(*event_list)[0]
+ event_ids = [
+ item for sublist in event_id_lists for item in sublist
+ ]
+
+ rows = self._new_transaction(
+ conn, "do_fetch", [], self._fetch_event_rows, event_ids
+ )
+
+ row_dict = {
+ r["event_id"]: r
+ for r in rows
+ }
+
+ # We only want to resolve deferreds from the main thread
+ def fire(lst, res):
+ for ids, d in lst:
+ if not d.called:
+ try:
+ d.callback([
+ res[i]
+ for i in ids
+ if i in res
+ ])
+ except:
+ logger.exception("Failed to callback")
+ reactor.callFromThread(fire, event_list, row_dict)
+ except Exception as e:
+ logger.exception("do_fetch")
+
+ # We only want to resolve deferreds from the main thread
+ def fire(evs):
+ for _, d in evs:
+ if not d.called:
+ d.errback(e)
+
+ if event_list:
+ reactor.callFromThread(fire, event_list)
+
+ @defer.inlineCallbacks
+ def _enqueue_events(self, events, check_redacted=True,
+ get_prev_content=False, allow_rejected=False):
+ """Fetches events from the database using the _event_fetch_list. This
+ allows batch and bulk fetching of events - it allows us to fetch events
+ without having to create a new transaction for each request for events.
+ """
+ if not events:
+ defer.returnValue({})
+
+ events_d = defer.Deferred()
+ with self._event_fetch_lock:
+ self._event_fetch_list.append(
+ (events, events_d)
+ )
+
+ self._event_fetch_lock.notify()
+
+ if self._event_fetch_ongoing < EVENT_QUEUE_THREADS:
+ self._event_fetch_ongoing += 1
+ should_start = True
+ else:
+ should_start = False
+
+ if should_start:
+ self.runWithConnection(
+ self._do_fetch
+ )
+
+ rows = yield preserve_context_over_deferred(events_d)
+
+ if not allow_rejected:
+ rows[:] = [r for r in rows if not r["rejects"]]
+
+ res = yield defer.gatherResults(
+ [
+ self._get_event_from_row(
+ row["internal_metadata"], row["json"], row["redacts"],
+ check_redacted=check_redacted,
+ get_prev_content=get_prev_content,
+ rejected_reason=row["rejects"],
+ )
+ for row in rows
+ ],
+ consumeErrors=True
+ )
+
+ defer.returnValue({
+ e.event_id: e
+ for e in res if e
+ })
+
+ def _fetch_event_rows(self, txn, events):
+ rows = []
+ N = 200
+ for i in range(1 + len(events) / N):
+ evs = events[i*N:(i + 1)*N]
+ if not evs:
+ break
+
+ sql = (
+ "SELECT "
+ " e.event_id as event_id, "
+ " e.internal_metadata,"
+ " e.json,"
+ " r.redacts as redacts,"
+ " rej.event_id as rejects "
+ " FROM event_json as e"
+ " LEFT JOIN rejections as rej USING (event_id)"
+ " LEFT JOIN redactions as r ON e.event_id = r.redacts"
+ " WHERE e.event_id IN (%s)"
+ ) % (",".join(["?"]*len(evs)),)
+
+ txn.execute(sql, evs)
+ rows.extend(self.cursor_to_dict(txn))
+
+ return rows
+
+ def _fetch_events_txn(self, txn, events, check_redacted=True,
+ get_prev_content=False, allow_rejected=False):
+ if not events:
+ return {}
+
+ rows = self._fetch_event_rows(
+ txn, events,
+ )
+
+ if not allow_rejected:
+ rows[:] = [r for r in rows if not r["rejects"]]
+
+ res = [
+ self._get_event_from_row_txn(
+ txn,
+ row["internal_metadata"], row["json"], row["redacts"],
+ check_redacted=check_redacted,
+ get_prev_content=get_prev_content,
+ rejected_reason=row["rejects"],
+ )
+ for row in rows
+ ]
+
+ return {
+ r.event_id: r
+ for r in res
+ }
+
+ @defer.inlineCallbacks
+ def _get_event_from_row(self, internal_metadata, js, redacted,
+ check_redacted=True, get_prev_content=False,
+ rejected_reason=None):
+ d = json.loads(js)
+ internal_metadata = json.loads(internal_metadata)
+
+ if rejected_reason:
+ rejected_reason = yield self._simple_select_one_onecol(
+ table="rejections",
+ keyvalues={"event_id": rejected_reason},
+ retcol="reason",
+ desc="_get_event_from_row",
+ )
+
+ ev = FrozenEvent(
+ d,
+ internal_metadata_dict=internal_metadata,
+ rejected_reason=rejected_reason,
+ )
+
+ if check_redacted and redacted:
+ ev = prune_event(ev)
+
+ redaction_id = yield self._simple_select_one_onecol(
+ table="redactions",
+ keyvalues={"redacts": ev.event_id},
+ retcol="event_id",
+ desc="_get_event_from_row",
+ )
+
+ ev.unsigned["redacted_by"] = redaction_id
+ # Get the redaction event.
+
+ because = yield self.get_event(
+ redaction_id,
+ check_redacted=False,
+ allow_none=True,
+ )
+
+ if because:
+ # It's fine to add the event directly, since get_pdu_json
+ # will serialise this field correctly
+ ev.unsigned["redacted_because"] = because
+
+ if get_prev_content and "replaces_state" in ev.unsigned:
+ prev = yield self.get_event(
+ ev.unsigned["replaces_state"],
+ get_prev_content=False,
+ allow_none=True,
+ )
+ if prev:
+ ev.unsigned["prev_content"] = prev.content
+ ev.unsigned["prev_sender"] = prev.sender
+
+ self._get_event_cache.prefill(
+ (ev.event_id, check_redacted, get_prev_content), ev
+ )
+
+ defer.returnValue(ev)
+
+ def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted,
+ check_redacted=True, get_prev_content=False,
+ rejected_reason=None):
+ d = json.loads(js)
+ internal_metadata = json.loads(internal_metadata)
+
+ if rejected_reason:
+ rejected_reason = self._simple_select_one_onecol_txn(
+ txn,
+ table="rejections",
+ keyvalues={"event_id": rejected_reason},
+ retcol="reason",
+ )
+
+ ev = FrozenEvent(
+ d,
+ internal_metadata_dict=internal_metadata,
+ rejected_reason=rejected_reason,
+ )
+
+ if check_redacted and redacted:
+ ev = prune_event(ev)
+
+ redaction_id = self._simple_select_one_onecol_txn(
+ txn,
+ table="redactions",
+ keyvalues={"redacts": ev.event_id},
+ retcol="event_id",
+ )
+
+ ev.unsigned["redacted_by"] = redaction_id
+ # Get the redaction event.
+
+ because = self._get_event_txn(
+ txn,
+ redaction_id,
+ check_redacted=False
+ )
+
+ if because:
+ ev.unsigned["redacted_because"] = because
+
+ if get_prev_content and "replaces_state" in ev.unsigned:
+ prev = self._get_event_txn(
+ txn,
+ ev.unsigned["replaces_state"],
+ get_prev_content=False,
+ )
+ if prev:
+ ev.unsigned["prev_content"] = prev.content
+ ev.unsigned["prev_sender"] = prev.sender
+
+ self._get_event_cache.prefill(
+ (ev.event_id, check_redacted, get_prev_content), ev
+ )
+
+ return ev
+
+ def _parse_events_txn(self, txn, rows):
+ event_ids = [r["event_id"] for r in rows]
+
+ return self._get_events_txn(txn, event_ids)
+
+ @defer.inlineCallbacks
+ def count_daily_messages(self):
+ """
+ Returns an estimate of the number of messages sent in the last day.
+
+ If it has been significantly less or more than one day since the last
+ call to this function, it will return None.
+ """
+ def _count_messages(txn):
+ now = self.hs.get_clock().time()
+
+ txn.execute(
+ "SELECT reported_stream_token, reported_time FROM stats_reporting"
+ )
+ last_reported = self.cursor_to_dict(txn)
+
+ txn.execute(
+ "SELECT stream_ordering"
+ " FROM events"
+ " ORDER BY stream_ordering DESC"
+ " LIMIT 1"
+ )
+ now_reporting = self.cursor_to_dict(txn)
+ if not now_reporting:
+ return None
+ now_reporting = now_reporting[0]["stream_ordering"]
+
+ txn.execute("DELETE FROM stats_reporting")
+ txn.execute(
+ "INSERT INTO stats_reporting"
+ " (reported_stream_token, reported_time)"
+ " VALUES (?, ?)",
+ (now_reporting, now,)
+ )
+
+ if not last_reported:
+ return None
+
+ # Close enough to correct for our purposes.
+ yesterday = (now - 24 * 60 * 60)
+ if math.fabs(yesterday - last_reported[0]["reported_time"]) > 60 * 60:
+ return None
+
+ txn.execute(
+ "SELECT COUNT(*) as messages"
+ " FROM events NATURAL JOIN event_json"
+ " WHERE json like '%m.room.message%'"
+ " AND stream_ordering > ?"
+ " AND stream_ordering <= ?",
+ (
+ last_reported[0]["reported_stream_token"],
+ now_reporting,
+ )
+ )
+ rows = self.cursor_to_dict(txn)
+ if not rows:
+ return None
+ return rows[0]["messages"]
+
+ ret = yield self.runInteraction("count_messages", _count_messages)
+ defer.returnValue(ret)
diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py
new file mode 100644
index 00000000..fcd43c7f
--- /dev/null
+++ b/synapse/storage/filtering.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from ._base import SQLBaseStore
+
+import simplejson as json
+
+
+class FilteringStore(SQLBaseStore):
+ @defer.inlineCallbacks
+ def get_user_filter(self, user_localpart, filter_id):
+ def_json = yield self._simple_select_one_onecol(
+ table="user_filters",
+ keyvalues={
+ "user_id": user_localpart,
+ "filter_id": filter_id,
+ },
+ retcol="filter_json",
+ allow_none=False,
+ desc="get_user_filter",
+ )
+
+ defer.returnValue(json.loads(str(def_json).decode("utf-8")))
+
+ def add_user_filter(self, user_localpart, user_filter):
+ def_json = json.dumps(user_filter).encode("utf-8")
+
+ # Need an atomic transaction to SELECT the maximal ID so far then
+ # INSERT a new one
+ def _do_txn(txn):
+ sql = (
+ "SELECT MAX(filter_id) FROM user_filters "
+ "WHERE user_id = ?"
+ )
+ txn.execute(sql, (user_localpart,))
+ max_id = txn.fetchone()[0]
+ if max_id is None:
+ filter_id = 0
+ else:
+ filter_id = max_id + 1
+
+ sql = (
+ "INSERT INTO user_filters (user_id, filter_id, filter_json)"
+ "VALUES(?, ?, ?)"
+ )
+ txn.execute(sql, (user_localpart, filter_id, def_json))
+
+ return filter_id
+
+ return self.runInteraction("add_user_filter", _do_txn)
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
new file mode 100644
index 00000000..344cacdc
--- /dev/null
+++ b/synapse/storage/keys.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+
+from twisted.internet import defer
+
+import OpenSSL
+from signedjson.key import decode_verify_key_bytes
+import hashlib
+
+
+class KeyStore(SQLBaseStore):
+ """Persistence for signature verification keys and tls X.509 certificates
+ """
+
+ @defer.inlineCallbacks
+ def get_server_certificate(self, server_name):
+ """Retrieve the TLS X.509 certificate for the given server
+ Args:
+ server_name (bytes): The name of the server.
+ Returns:
+ (OpenSSL.crypto.X509): The tls certificate.
+ """
+ tls_certificate_bytes, = yield self._simple_select_one(
+ table="server_tls_certificates",
+ keyvalues={"server_name": server_name},
+ retcols=("tls_certificate",),
+ )
+ tls_certificate = OpenSSL.crypto.load_certificate(
+ OpenSSL.crypto.FILETYPE_ASN1, tls_certificate_bytes,
+ )
+ defer.returnValue(tls_certificate)
+
+ def store_server_certificate(self, server_name, from_server, time_now_ms,
+ tls_certificate):
+ """Stores the TLS X.509 certificate for the given server
+ Args:
+ server_name (str): The name of the server.
+ from_server (str): Where the certificate was looked up
+ time_now_ms (int): The time now in milliseconds
+ tls_certificate (OpenSSL.crypto.X509): The X.509 certificate.
+ """
+ tls_certificate_bytes = OpenSSL.crypto.dump_certificate(
+ OpenSSL.crypto.FILETYPE_ASN1, tls_certificate
+ )
+ fingerprint = hashlib.sha256(tls_certificate_bytes).hexdigest()
+ return self._simple_upsert(
+ table="server_tls_certificates",
+ keyvalues={
+ "server_name": server_name,
+ "fingerprint": fingerprint,
+ },
+ values={
+ "from_server": from_server,
+ "ts_added_ms": time_now_ms,
+ "tls_certificate": buffer(tls_certificate_bytes),
+ },
+ desc="store_server_certificate",
+ )
+
+ @cachedInlineCallbacks()
+ def get_all_server_verify_keys(self, server_name):
+ rows = yield self._simple_select_list(
+ table="server_signature_keys",
+ keyvalues={
+ "server_name": server_name,
+ },
+ retcols=["key_id", "verify_key"],
+ desc="get_all_server_verify_keys",
+ )
+
+ defer.returnValue({
+ row["key_id"]: decode_verify_key_bytes(
+ row["key_id"], str(row["verify_key"])
+ )
+ for row in rows
+ })
+
+ @defer.inlineCallbacks
+ def get_server_verify_keys(self, server_name, key_ids):
+ """Retrieve the NACL verification key for a given server for the given
+ key_ids
+ Args:
+ server_name (str): The name of the server.
+ key_ids (list of str): List of key_ids to try and look up.
+ Returns:
+ dict of key_id to VerifyKey: The verification keys found.
+ """
+ keys = yield self.get_all_server_verify_keys(server_name)
+ defer.returnValue({
+ k: keys[k]
+ for k in key_ids
+ if k in keys and keys[k]
+ })
+
+ @defer.inlineCallbacks
+ def store_server_verify_key(self, server_name, from_server, time_now_ms,
+ verify_key):
+ """Stores a NACL verification key for the given server.
+ Args:
+ server_name (str): The name of the server.
+ key_id (str): The version of the key for the server.
+ from_server (str): Where the verification key was looked up
+ ts_now_ms (int): The time now in milliseconds
+ verification_key (VerifyKey): The NACL verify key.
+ """
+ yield self._simple_upsert(
+ table="server_signature_keys",
+ keyvalues={
+ "server_name": server_name,
+ "key_id": "%s:%s" % (verify_key.alg, verify_key.version),
+ },
+ values={
+ "from_server": from_server,
+ "ts_added_ms": time_now_ms,
+ "verify_key": buffer(verify_key.encode()),
+ },
+ desc="store_server_verify_key",
+ )
+
+ self.get_all_server_verify_keys.invalidate((server_name,))
+
+ def store_server_keys_json(self, server_name, key_id, from_server,
+ ts_now_ms, ts_expires_ms, key_json_bytes):
+ """Stores the JSON bytes for a set of keys from a server
+ The JSON should be signed by the originating server, the intermediate
+ server, and by this server. Updates the value for the
+ (server_name, key_id, from_server) triplet if one already existed.
+ Args:
+ server_name (str): The name of the server.
+ key_id (str): The identifier of the key this JSON is for.
+ from_server (str): The server this JSON was fetched from.
+ ts_now_ms (int): The time now in milliseconds.
+ ts_expires_ms (int): The time when this JSON stops being valid.
+ key_json_bytes (bytes): The encoded JSON.
+ """
+ return self._simple_upsert(
+ table="server_keys_json",
+ keyvalues={
+ "server_name": server_name,
+ "key_id": key_id,
+ "from_server": from_server,
+ },
+ values={
+ "server_name": server_name,
+ "key_id": key_id,
+ "from_server": from_server,
+ "ts_added_ms": ts_now_ms,
+ "ts_valid_until_ms": ts_expires_ms,
+ "key_json": buffer(key_json_bytes),
+ },
+ desc="store_server_keys_json",
+ )
+
+ def get_server_keys_json(self, server_keys):
+ """Retrive the key json for a list of server_keys and key ids.
+ If no keys are found for a given server, key_id and source then
+ that server, key_id, and source triplet entry will be an empty list.
+ The JSON is returned as a byte array so that it can be efficiently
+ used in an HTTP response.
+ Args:
+ server_keys (list): List of (server_name, key_id, source) triplets.
+ Returns:
+ Dict mapping (server_name, key_id, source) triplets to dicts with
+ "ts_valid_until_ms" and "key_json" keys.
+ """
+ def _get_server_keys_json_txn(txn):
+ results = {}
+ for server_name, key_id, from_server in server_keys:
+ keyvalues = {"server_name": server_name}
+ if key_id is not None:
+ keyvalues["key_id"] = key_id
+ if from_server is not None:
+ keyvalues["from_server"] = from_server
+ rows = self._simple_select_list_txn(
+ txn,
+ "server_keys_json",
+ keyvalues=keyvalues,
+ retcols=(
+ "key_id",
+ "from_server",
+ "ts_added_ms",
+ "ts_valid_until_ms",
+ "key_json",
+ ),
+ )
+ results[(server_name, key_id, from_server)] = rows
+ return results
+ return self.runInteraction(
+ "get_server_keys_json", _get_server_keys_json_txn
+ )
diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py
new file mode 100644
index 00000000..7bf57234
--- /dev/null
+++ b/synapse/storage/media_repository.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+
+
+class MediaRepositoryStore(SQLBaseStore):
+ """Persistence for attachments and avatars"""
+
+ def get_default_thumbnails(self, top_level_type, sub_type):
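+ # Stub: default thumbnails are not currently stored, so this always
+ # returns an empty list.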
+ return []
+
+ def get_local_media(self, media_id):
+ """Get the metadata for a local piece of media
+ Returns:
+ None if the media_id doesn't exist.
+ """
+ return self._simple_select_one(
+ "local_media_repository",
+ {"media_id": media_id},
+ ("media_type", "media_length", "upload_name", "created_ts"),
+ allow_none=True,
+ desc="get_local_media",
+ )
+
+ def store_local_media(self, media_id, media_type, time_now_ms, upload_name,
+ media_length, user_id):
+ return self._simple_insert(
+ "local_media_repository",
+ {
+ "media_id": media_id,
+ "media_type": media_type,
+ "created_ts": time_now_ms,
+ "upload_name": upload_name,
+ "media_length": media_length,
+ "user_id": user_id.to_string(),
+ },
+ desc="store_local_media",
+ )
+
+ def get_local_media_thumbnails(self, media_id):
+ return self._simple_select_list(
+ "local_media_repository_thumbnails",
+ {"media_id": media_id},
+ (
+ "thumbnail_width", "thumbnail_height", "thumbnail_method",
+ "thumbnail_type", "thumbnail_length",
+ ),
+ desc="get_local_media_thumbnails",
+ )
+
+ def store_local_thumbnail(self, media_id, thumbnail_width,
+ thumbnail_height, thumbnail_type,
+ thumbnail_method, thumbnail_length):
+ return self._simple_insert(
+ "local_media_repository_thumbnails",
+ {
+ "media_id": media_id,
+ "thumbnail_width": thumbnail_width,
+ "thumbnail_height": thumbnail_height,
+ "thumbnail_method": thumbnail_method,
+ "thumbnail_type": thumbnail_type,
+ "thumbnail_length": thumbnail_length,
+ },
+ desc="store_local_thumbnail",
+ )
+
+ def get_cached_remote_media(self, origin, media_id):
+ return self._simple_select_one(
+ "remote_media_cache",
+ {"media_origin": origin, "media_id": media_id},
+ (
+ "media_type", "media_length", "upload_name", "created_ts",
+ "filesystem_id",
+ ),
+ allow_none=True,
+ desc="get_cached_remote_media",
+ )
+
+ def store_cached_remote_media(self, origin, media_id, media_type,
+ media_length, time_now_ms, upload_name,
+ filesystem_id):
+ return self._simple_insert(
+ "remote_media_cache",
+ {
+ "media_origin": origin,
+ "media_id": media_id,
+ "media_type": media_type,
+ "media_length": media_length,
+ "created_ts": time_now_ms,
+ "upload_name": upload_name,
+ "filesystem_id": filesystem_id,
+ },
+ desc="store_cached_remote_media",
+ )
+
+ def get_remote_media_thumbnails(self, origin, media_id):
+ return self._simple_select_list(
+ "remote_media_cache_thumbnails",
+ {"media_origin": origin, "media_id": media_id},
+ (
+ "thumbnail_width", "thumbnail_height", "thumbnail_method",
+ "thumbnail_type", "thumbnail_length", "filesystem_id",
+ ),
+ desc="get_remote_media_thumbnails",
+ )
+
+ def store_remote_media_thumbnail(self, origin, media_id, filesystem_id,
+ thumbnail_width, thumbnail_height,
+ thumbnail_type, thumbnail_method,
+ thumbnail_length):
+ return self._simple_insert(
+ "remote_media_cache_thumbnails",
+ {
+ "media_origin": origin,
+ "media_id": media_id,
+ "thumbnail_width": thumbnail_width,
+ "thumbnail_height": thumbnail_height,
+ "thumbnail_method": thumbnail_method,
+ "thumbnail_type": thumbnail_type,
+ "thumbnail_length": thumbnail_length,
+ "filesystem_id": filesystem_id,
+ },
+ desc="store_remote_media_thumbnail",
+ )
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
new file mode 100644
index 00000000..1a74d6e3
--- /dev/null
+++ b/synapse/storage/prepare_database.py
@@ -0,0 +1,395 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fnmatch
+import imp
+import logging
+import os
+import re
+
+
+logger = logging.getLogger(__name__)
+
+
+# Remember to update this number every time a change is made to database
+# schema files, so the users will be informed on server restarts.
+SCHEMA_VERSION = 25
+
+dir_path = os.path.abspath(os.path.dirname(__file__))
+
+
+def read_schema(path):
+ """ Read the named database schema.
+
+ Args:
+ path: Path of the database schema.
+ Returns:
+ A string containing the database schema.
+ """
+ with open(path) as schema_file:
+ return schema_file.read()
+
+
+class PrepareDatabaseException(Exception):
+ pass
+
+
+class UpgradeDatabaseException(PrepareDatabaseException):
+ pass
+
+
+def prepare_database(db_conn, database_engine):
+ """Prepares a database for usage. Will either create all necessary tables
+ or upgrade from an older schema version.
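+
+ Illustrative usage (names are examples only; on sqlite,
+ prepare_sqlite3_database should be called first):
+
+ db_conn = sqlite3.connect("homeserver.db")
+ prepare_sqlite3_database(db_conn)
+ prepare_database(db_conn, database_engine)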
+ """
+ try:
+ cur = db_conn.cursor()
+ version_info = _get_or_create_schema_state(cur, database_engine)
+
+ if version_info:
+ user_version, delta_files, upgraded = version_info
+ _upgrade_existing_database(
+ cur, user_version, delta_files, upgraded, database_engine
+ )
+ else:
+ _setup_new_database(cur, database_engine)
+
+ # cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,))
+
+ cur.close()
+ db_conn.commit()
+ except:
+ db_conn.rollback()
+ raise
+
+
+def _setup_new_database(cur, database_engine):
+ """Sets up the database by finding a base set of "full schemas" and then
+ applying any necessary deltas.
+
+ The "full_schemas" directory has subdirectories named after versions. This
+ function searches for the highest version less than or equal to
+ `SCHEMA_VERSION` and executes all .sql files in that directory.
+
+ The function will then apply all deltas for all versions after the base
+ version.
+
+ Example directory structure:
+
+ schema/
+ delta/
+ ...
+ full_schemas/
+ 3/
+ test.sql
+ ...
+ 11/
+ foo.sql
+ bar.sql
+ ...
+
+ In the example foo.sql and bar.sql would be run, and then any delta files
+ for versions strictly greater than 11.
+ """
+ current_dir = os.path.join(dir_path, "schema", "full_schemas")
+ directory_entries = os.listdir(current_dir)
+
+ valid_dirs = []
+ pattern = re.compile(r"^\d+(\.sql)?$")
+ for filename in directory_entries:
+ match = pattern.match(filename)
+ abs_path = os.path.join(current_dir, filename)
+ if match and os.path.isdir(abs_path):
+ ver = int(match.group(0))
+ if ver <= SCHEMA_VERSION:
+ valid_dirs.append((ver, abs_path))
+ else:
+ logger.warn("Unexpected entry in 'full_schemas': %s", filename)
+
+ if not valid_dirs:
+ raise PrepareDatabaseException(
+ "Could not find a suitable base set of full schemas"
+ )
+
+ max_current_ver, sql_dir = max(valid_dirs, key=lambda x: x[0])
+
+ logger.debug("Initialising schema v%d", max_current_ver)
+
+ directory_entries = os.listdir(sql_dir)
+
+ for filename in fnmatch.filter(directory_entries, "*.sql"):
+ sql_loc = os.path.join(sql_dir, filename)
+ logger.debug("Applying schema %s", sql_loc)
+ executescript(cur, sql_loc)
+
+ cur.execute(
+ database_engine.convert_param_style(
+ "INSERT INTO schema_version (version, upgraded)"
+ " VALUES (?,?)"
+ ),
+ (max_current_ver, False,)
+ )
+
+ _upgrade_existing_database(
+ cur,
+ current_version=max_current_ver,
+ applied_delta_files=[],
+ upgraded=False,
+ database_engine=database_engine,
+ )
+
+
+def _upgrade_existing_database(cur, current_version, applied_delta_files,
+ upgraded, database_engine):
+ """Upgrades an existing database.
+
+ Delta files can either be SQL stored in *.sql files, or python modules
+ in *.py.
+
+ There can be multiple delta files per version. Synapse will keep track of
+ which delta files have been applied, and will apply any that haven't been
+ even if there has been no version bump. This is useful for development
+ where orthogonal schema changes may happen on separate branches.
+
+ Different delta files for the same version *must* be orthogonal and give
+ the same result when applied in any order. No guarantees are made on the
+ order of execution of these scripts.
+
+ This is a no-op if current_version == SCHEMA_VERSION.
+
+ Example directory structure:
+
+ schema/
+ delta/
+ 11/
+ foo.sql
+ ...
+ 12/
+ foo.sql
+ bar.py
+ ...
+ full_schemas/
+ ...
+
+ In the example, if current_version is 11, then 11/foo.sql will be run
+ if and only if `upgraded` is True. Then 12/foo.sql and 12/bar.py would
+ be run in some arbitrary order.
+
+ Args:
+ cur (Cursor)
+ current_version (int): The current version of the schema.
+ applied_delta_files (list): A list of deltas that have already been
+ applied.
+ upgraded (bool): Whether the current version was generated by having
+ applied deltas or from a full schema file. If `False` the function
+ will never apply delta files for the given `current_version`, since
+ those changes are already included in the full schema.
+ """
+
+ if current_version > SCHEMA_VERSION:
+ raise ValueError(
+ "Cannot use this database as it is too " +
+ "new for the server to understand"
+ )
+
+ start_ver = current_version
+ if not upgraded:
+ start_ver += 1
+
+ logger.debug("applied_delta_files: %s", applied_delta_files)
+
+ for v in range(start_ver, SCHEMA_VERSION + 1):
+ logger.debug("Upgrading schema to v%d", v)
+
+ delta_dir = os.path.join(dir_path, "schema", "delta", str(v))
+
+ try:
+ directory_entries = os.listdir(delta_dir)
+ except OSError:
+ logger.exception("Could not open delta dir for version %d", v)
+ raise UpgradeDatabaseException(
+ "Could not open delta dir for version %d" % (v,)
+ )
+
+ directory_entries.sort()
+ for file_name in directory_entries:
+ relative_path = os.path.join(str(v), file_name)
+ logger.debug("Found file: %s", relative_path)
+ if relative_path in applied_delta_files:
+ continue
+
+ absolute_path = os.path.join(
+ dir_path, "schema", "delta", relative_path,
+ )
+ root_name, ext = os.path.splitext(file_name)
+ if ext == ".py":
+ # This is a python upgrade module. We need to import into some
+ # package and then execute its `run_upgrade` function.
+ module_name = "synapse.storage.v%d_%s" % (
+ v, root_name
+ )
+ with open(absolute_path) as python_file:
+ module = imp.load_source(
+ module_name, absolute_path, python_file
+ )
+ logger.debug("Running script %s", relative_path)
+ module.run_upgrade(cur, database_engine)
+ elif ext == ".pyc":
+ # Sometimes .pyc files turn up anyway even though we've
+ # disabled their generation; e.g. from distribution package
+ # installers. Silently skip it
+ pass
+ elif ext == ".sql":
+ # A plain old .sql file, just read and execute it
+ logger.debug("Applying schema %s", relative_path)
+ executescript(cur, absolute_path)
+ else:
+ # Not a valid delta file.
+ logger.warn(
+ "Found directory entry that did not end in .py or"
+ " .sql: %s",
+ relative_path,
+ )
+ continue
+
+ # Mark as done.
+ cur.execute(
+ database_engine.convert_param_style(
+ "INSERT INTO applied_schema_deltas (version, file)"
+ " VALUES (?,?)",
+ ),
+ (v, relative_path)
+ )
+
+ cur.execute("DELETE FROM schema_version")
+ cur.execute(
+ database_engine.convert_param_style(
+ "INSERT INTO schema_version (version, upgraded)"
+ " VALUES (?,?)",
+ ),
+ (v, True)
+ )
+
+
+def get_statements(f):
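+ """Generator which splits a SQL script into individual statements.
+
+ Strips /* ... */ block comments and "--"/"//" line comments, and
+ yields each ";"-terminated statement once it is complete, buffering
+ any trailing fragment until a later line finishes it.
+
+ A minimal illustrative example (hypothetical input):
+
+ list(get_statements(["CREATE TABLE foo(x); -- comment",
+ "DROP TABLE foo;"]))
+ == ["CREATE TABLE foo(x)", "DROP TABLE foo"]
+ """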
+ statement_buffer = ""
+ in_comment = False # If we're in a /* ... */ style comment
+
+ for line in f:
+ line = line.strip()
+
+ if in_comment:
+ # Check if this line contains an end to the comment
+ comments = line.split("*/", 1)
+ if len(comments) == 1:
+ continue
+ line = comments[1]
+ in_comment = False
+
+ # Remove inline block comments
+ line = re.sub(r"/\*.*\*/", " ", line)
+
+ # Does this line start a comment?
+ comments = line.split("/*", 1)
+ if len(comments) > 1:
+ line = comments[0]
+ in_comment = True
+
+ # Deal with line comments
+ line = line.split("--", 1)[0]
+ line = line.split("//", 1)[0]
+
+ # Find *all* semicolons. We need to treat first and last entry
+ # specially.
+ statements = line.split(";")
+
+ # We must prepend statement_buffer to the first statement
+ first_statement = "%s %s" % (
+ statement_buffer.strip(),
+ statements[0].strip()
+ )
+ statements[0] = first_statement
+
+ # Every entry, except the last, is a full statement
+ for statement in statements[:-1]:
+ yield statement.strip()
+
+ # The last entry did *not* end in a semicolon, so we store it for the
+ # next semicolon we find
+ statement_buffer = statements[-1].strip()
+
+
+def executescript(txn, schema_path):
+ with open(schema_path, 'r') as f:
+ for statement in get_statements(f):
+ txn.execute(statement)
+
+
+def _get_or_create_schema_state(txn, database_engine):
+ # Bluntly try creating the schema_version tables.
+ schema_path = os.path.join(
+ dir_path, "schema", "schema_version.sql",
+ )
+ executescript(txn, schema_path)
+
+ txn.execute("SELECT version, upgraded FROM schema_version")
+ row = txn.fetchone()
+ current_version = int(row[0]) if row else None
+ upgraded = bool(row[1]) if row else None
+
+ if current_version:
+ txn.execute(
+ database_engine.convert_param_style(
+ "SELECT file FROM applied_schema_deltas WHERE version >= ?"
+ ),
+ (current_version,)
+ )
+ applied_deltas = [d for d, in txn.fetchall()]
+ return current_version, applied_deltas, upgraded
+
+ return None
+
+
+def prepare_sqlite3_database(db_conn):
+ """This function should be called before `prepare_database` on sqlite3
+ databases.
+
+ Since we changed the way we store the current schema version and handle
+ updates to schemas, we need a way to upgrade from the old method to the
+ new. This only affects sqlite databases since they were the only ones
+ supported at the time.
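+
+ Concretely: if the schema_version table is empty but the legacy
+ "PRAGMA user_version" value is set, that value is copied into
+ schema_version (with upgraded=False) so that prepare_database can
+ take over from the correct version.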
+ """
+ with db_conn:
+ schema_path = os.path.join(
+ dir_path, "schema", "schema_version.sql",
+ )
+ create_schema = read_schema(schema_path)
+ db_conn.executescript(create_schema)
+
+ c = db_conn.execute("SELECT * FROM schema_version")
+ rows = c.fetchall()
+ c.close()
+
+ if not rows:
+ c = db_conn.execute("PRAGMA user_version")
+ row = c.fetchone()
+ c.close()
+
+ if row and row[0]:
+ db_conn.execute(
+ "REPLACE INTO schema_version (version, upgraded)"
+ " VALUES (?,?)",
+ (row[0], False)
+ )
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
new file mode 100644
index 00000000..34ca3b9a
--- /dev/null
+++ b/synapse/storage/presence.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached, cachedList
+
+from twisted.internet import defer
+
+
+class PresenceStore(SQLBaseStore):
+ def create_presence(self, user_localpart):
+ res = self._simple_insert(
+ table="presence",
+ values={"user_id": user_localpart},
+ desc="create_presence",
+ )
+
+ self.get_presence_state.invalidate((user_localpart,))
+ return res
+
+ def has_presence_state(self, user_localpart):
+ return self._simple_select_one(
+ table="presence",
+ keyvalues={"user_id": user_localpart},
+ retcols=["user_id"],
+ allow_none=True,
+ desc="has_presence_state",
+ )
+
+ @cached(max_entries=2000)
+ def get_presence_state(self, user_localpart):
+ return self._simple_select_one(
+ table="presence",
+ keyvalues={"user_id": user_localpart},
+ retcols=["state", "status_msg", "mtime"],
+ desc="get_presence_state",
+ )
+
+ @cachedList(get_presence_state.cache, list_name="user_localparts")
+ def get_presence_states(self, user_localparts):
+ def f(txn):
+ results = {}
+ for user_localpart in user_localparts:
+ res = self._simple_select_one_txn(
+ txn,
+ table="presence",
+ keyvalues={"user_id": user_localpart},
+ retcols=["state", "status_msg", "mtime"],
+ allow_none=True,
+ )
+ if res:
+ results[user_localpart] = res
+
+ return results
+
+ return self.runInteraction("get_presence_states", f)
+
+ def set_presence_state(self, user_localpart, new_state):
+ res = self._simple_update_one(
+ table="presence",
+ keyvalues={"user_id": user_localpart},
+ updatevalues={"state": new_state["state"],
+ "status_msg": new_state["status_msg"],
+ "mtime": self._clock.time_msec()},
+ desc="set_presence_state",
+ )
+
+ self.get_presence_state.invalidate((user_localpart,))
+ return res
+
+ def allow_presence_visible(self, observed_localpart, observer_userid):
+ return self._simple_insert(
+ table="presence_allow_inbound",
+ values={"observed_user_id": observed_localpart,
+ "observer_user_id": observer_userid},
+ desc="allow_presence_visible",
+ or_ignore=True,
+ )
+
+ def disallow_presence_visible(self, observed_localpart, observer_userid):
+ return self._simple_delete_one(
+ table="presence_allow_inbound",
+ keyvalues={"observed_user_id": observed_localpart,
+ "observer_user_id": observer_userid},
+ desc="disallow_presence_visible",
+ )
+
+ def is_presence_visible(self, observed_localpart, observer_userid):
+ return self._simple_select_one(
+ table="presence_allow_inbound",
+ keyvalues={"observed_user_id": observed_localpart,
+ "observer_user_id": observer_userid},
+ retcols=["observed_user_id"],
+ allow_none=True,
+ desc="is_presence_visible",
+ )
+
+ def add_presence_list_pending(self, observer_localpart, observed_userid):
+ return self._simple_insert(
+ table="presence_list",
+ values={"user_id": observer_localpart,
+ "observed_user_id": observed_userid,
+ "accepted": False},
+ desc="add_presence_list_pending",
+ )
+
+ @defer.inlineCallbacks
+ def set_presence_list_accepted(self, observer_localpart, observed_userid):
+ result = yield self._simple_update_one(
+ table="presence_list",
+ keyvalues={"user_id": observer_localpart,
+ "observed_user_id": observed_userid},
+ updatevalues={"accepted": True},
+ desc="set_presence_list_accepted",
+ )
+ self.get_presence_list_accepted.invalidate((observer_localpart,))
+ defer.returnValue(result)
+
+ def get_presence_list(self, observer_localpart, accepted=None):
+ if accepted:
+ return self.get_presence_list_accepted(observer_localpart)
+ else:
+ keyvalues = {"user_id": observer_localpart}
+ if accepted is not None:
+ keyvalues["accepted"] = accepted
+
+ return self._simple_select_list(
+ table="presence_list",
+ keyvalues=keyvalues,
+ retcols=["observed_user_id", "accepted"],
+ desc="get_presence_list",
+ )
+
+ @cached()
+ def get_presence_list_accepted(self, observer_localpart):
+ return self._simple_select_list(
+ table="presence_list",
+ keyvalues={"user_id": observer_localpart, "accepted": True},
+ retcols=["observed_user_id", "accepted"],
+ desc="get_presence_list_accepted",
+ )
+
+ @defer.inlineCallbacks
+ def del_presence_list(self, observer_localpart, observed_userid):
+ yield self._simple_delete_one(
+ table="presence_list",
+ keyvalues={"user_id": observer_localpart,
+ "observed_user_id": observed_userid},
+ desc="del_presence_list",
+ )
+ self.get_presence_list_accepted.invalidate((observer_localpart,))
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
new file mode 100644
index 00000000..a6e52cb2
--- /dev/null
+++ b/synapse/storage/profile.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+
+
+class ProfileStore(SQLBaseStore):
+ def create_profile(self, user_localpart):
+ return self._simple_insert(
+ table="profiles",
+ values={"user_id": user_localpart},
+ desc="create_profile",
+ )
+
+ def get_profile_displayname(self, user_localpart):
+ return self._simple_select_one_onecol(
+ table="profiles",
+ keyvalues={"user_id": user_localpart},
+ retcol="displayname",
+ desc="get_profile_displayname",
+ )
+
+ def set_profile_displayname(self, user_localpart, new_displayname):
+ return self._simple_update_one(
+ table="profiles",
+ keyvalues={"user_id": user_localpart},
+ updatevalues={"displayname": new_displayname},
+ desc="set_profile_displayname",
+ )
+
+ def get_profile_avatar_url(self, user_localpart):
+ return self._simple_select_one_onecol(
+ table="profiles",
+ keyvalues={"user_id": user_localpart},
+ retcol="avatar_url",
+ desc="get_profile_avatar_url",
+ )
+
+ def set_profile_avatar_url(self, user_localpart, new_avatar_url):
+ return self._simple_update_one(
+ table="profiles",
+ keyvalues={"user_id": user_localpart},
+ updatevalues={"avatar_url": new_avatar_url},
+ desc="set_profile_avatar_url",
+ )
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
new file mode 100644
index 00000000..5305b7e1
--- /dev/null
+++ b/synapse/storage/push_rule.py
@@ -0,0 +1,278 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+from twisted.internet import defer
+
+import logging
+import simplejson as json
+
+logger = logging.getLogger(__name__)
+
+
+class PushRuleStore(SQLBaseStore):
+ @cachedInlineCallbacks()
+ def get_push_rules_for_user(self, user_name):
+ rows = yield self._simple_select_list(
+ table=PushRuleTable.table_name,
+ keyvalues={
+ "user_name": user_name,
+ },
+ retcols=PushRuleTable.fields,
+ desc="get_push_rules_enabled_for_user",
+ )
+
+ rows.sort(
+ key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
+ )
+
+ defer.returnValue(rows)
+
+ @cachedInlineCallbacks()
+ def get_push_rules_enabled_for_user(self, user_name):
+ results = yield self._simple_select_list(
+ table=PushRuleEnableTable.table_name,
+ keyvalues={
+ 'user_name': user_name
+ },
+ retcols=PushRuleEnableTable.fields,
+ desc="get_push_rules_enabled_for_user",
+ )
+ defer.returnValue({
+ r['rule_id']: bool(r['enabled']) for r in results
+ })
+
+ @defer.inlineCallbacks
+ def add_push_rule(self, before, after, **kwargs):
+ vals = kwargs
+ if 'conditions' in vals:
+ vals['conditions'] = json.dumps(vals['conditions'])
+ if 'actions' in vals:
+ vals['actions'] = json.dumps(vals['actions'])
+
+ # we could check the rest of the keys are valid column names
+ # but sqlite will do that anyway so I think it's just pointless.
+ vals.pop("id", None)
+
+ if before or after:
+ ret = yield self.runInteraction(
+ "_add_push_rule_relative_txn",
+ self._add_push_rule_relative_txn,
+ before=before,
+ after=after,
+ **vals
+ )
+ defer.returnValue(ret)
+ else:
+ ret = yield self.runInteraction(
+ "_add_push_rule_highest_priority_txn",
+ self._add_push_rule_highest_priority_txn,
+ **vals
+ )
+ defer.returnValue(ret)
+
+ def _add_push_rule_relative_txn(self, txn, user_name, **kwargs):
+ after = kwargs.pop("after", None)
+ relative_to_rule = kwargs.pop("before", after)
+
+ res = self._simple_select_one_txn(
+ txn,
+ table=PushRuleTable.table_name,
+ keyvalues={
+ "user_name": user_name,
+ "rule_id": relative_to_rule,
+ },
+ retcols=["priority_class", "priority"],
+ allow_none=True,
+ )
+
+ if not res:
+ raise RuleNotFoundException(
+ "before/after rule not found: %s" % (relative_to_rule,)
+ )
+
+ priority_class = res["priority_class"]
+ base_rule_priority = res["priority"]
+
+ if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class:
+ raise InconsistentRuleException(
+ "Given priority class does not match class of relative rule"
+ )
+
+ new_rule = kwargs
+ new_rule.pop("before", None)
+ new_rule.pop("after", None)
+ new_rule['priority_class'] = priority_class
+ new_rule['user_name'] = user_name
+ new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
+
+ # check if the priority before/after is free
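+ # e.g. (illustrative) relative to a rule at priority 5, "before"
+ # gives the new rule priority 6 and "after" gives it priority 4,
+ # since rules are sorted by descending priority.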
+ new_rule_priority = base_rule_priority
+ if after:
+ new_rule_priority -= 1
+ else:
+ new_rule_priority += 1
+
+ new_rule['priority'] = new_rule_priority
+
+ sql = (
+ "SELECT COUNT(*) FROM " + PushRuleTable.table_name +
+ " WHERE user_name = ? AND priority_class = ? AND priority = ?"
+ )
+ txn.execute(sql, (user_name, priority_class, new_rule_priority))
+ res = txn.fetchall()
+ num_conflicting = res[0][0]
+
+ # if there are conflicting rules, bump everything
+ if num_conflicting:
+ sql = "UPDATE "+PushRuleTable.table_name+" SET priority = priority "
+ if after:
+ sql += "-1"
+ else:
+ sql += "+1"
+ sql += " WHERE user_name = ? AND priority_class = ? AND priority "
+ if after:
+ sql += "<= ?"
+ else:
+ sql += ">= ?"
+
+ txn.execute(sql, (user_name, priority_class, new_rule_priority))
+
+ txn.call_after(
+ self.get_push_rules_for_user.invalidate, (user_name,)
+ )
+
+ txn.call_after(
+ self.get_push_rules_enabled_for_user.invalidate, (user_name,)
+ )
+
+ self._simple_insert_txn(
+ txn,
+ table=PushRuleTable.table_name,
+ values=new_rule,
+ )
+
+ def _add_push_rule_highest_priority_txn(self, txn, user_name,
+ priority_class, **kwargs):
+ # find the highest priority rule in that class
+ sql = (
+ "SELECT COUNT(*), MAX(priority) FROM " + PushRuleTable.table_name +
+ " WHERE user_name = ? and priority_class = ?"
+ )
+ txn.execute(sql, (user_name, priority_class))
+ res = txn.fetchall()
+ (how_many, highest_prio) = res[0]
+
+ new_prio = 0
+ if how_many > 0:
+ new_prio = highest_prio + 1
+
+ # and insert the new rule
+ new_rule = kwargs
+ new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
+ new_rule['user_name'] = user_name
+ new_rule['priority_class'] = priority_class
+ new_rule['priority'] = new_prio
+
+ txn.call_after(
+ self.get_push_rules_for_user.invalidate, (user_name,)
+ )
+ txn.call_after(
+ self.get_push_rules_enabled_for_user.invalidate, (user_name,)
+ )
+
+ self._simple_insert_txn(
+ txn,
+ table=PushRuleTable.table_name,
+ values=new_rule,
+ )
+
+ @defer.inlineCallbacks
+ def delete_push_rule(self, user_name, rule_id):
+ """
+ Delete a push rule, identified by the owner (user_name) and the
+ rule_id of the rule to be deleted.
+
+ Args:
+ user_name (str): The matrix ID of the push rule owner
+ rule_id (str): The rule_id of the rule to be deleted
+ """
+ yield self._simple_delete_one(
+ PushRuleTable.table_name,
+ {'user_name': user_name, 'rule_id': rule_id},
+ desc="delete_push_rule",
+ )
+
+ self.get_push_rules_for_user.invalidate((user_name,))
+ self.get_push_rules_enabled_for_user.invalidate((user_name,))
+
+ @defer.inlineCallbacks
+ def set_push_rule_enabled(self, user_name, rule_id, enabled):
+ ret = yield self.runInteraction(
+ "_set_push_rule_enabled_txn",
+ self._set_push_rule_enabled_txn,
+ user_name, rule_id, enabled
+ )
+ defer.returnValue(ret)
+
+ def _set_push_rule_enabled_txn(self, txn, user_name, rule_id, enabled):
+ new_id = self._push_rules_enable_id_gen.get_next_txn(txn)
+ self._simple_upsert_txn(
+ txn,
+ PushRuleEnableTable.table_name,
+ {'user_name': user_name, 'rule_id': rule_id},
+ {'enabled': 1 if enabled else 0},
+ {'id': new_id},
+ )
+ txn.call_after(
+ self.get_push_rules_for_user.invalidate, (user_name,)
+ )
+ txn.call_after(
+ self.get_push_rules_enabled_for_user.invalidate, (user_name,)
+ )
+
+
+class RuleNotFoundException(Exception):
+ pass
+
+
+class InconsistentRuleException(Exception):
+ pass
+
+
+class PushRuleTable(object):
+ table_name = "push_rules"
+
+ fields = [
+ "id",
+ "user_name",
+ "rule_id",
+ "priority_class",
+ "priority",
+ "conditions",
+ "actions",
+ ]
+
+
+class PushRuleEnableTable(object):
+ table_name = "push_rules_enable"
+
+ fields = [
+ "user_name",
+ "rule_id",
+ "enabled"
+ ]
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
new file mode 100644
index 00000000..345c4e11
--- /dev/null
+++ b/synapse/storage/pusher.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from twisted.internet import defer
+
+from synapse.api.errors import StoreError
+
+from canonicaljson import encode_canonical_json
+
+import logging
+import simplejson as json
+import types
+
+logger = logging.getLogger(__name__)
+
+
+class PusherStore(SQLBaseStore):
+ def _decode_pushers_rows(self, rows):
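+ """Decodes, in place, the JSON "data" column of pusher rows (and the
+ pushkey), converting sqlite buffer objects to unicode first. Rows
+ with invalid JSON are logged and left with data=None rather than
+ failing the whole read.
+ """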
+ for r in rows:
+ dataJson = r['data']
+ r['data'] = None
+ try:
+ if isinstance(dataJson, types.BufferType):
+ dataJson = str(dataJson).decode("UTF8")
+
+ r['data'] = json.loads(dataJson)
+ except Exception as e:
+ logger.warn(
+ "Invalid JSON in data for pusher %d: %s, %s",
+ r['id'], dataJson, e.message,
+ )
+ pass
+
+ if isinstance(r['pushkey'], types.BufferType):
+ r['pushkey'] = str(r['pushkey']).decode("UTF8")
+
+ return rows
+
+ @defer.inlineCallbacks
+ def get_pushers_by_app_id_and_pushkey(self, app_id, pushkey):
+ def r(txn):
+ sql = (
+ "SELECT * FROM pushers"
+ " WHERE app_id = ? AND pushkey = ?"
+ )
+
+ txn.execute(sql, (app_id, pushkey,))
+ rows = self.cursor_to_dict(txn)
+
+ return self._decode_pushers_rows(rows)
+
+ rows = yield self.runInteraction(
+ "get_pushers_by_app_id_and_pushkey", r
+ )
+
+ defer.returnValue(rows)
+
+ @defer.inlineCallbacks
+ def get_all_pushers(self):
+ def get_pushers(txn):
+ txn.execute("SELECT * FROM pushers")
+ rows = self.cursor_to_dict(txn)
+
+ return self._decode_pushers_rows(rows)
+
+ rows = yield self.runInteraction("get_all_pushers", get_pushers)
+ defer.returnValue(rows)
+
+ @defer.inlineCallbacks
+ def add_pusher(self, user_name, access_token, profile_tag, kind, app_id,
+ app_display_name, device_display_name,
+ pushkey, pushkey_ts, lang, data):
+ try:
+ next_id = yield self._pushers_id_gen.get_next()
+ yield self._simple_upsert(
+ PushersTable.table_name,
+ dict(
+ app_id=app_id,
+ pushkey=pushkey,
+ user_name=user_name,
+ ),
+ dict(
+ access_token=access_token,
+ kind=kind,
+ profile_tag=profile_tag,
+ app_display_name=app_display_name,
+ device_display_name=device_display_name,
+ ts=pushkey_ts,
+ lang=lang,
+ data=encode_canonical_json(data),
+ ),
+ insertion_values=dict(
+ id=next_id,
+ ),
+ desc="add_pusher",
+ )
+ except Exception as e:
+ logger.error("create_pusher with failed: %s", e)
+ raise StoreError(500, "Problem creating pusher.")
+
+ @defer.inlineCallbacks
+ def delete_pusher_by_app_id_pushkey_user_name(self, app_id, pushkey, user_name):
+ yield self._simple_delete_one(
+ PushersTable.table_name,
+ {"app_id": app_id, "pushkey": pushkey, 'user_name': user_name},
+ desc="delete_pusher_by_app_id_pushkey_user_name",
+ )
+
+ @defer.inlineCallbacks
+ def update_pusher_last_token(self, app_id, pushkey, user_name, last_token):
+ yield self._simple_update_one(
+ PushersTable.table_name,
+ {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_name},
+ {'last_token': last_token},
+ desc="update_pusher_last_token",
+ )
+
+ @defer.inlineCallbacks
+ def update_pusher_last_token_and_success(self, app_id, pushkey, user_name,
+ last_token, last_success):
+ yield self._simple_update_one(
+ PushersTable.table_name,
+ {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_name},
+ {'last_token': last_token, 'last_success': last_success},
+ desc="update_pusher_last_token_and_success",
+ )
+
+ @defer.inlineCallbacks
+ def update_pusher_failing_since(self, app_id, pushkey, user_name,
+ failing_since):
+ yield self._simple_update_one(
+ PushersTable.table_name,
+ {'app_id': app_id, 'pushkey': pushkey, 'user_name': user_name},
+ {'failing_since': failing_since},
+ desc="update_pusher_failing_since",
+ )
+
+
+class PushersTable(object):
+ table_name = "pushers"
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
new file mode 100644
index 00000000..a5350635
--- /dev/null
+++ b/synapse/storage/receipts.py
@@ -0,0 +1,406 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
+from synapse.util.caches import cache_counter, caches_by_name
+
+from twisted.internet import defer
+
+from blist import sorteddict
+import logging
+import ujson as json
+
+
+logger = logging.getLogger(__name__)
+
+
+class ReceiptsStore(SQLBaseStore):
+ def __init__(self, hs):
+ super(ReceiptsStore, self).__init__(hs)
+
+ self._receipts_stream_cache = _RoomStreamChangeCache()
+
+ @defer.inlineCallbacks
+ def get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None):
+ """Get receipts for multiple rooms for sending to clients.
+
+ Args:
+ room_ids (list): List of room_ids.
+ to_key (int): Max stream id to fetch receipts up to.
+ from_key (int): Min stream id to fetch receipts from. None fetches
+ from the start.
+
+ Returns:
+ list: A list of receipts.
+ """
+ room_ids = set(room_ids)
+
+ if from_key:
+ room_ids = yield self._receipts_stream_cache.get_rooms_changed(
+ self, room_ids, from_key
+ )
+
+ results = yield self._get_linearized_receipts_for_rooms(
+ room_ids, to_key, from_key=from_key
+ )
+
+ defer.returnValue([ev for res in results.values() for ev in res])
+
+ @cachedInlineCallbacks(num_args=3, max_entries=5000)
+ def get_linearized_receipts_for_room(self, room_id, to_key, from_key=None):
+ """Get receipts for a single room for sending to clients.
+
+ Args:
+ room_id (str): The room ID.
+ to_key (int): Max stream id to fetch receipts up to.
+ from_key (int): Min stream id to fetch receipts from. None fetches
+ from the start.
+
+ Returns:
+ list: A list of receipts.
+ """
+ def f(txn):
+ if from_key:
+ sql = (
+ "SELECT * FROM receipts_linearized WHERE"
+ " room_id = ? AND stream_id > ? AND stream_id <= ?"
+ )
+
+ txn.execute(
+ sql,
+ (room_id, from_key, to_key)
+ )
+ else:
+ sql = (
+ "SELECT * FROM receipts_linearized WHERE"
+ " room_id = ? AND stream_id <= ?"
+ )
+
+ txn.execute(
+ sql,
+ (room_id, to_key)
+ )
+
+ rows = self.cursor_to_dict(txn)
+
+ return rows
+
+ rows = yield self.runInteraction(
+ "get_linearized_receipts_for_room", f
+ )
+
+ if not rows:
+ defer.returnValue([])
+
+ content = {}
+ for row in rows:
+ content.setdefault(
+ row["event_id"], {}
+ ).setdefault(
+ row["receipt_type"], {}
+ )[row["user_id"]] = json.loads(row["data"])
+
+ defer.returnValue([{
+ "type": "m.receipt",
+ "room_id": room_id,
+ "content": content,
+ }])
+
+ @cachedList(cache=get_linearized_receipts_for_room.cache, list_name="room_ids",
+ num_args=3, inlineCallbacks=True)
+ def _get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None):
+ if not room_ids:
+ defer.returnValue({})
+
+ def f(txn):
+ if from_key:
+ sql = (
+ "SELECT * FROM receipts_linearized WHERE"
+ " room_id IN (%s) AND stream_id > ? AND stream_id <= ?"
+ ) % (
+ ",".join(["?"] * len(room_ids))
+ )
+ args = list(room_ids)
+ args.extend([from_key, to_key])
+
+ txn.execute(sql, args)
+ else:
+ sql = (
+ "SELECT * FROM receipts_linearized WHERE"
+ " room_id IN (%s) AND stream_id <= ?"
+ ) % (
+ ",".join(["?"] * len(room_ids))
+ )
+
+ args = list(room_ids)
+ args.append(to_key)
+
+ txn.execute(sql, args)
+
+ return self.cursor_to_dict(txn)
+
+ txn_results = yield self.runInteraction(
+ "_get_linearized_receipts_for_rooms", f
+ )
+
+ results = {}
+ for row in txn_results:
+ # We want a single event per room, since we want to batch the
+ # receipts by room, event and type.
+ room_event = results.setdefault(row["room_id"], {
+ "type": "m.receipt",
+ "room_id": row["room_id"],
+ "content": {},
+ })
+
+ # The content is of the form:
+ # {"$foo:bar": { "read": { "@user:host": <receipt> }, .. }, .. }
+ event_entry = room_event["content"].setdefault(row["event_id"], {})
+ receipt_type = event_entry.setdefault(row["receipt_type"], {})
+
+ receipt_type[row["user_id"]] = json.loads(row["data"])
+
+ results = {
+ room_id: [results[room_id]] if room_id in results else []
+ for room_id in room_ids
+ }
+ defer.returnValue(results)
+
+ def get_max_receipt_stream_id(self):
+ return self._receipts_id_gen.get_max_token(self)
+
+ @cachedInlineCallbacks()
+ def get_graph_receipts_for_room(self, room_id):
+ """Get receipts for sending to remote servers.
+ """
+ rows = yield self._simple_select_list(
+ table="receipts_graph",
+ keyvalues={"room_id": room_id},
+ retcols=["receipt_type", "user_id", "event_id"],
+ desc="get_linearized_receipts_for_room",
+ )
+
+ result = {}
+ for row in rows:
+ result.setdefault(
+ row["user_id"], {}
+ ).setdefault(
+ row["receipt_type"], []
+ ).append(row["event_id"])
+
+ defer.returnValue(result)
+
+ def insert_linearized_receipt_txn(self, txn, room_id, receipt_type,
+ user_id, event_id, data, stream_id):
+
+ # We don't want to clobber receipts for more recent events, so we
+ # have to compare orderings of existing receipts
+ sql = (
+ "SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " INNER JOIN receipts_linearized as r USING (event_id, room_id)"
+ " WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ?"
+ )
+
+ txn.execute(sql, (room_id, receipt_type, user_id))
+ results = txn.fetchall()
+
+ if results:
+ res = self._simple_select_one_txn(
+ txn,
+ table="events",
+ retcols=["topological_ordering", "stream_ordering"],
+ keyvalues={"event_id": event_id},
+ )
+ topological_ordering = int(res["topological_ordering"])
+ stream_ordering = int(res["stream_ordering"])
+
+ for to, so, _ in results:
+ if int(to) > topological_ordering:
+ return False
+ elif int(to) == topological_ordering and int(so) >= stream_ordering:
+ return False
+
+ self._simple_delete_txn(
+ txn,
+ table="receipts_linearized",
+ keyvalues={
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ }
+ )
+
+ self._simple_insert_txn(
+ txn,
+ table="receipts_linearized",
+ values={
+ "stream_id": stream_id,
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ "event_id": event_id,
+ "data": json.dumps(data),
+ }
+ )
+
+ return True
+
+ @defer.inlineCallbacks
+ def insert_receipt(self, room_id, receipt_type, user_id, event_ids, data):
+ """Insert a receipt, either from local client or remote server.
+
+ Automatically does conversion between linearized and graph
+ representations.
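+
+ If several event_ids are given, the one with the highest stream
+ ordering in the room is used as the linearized receipt position.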
+ """
+ if not event_ids:
+ return
+
+ if len(event_ids) == 1:
+ linearized_event_id = event_ids[0]
+ else:
+ # We need to map the given points in the event graph to the
+ # linearized event with the highest stream ordering.
+ # TODO: Make this better.
+ def graph_to_linear(txn):
+ query = (
+ "SELECT event_id WHERE room_id = ? AND stream_ordering IN ("
+ " SELECT max(stream_ordering) WHERE event_id IN (%s)"
+ ")"
+ ) % (",".join(["?"] * len(event_ids)))
+
+ txn.execute(query, [room_id] + event_ids)
+ rows = txn.fetchall()
+ if rows:
+ return rows[0][0]
+ else:
+ raise RuntimeError("Unrecognized event_ids: %r" % (event_ids,))
+
+ linearized_event_id = yield self.runInteraction(
+ "insert_receipt_conv", graph_to_linear
+ )
+
+ stream_id_manager = yield self._receipts_id_gen.get_next(self)
+ with stream_id_manager as stream_id:
+ yield self._receipts_stream_cache.room_has_changed(
+ self, room_id, stream_id
+ )
+ have_persisted = yield self.runInteraction(
+ "insert_linearized_receipt",
+ self.insert_linearized_receipt_txn,
+ room_id, receipt_type, user_id, linearized_event_id,
+ data,
+ stream_id=stream_id,
+ )
+
+ if not have_persisted:
+ defer.returnValue(None)
+
+ yield self.insert_graph_receipt(
+ room_id, receipt_type, user_id, event_ids, data
+ )
+
+ max_persisted_id = yield self._receipts_id_gen.get_max_token(self)
+ defer.returnValue((stream_id, max_persisted_id))
+
+ def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids,
+ data):
+ return self.runInteraction(
+ "insert_graph_receipt",
+ self.insert_graph_receipt_txn,
+ room_id, receipt_type, user_id, event_ids, data
+ )
+
+ def insert_graph_receipt_txn(self, txn, room_id, receipt_type,
+ user_id, event_ids, data):
+ self._simple_delete_txn(
+ txn,
+ table="receipts_graph",
+ keyvalues={
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ }
+ )
+ self._simple_insert_txn(
+ txn,
+ table="receipts_graph",
+ values={
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ "event_ids": json.dumps(event_ids),
+ "data": json.dumps(data),
+ }
+ )
+
+
+class _RoomStreamChangeCache(object):
+ """Keeps track of the stream_id of the latest change in rooms.
+
+ Given a list of rooms and stream key, it will give a subset of rooms that
+ may have changed since that key. If the key is too old then the cache
+ will simply return all rooms.
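+
+ A rough illustration (hypothetical values): if room A last changed
+ at stream id 10 and room B at 20, then get_rooms_changed for
+ [A, B] with key 15 yields only B, while a key at or below the
+ earliest known change returns the given rooms unchanged.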
+ """
+ def __init__(self, size_of_cache=10000):
+ self._size_of_cache = size_of_cache
+ self._room_to_key = {}
+ self._cache = sorteddict()
+ self._earliest_key = None
+ self.name = "ReceiptsRoomChangeCache"
+ caches_by_name[self.name] = self._cache
+
+ @defer.inlineCallbacks
+ def get_rooms_changed(self, store, room_ids, key):
+ """Returns subset of room ids that have had new receipts since the
+ given key. If the key is too old it will just return the given list.
+ """
+ if key > (yield self._get_earliest_key(store)):
+ keys = self._cache.keys()
+ i = keys.bisect_right(key)
+
+ result = set(
+ self._cache[k] for k in keys[i:]
+ ).intersection(room_ids)
+
+ cache_counter.inc_hits(self.name)
+ else:
+ result = room_ids
+ cache_counter.inc_misses(self.name)
+
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def room_has_changed(self, store, room_id, key):
+ """Informs the cache that the room has been changed at the given key.
+ """
+ if key > (yield self._get_earliest_key(store)):
+ old_key = self._room_to_key.get(room_id, None)
+ if old_key:
+ key = max(key, old_key)
+ self._cache.pop(old_key, None)
+ self._cache[key] = room_id
+
+ while len(self._cache) > self._size_of_cache:
+ k, r = self._cache.popitem()
+ self._earliest_key = max(k, self._earliest_key)
+ self._room_to_key.pop(r, None)
+
+ @defer.inlineCallbacks
+ def _get_earliest_key(self, store):
+ if self._earliest_key is None:
+ self._earliest_key = yield store.get_max_receipt_stream_id()
+ self._earliest_key = int(self._earliest_key)
+
+ defer.returnValue(self._earliest_key)
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
new file mode 100644
index 00000000..2e5eddd2
--- /dev/null
+++ b/synapse/storage/registration.py
@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import StoreError, Codes
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached
+
+
+class RegistrationStore(SQLBaseStore):
+
+ def __init__(self, hs):
+ super(RegistrationStore, self).__init__(hs)
+
+ self.clock = hs.get_clock()
+
+ @defer.inlineCallbacks
+ def add_access_token_to_user(self, user_id, token):
+ """Adds an access token for the given user.
+
+ Args:
+ user_id (str): The user ID.
+ token (str): The new access token to add.
+ Raises:
+ StoreError if there was a problem adding this.
+ """
+ next_id = yield self._access_tokens_id_gen.get_next()
+
+ yield self._simple_insert(
+ "access_tokens",
+ {
+ "id": next_id,
+ "user_id": user_id,
+ "token": token
+ },
+ desc="add_access_token_to_user",
+ )
+
+ @defer.inlineCallbacks
+ def add_refresh_token_to_user(self, user_id, token):
+ """Adds a refresh token for the given user.
+
+ Args:
+ user_id (str): The user ID.
+ token (str): The new refresh token to add.
+ Raises:
+ StoreError if there was a problem adding this.
+ """
+ next_id = yield self._refresh_tokens_id_gen.get_next()
+
+ yield self._simple_insert(
+ "refresh_tokens",
+ {
+ "id": next_id,
+ "user_id": user_id,
+ "token": token
+ },
+ desc="add_refresh_token_to_user",
+ )
+
+ @defer.inlineCallbacks
+ def register(self, user_id, token, password_hash):
+ """Attempts to register an account.
+
+ Args:
+ user_id (str): The desired user ID to register.
+ token (str): The desired access token to use for this user.
+ password_hash (str): Optional. The password hash for this user.
+ Raises:
+ StoreError if the user_id could not be registered.
+ """
+ yield self.runInteraction(
+ "register",
+ self._register, user_id, token, password_hash
+ )
+
+ def _register(self, txn, user_id, token, password_hash):
+ now = int(self.clock.time())
+
+ next_id = self._access_tokens_id_gen.get_next_txn(txn)
+
+ try:
+ txn.execute("INSERT INTO users(name, password_hash, creation_ts) "
+ "VALUES (?,?,?)",
+ [user_id, password_hash, now])
+ except self.database_engine.module.IntegrityError:
+ raise StoreError(
+ 400, "User ID already taken.", errcode=Codes.USER_IN_USE
+ )
+
+ if token:
+ # it's possible for this to get a conflict, but only for a single user
+ # since tokens are namespaced based on their user ID
+ txn.execute(
+ "INSERT INTO access_tokens(id, user_id, token)"
+ " VALUES (?,?,?)",
+ (next_id, user_id, token,)
+ )
+
+ def get_user_by_id(self, user_id):
+ return self._simple_select_one(
+ table="users",
+ keyvalues={
+ "name": user_id,
+ },
+ retcols=["name", "password_hash"],
+ allow_none=True,
+ )
+
+ def get_users_by_id_case_insensitive(self, user_id):
+ """Gets users that match user_id case insensitively.
+ Returns a mapping of user_id -> password_hash.
+ """
+ def f(txn):
+ sql = (
+ "SELECT name, password_hash FROM users"
+ " WHERE lower(name) = lower(?)"
+ )
+ txn.execute(sql, (user_id,))
+ return dict(txn.fetchall())
+
+ return self.runInteraction("get_users_by_id_case_insensitive", f)
+
+ @defer.inlineCallbacks
+ def user_set_password_hash(self, user_id, password_hash):
+ """
+ NB. This does *not* evict any cache because the one caller of this
+ method subsequently removes most of the cached entries anyway, so
+ evicting here would be pointless. Use flush_user separately.
+ """
+ yield self._simple_update_one('users', {
+ 'name': user_id
+ }, {
+ 'password_hash': password_hash
+ })
+
+ @defer.inlineCallbacks
+ def user_delete_access_tokens(self, user_id):
+ yield self.runInteraction(
+ "user_delete_access_tokens",
+ self._user_delete_access_tokens, user_id
+ )
+
+ def _user_delete_access_tokens(self, txn, user_id):
+ txn.execute(
+ "DELETE FROM access_tokens WHERE user_id = ?",
+ (user_id, )
+ )
+
+ @defer.inlineCallbacks
+ def flush_user(self, user_id):
+ rows = yield self._execute(
+ 'flush_user', None,
+ "SELECT token FROM access_tokens WHERE user_id = ?",
+ user_id
+ )
+ for r in rows:
+ # each row is a (token,) tuple
+ self.get_user_by_access_token.invalidate((r[0],))
+
+ @cached()
+ def get_user_by_access_token(self, token):
+ """Get a user from the given access token.
+
+ Args:
+ token (str): The access token of a user.
+ Returns:
+ dict: Including the name (user_id) and the ID of their access
+ token, or None if no user was found for the token.
+ """
+ return self.runInteraction(
+ "get_user_by_access_token",
+ self._query_for_auth,
+ token
+ )
+
+ def exchange_refresh_token(self, refresh_token, token_generator):
+ """Exchange a refresh token for a new access token and refresh token.
+
+ Doing so invalidates the old refresh token - refresh tokens are single
+ use.
+
+ Args:
+ refresh_token (str): The refresh token of a user.
+ token_generator (fn: str -> str): Function which, when given a
+ user ID, returns a unique refresh token for that user. This
+ function must never return the same value twice.
+ Returns:
+ tuple of (user_id, refresh_token)
+ Raises:
+ StoreError if no user was found with that refresh token.
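+
+ An illustrative token_generator (example only):
+
+ def token_generator(user_id):
+ # any source of uniqueness will do
+ return "%s-%s" % (user_id, uuid.uuid4())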
+ """
+ return self.runInteraction(
+ "exchange_refresh_token",
+ self._exchange_refresh_token,
+ refresh_token,
+ token_generator
+ )
+
+ def _exchange_refresh_token(self, txn, old_token, token_generator):
+ sql = "SELECT user_id FROM refresh_tokens WHERE token = ?"
+ txn.execute(sql, (old_token,))
+ rows = self.cursor_to_dict(txn)
+ if not rows:
+ raise StoreError(403, "Did not recognize refresh token")
+ user_id = rows[0]["user_id"]
+
+ # TODO(danielwh): Maybe perform a validation on the macaroon that
+ # macaroon.user_id == user_id.
+
+ new_token = token_generator(user_id)
+ sql = "UPDATE refresh_tokens SET token = ? WHERE token = ?"
+ txn.execute(sql, (new_token, old_token,))
+
+ return user_id, new_token
+
+ @defer.inlineCallbacks
+ def is_server_admin(self, user):
+ res = yield self._simple_select_one_onecol(
+ table="users",
+ keyvalues={"name": user.to_string()},
+ retcol="admin",
+ allow_none=True,
+ desc="is_server_admin",
+ )
+
+ defer.returnValue(res if res else False)
+
+ def _query_for_auth(self, txn, token):
+ sql = (
+ "SELECT users.name, access_tokens.id as token_id"
+ " FROM users"
+ " INNER JOIN access_tokens on users.name = access_tokens.user_id"
+ " WHERE token = ?"
+ )
+
+ txn.execute(sql, (token,))
+ rows = self.cursor_to_dict(txn)
+ if rows:
+ return rows[0]
+
+ return None
+
+ @defer.inlineCallbacks
+ def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
+ yield self._simple_upsert("user_threepids", {
+ "user_id": user_id,
+ "medium": medium,
+ "address": address,
+ }, {
+ "validated_at": validated_at,
+ "added_at": added_at,
+ })
+
+ @defer.inlineCallbacks
+ def user_get_threepids(self, user_id):
+ ret = yield self._simple_select_list(
+ "user_threepids", {
+ "user_id": user_id
+ },
+ ['medium', 'address', 'validated_at', 'added_at'],
+ 'user_get_threepids'
+ )
+ defer.returnValue(ret)
+
+ @defer.inlineCallbacks
+ def get_user_id_by_threepid(self, medium, address):
+ ret = yield self._simple_select_one(
+ "user_threepids",
+ {
+ "medium": medium,
+ "address": address
+ },
+ ['user_id'], True, 'get_user_id_by_threepid'
+ )
+ if ret:
+ defer.returnValue(ret['user_id'])
+ defer.returnValue(None)
+
+ @defer.inlineCallbacks
+ def count_all_users(self):
+ """Counts all users registered on the homeserver."""
+ def _count_users(txn):
+ txn.execute("SELECT COUNT(*) AS users FROM users")
+ rows = self.cursor_to_dict(txn)
+ if rows:
+ return rows[0]["users"]
+ return 0
+
+ ret = yield self.runInteraction("count_users", _count_users)
+ defer.returnValue(ret)
diff --git a/synapse/storage/rejections.py b/synapse/storage/rejections.py
new file mode 100644
index 00000000..0838eb3d
--- /dev/null
+++ b/synapse/storage/rejections.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class RejectionsStore(SQLBaseStore):
+ def _store_rejections_txn(self, txn, event_id, reason):
+ self._simple_insert_txn(
+ txn,
+ table="rejections",
+ values={
+ "event_id": event_id,
+ "reason": reason,
+ "last_check": self._clock.time_msec(),
+ },
+ )
+
+ def get_rejection_reason(self, event_id):
+ return self._simple_select_one_onecol(
+ table="rejections",
+ retcol="reason",
+ keyvalues={
+ "event_id": event_id,
+ },
+ allow_none=True,
+ desc="get_rejection_reason",
+ )
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
new file mode 100644
index 00000000..4f08df47
--- /dev/null
+++ b/synapse/storage/room.py
@@ -0,0 +1,287 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import StoreError
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+from .engines import PostgresEngine, Sqlite3Engine
+
+import collections
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+OpsLevel = collections.namedtuple(
+ "OpsLevel",
+ ("ban_level", "kick_level", "redact_level",)
+)
+
+
+class RoomStore(SQLBaseStore):
+
+ @defer.inlineCallbacks
+ def store_room(self, room_id, room_creator_user_id, is_public):
+ """Stores a room.
+
+ Args:
+ room_id (str): The desired room ID, can be None.
+ room_creator_user_id (str): The user ID of the room creator.
+ is_public (bool): True to indicate that this room should appear in
+ public room lists.
+ Raises:
+ StoreError if the room could not be stored.
+ """
+ try:
+ yield self._simple_insert(
+ RoomsTable.table_name,
+ {
+ "room_id": room_id,
+ "creator": room_creator_user_id,
+ "is_public": is_public,
+ },
+ desc="store_room",
+ )
+ except Exception as e:
+ logger.error("store_room with room_id=%s failed: %s", room_id, e)
+ raise StoreError(500, "Problem creating room.")
+
+ def get_room(self, room_id):
+ """Retrieve a room.
+
+ Args:
+ room_id (str): The ID of the room to retrieve.
+ Returns:
+ A dict containing the room information, or None if the room is unknown.
+ """
+ return self._simple_select_one(
+ table=RoomsTable.table_name,
+ keyvalues={"room_id": room_id},
+ retcols=RoomsTable.fields,
+ desc="get_room",
+ allow_none=True,
+ )
+
+ def get_public_room_ids(self):
+ return self._simple_select_onecol(
+ table="rooms",
+ keyvalues={
+ "is_public": True,
+ },
+ retcol="room_id",
+ desc="get_public_room_ids",
+ )
+
+ @defer.inlineCallbacks
+ def get_rooms(self, is_public):
+ """Retrieve a list of all public rooms.
+
+ Args:
+ is_public (bool): True if the rooms returned should be public.
+ Returns:
+ A list of room dicts containing at least a "room_id" key, a
+ "topic" key if one is set, and a "name" key if one is set
+ """
+
+ def f(txn):
+ def subquery(table_name, column_name=None):
+ column_name = column_name or table_name
+ return (
+ "SELECT %(table_name)s.event_id as event_id, "
+ "%(table_name)s.room_id as room_id, %(column_name)s "
+ "FROM %(table_name)s "
+ "INNER JOIN current_state_events as c "
+ "ON c.event_id = %(table_name)s.event_id " % {
+ "column_name": column_name,
+ "table_name": table_name,
+ }
+ )
+
+ sql = (
+ "SELECT"
+ " r.room_id,"
+ " max(n.name),"
+ " max(t.topic),"
+ " max(v.history_visibility),"
+ " max(g.guest_access)"
+ " FROM rooms AS r"
+ " LEFT JOIN (%(topic)s) AS t ON t.room_id = r.room_id"
+ " LEFT JOIN (%(name)s) AS n ON n.room_id = r.room_id"
+ " LEFT JOIN (%(history_visibility)s) AS v ON v.room_id = r.room_id"
+ " LEFT JOIN (%(guest_access)s) AS g ON g.room_id = r.room_id"
+ " WHERE r.is_public = ?"
+ " GROUP BY r.room_id" % {
+ "topic": subquery("topics", "topic"),
+ "name": subquery("room_names", "name"),
+ "history_visibility": subquery("history_visibility"),
+ "guest_access": subquery("guest_access"),
+ }
+ )
+
+ txn.execute(sql, (is_public,))
+
+ rows = txn.fetchall()
+
+ for i, row in enumerate(rows):
+ room_id = row[0]
+ aliases = self._simple_select_onecol_txn(
+ txn,
+ table="room_aliases",
+ keyvalues={
+ "room_id": room_id
+ },
+ retcol="room_alias",
+ )
+
+ rows[i] = list(row) + [aliases]
+
+ return rows
+
+ rows = yield self.runInteraction(
+ "get_rooms", f
+ )
+
+ ret = [
+ {
+ "room_id": r[0],
+ "name": r[1],
+ "topic": r[2],
+ "world_readable": r[3] == "world_readable",
+ "guest_can_join": r[4] == "can_join",
+ "aliases": r[5],
+ }
+ for r in rows
+ if r[5] # We only return rooms that have at least one alias.
+ ]
+
+ defer.returnValue(ret)
+
+ def _store_room_topic_txn(self, txn, event):
+ if hasattr(event, "content") and "topic" in event.content:
+ self._simple_insert_txn(
+ txn,
+ "topics",
+ {
+ "event_id": event.event_id,
+ "room_id": event.room_id,
+ "topic": event.content["topic"],
+ },
+ )
+
+ self._store_event_search_txn(
+ txn, event, "content.topic", event.content["topic"]
+ )
+
+ def _store_room_name_txn(self, txn, event):
+ if hasattr(event, "content") and "name" in event.content:
+ self._simple_insert_txn(
+ txn,
+ "room_names",
+ {
+ "event_id": event.event_id,
+ "room_id": event.room_id,
+ "name": event.content["name"],
+ }
+ )
+
+ self._store_event_search_txn(
+ txn, event, "content.name", event.content["name"]
+ )
+
+ def _store_room_message_txn(self, txn, event):
+ if hasattr(event, "content") and "body" in event.content:
+ self._store_event_search_txn(
+ txn, event, "content.body", event.content["body"]
+ )
+
+ def _store_history_visibility_txn(self, txn, event):
+ self._store_content_index_txn(txn, event, "history_visibility")
+
+ def _store_guest_access_txn(self, txn, event):
+ self._store_content_index_txn(txn, event, "guest_access")
+
+ def _store_content_index_txn(self, txn, event, key):
+ if hasattr(event, "content") and key in event.content:
+ sql = (
+ "INSERT INTO %(key)s"
+ " (event_id, room_id, %(key)s)"
+ " VALUES (?, ?, ?)" % {"key": key}
+ )
+ txn.execute(sql, (
+ event.event_id,
+ event.room_id,
+ event.content[key]
+ ))
+
+ def _store_event_search_txn(self, txn, event, key, value):
+ if isinstance(self.database_engine, PostgresEngine):
+ sql = (
+ "INSERT INTO event_search (event_id, room_id, key, vector)"
+ " VALUES (?,?,?,to_tsvector('english', ?))"
+ )
+ elif isinstance(self.database_engine, Sqlite3Engine):
+ sql = (
+ "INSERT INTO event_search (event_id, room_id, key, value)"
+ " VALUES (?,?,?,?)"
+ )
+ else:
+ # This should be unreachable.
+ raise Exception("Unrecognized database engine")
+
+ txn.execute(sql, (event.event_id, event.room_id, key, value,))
+
+ @cachedInlineCallbacks()
+ def get_room_name_and_aliases(self, room_id):
+ def f(txn):
+ sql = (
+ "SELECT event_id FROM current_state_events "
+ "WHERE room_id = ? "
+ )
+
+ sql += " AND ((type = 'm.room.name' AND state_key = '')"
+ sql += " OR type = 'm.room.aliases')"
+
+ txn.execute(sql, (room_id,))
+ results = self.cursor_to_dict(txn)
+
+ return self._parse_events_txn(txn, results)
+
+ events = yield self.runInteraction("get_room_name_and_aliases", f)
+
+ name = None
+ aliases = []
+
+ for e in events:
+ if e.type == 'm.room.name':
+ if 'name' in e.content:
+ name = e.content['name']
+ elif e.type == 'm.room.aliases':
+ if 'aliases' in e.content:
+ aliases.extend(e.content['aliases'])
+
+ defer.returnValue((name, aliases))
+
+
+class RoomsTable(object):
+ table_name = "rooms"
+
+ fields = [
+ "room_id",
+ "is_public",
+ "creator"
+ ]
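
The SQLite branch of _store_event_search_txn can be exercised in isolation;
a sketch assuming a Python sqlite3 build with FTS4 enabled (the event and
room IDs are made up):

import sqlite3

# Illustrative: the SQLite arm of the engine dispatch above. The Postgres
# arm instead inserts to_tsvector('english', ?) into a tsvector column.
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE VIRTUAL TABLE event_search"
    " USING fts4 ( event_id, room_id, sender, key, value )"
)
conn.execute(
    "INSERT INTO event_search (event_id, room_id, key, value) VALUES (?,?,?,?)",
    ("$ev1", "!room:example.com", "content.body", "hello world"),
)
rows = conn.execute(
    "SELECT event_id FROM event_search WHERE value MATCH ?", ("hello",)
).fetchall()
assert rows == [("$ev1",)]
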
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
new file mode 100644
index 00000000..ae1ad56d
--- /dev/null
+++ b/synapse/storage/roommember.py
@@ -0,0 +1,271 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from collections import namedtuple
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached
+
+from synapse.api.constants import Membership
+from synapse.types import UserID
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+RoomsForUser = namedtuple(
+ "RoomsForUser",
+ ("room_id", "sender", "membership", "event_id", "stream_ordering")
+)
+
+
+class RoomMemberStore(SQLBaseStore):
+
+ def _store_room_members_txn(self, txn, events):
+ """Store a room member in the database.
+ """
+ self._simple_insert_many_txn(
+ txn,
+ table="room_memberships",
+ values=[
+ {
+ "event_id": event.event_id,
+ "user_id": event.state_key,
+ "sender": event.user_id,
+ "room_id": event.room_id,
+ "membership": event.membership,
+ }
+ for event in events
+ ]
+ )
+
+ for event in events:
+ txn.call_after(self.get_rooms_for_user.invalidate, (event.state_key,))
+ txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,))
+ txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
+
+ def get_room_member(self, user_id, room_id):
+ """Retrieve the current state of a room member.
+
+ Args:
+ user_id (str): The member's user ID.
+ room_id (str): The room the member is in.
+ Returns:
+ Deferred: Results in a MembershipEvent or None.
+ """
+ return self.runInteraction(
+ "get_room_member",
+ self._get_members_events_txn,
+ room_id,
+ user_id=user_id,
+ ).addCallback(
+ self._get_events
+ ).addCallback(
+ lambda events: events[0] if events else None
+ )
+
+ @cached(max_entries=5000)
+ def get_users_in_room(self, room_id):
+ def f(txn):
+
+ rows = self._get_members_rows_txn(
+ txn,
+ room_id=room_id,
+ membership=Membership.JOIN,
+ )
+
+ return [r["user_id"] for r in rows]
+ return self.runInteraction("get_users_in_room", f)
+
+ def get_room_members(self, room_id, membership=None):
+ """Retrieve the current room member list for a room.
+
+ Args:
+ room_id (str): The room to get the list of members.
+ membership (synapse.api.constants.Membership): The filter to apply
+ to this list, or None to return all members with some state
+ associated with this room.
+ Returns:
+ list of namedtuples representing the members in this room.
+ """
+ return self.runInteraction(
+ "get_room_members",
+ self._get_members_events_txn,
+ room_id,
+ membership=membership,
+ ).addCallback(self._get_events)
+
+ def get_invites_for_user(self, user_id):
+ """ Get all the invite events for a user
+ Args:
+ user_id (str): The user ID.
+ Returns:
+ A deferred list of event objects.
+ """
+
+ return self.get_rooms_for_user_where_membership_is(
+ user_id, [Membership.INVITE]
+ ).addCallback(lambda invites: self._get_events([
+ invite.event_id for invite in invites
+ ]))
+
+ def get_leave_and_ban_events_for_user(self, user_id):
+ """ Get all the leave events for a user
+ Args:
+ user_id (str): The user ID.
+ Returns:
+ A deferred list of event objects.
+ """
+ return self.get_rooms_for_user_where_membership_is(
+ user_id, (Membership.LEAVE, Membership.BAN)
+ ).addCallback(lambda leaves: self._get_events([
+ leave.event_id for leave in leaves
+ ]))
+
+ def get_rooms_for_user_where_membership_is(self, user_id, membership_list):
+ """ Get all the rooms for this user where the membership for this user
+ matches one in the membership list.
+
+ Args:
+ user_id (str): The user ID.
+ membership_list (list): A list of synapse.api.constants.Membership
+ values which the user must be in.
+ Returns:
+ A deferred list of RoomsForUser namedtuples, with room_id, sender,
+ membership, event_id and stream_ordering defined.
+ """
+ if not membership_list:
+ return defer.succeed(None)
+
+ return self.runInteraction(
+ "get_rooms_for_user_where_membership_is",
+ self._get_rooms_for_user_where_membership_is_txn,
+ user_id, membership_list
+ )
+
+ def _get_rooms_for_user_where_membership_is_txn(self, txn, user_id,
+ membership_list):
+ where_clause = "user_id = ? AND (%s)" % (
+ " OR ".join(["membership = ?" for _ in membership_list]),
+ )
+
+ args = [user_id]
+ args.extend(membership_list)
+
+ sql = (
+ "SELECT m.room_id, m.sender, m.membership, m.event_id, e.stream_ordering"
+ " FROM current_state_events as c"
+ " INNER JOIN room_memberships as m"
+ " ON m.event_id = c.event_id"
+ " INNER JOIN events as e"
+ " ON e.event_id = c.event_id"
+ " AND m.room_id = c.room_id"
+ " AND m.user_id = c.state_key"
+ " WHERE %s"
+ ) % (where_clause,)
+
+ txn.execute(sql, args)
+ return [
+ RoomsForUser(**r) for r in self.cursor_to_dict(txn)
+ ]
+
+ @cached(max_entries=5000)
+ def get_joined_hosts_for_room(self, room_id):
+ return self.runInteraction(
+ "get_joined_hosts_for_room",
+ self._get_joined_hosts_for_room_txn,
+ room_id,
+ )
+
+ def _get_joined_hosts_for_room_txn(self, txn, room_id):
+ rows = self._get_members_rows_txn(
+ txn,
+ room_id, membership=Membership.JOIN
+ )
+
+ joined_domains = set(
+ UserID.from_string(r["user_id"]).domain
+ for r in rows
+ )
+
+ return joined_domains
+
+ def _get_members_events_txn(self, txn, room_id, membership=None, user_id=None):
+ rows = self._get_members_rows_txn(
+ txn,
+ room_id, membership, user_id,
+ )
+ return [r["event_id"] for r in rows]
+
+ def _get_members_rows_txn(self, txn, room_id, membership=None, user_id=None):
+ where_clause = "c.room_id = ?"
+ where_values = [room_id]
+
+ if membership:
+ where_clause += " AND m.membership = ?"
+ where_values.append(membership)
+
+ if user_id:
+ where_clause += " AND m.user_id = ?"
+ where_values.append(user_id)
+
+ sql = (
+ "SELECT m.* FROM room_memberships as m"
+ " INNER JOIN current_state_events as c"
+ " ON m.event_id = c.event_id "
+ " AND m.room_id = c.room_id "
+ " AND m.user_id = c.state_key"
+ " WHERE %(where)s"
+ ) % {
+ "where": where_clause,
+ }
+
+ txn.execute(sql, where_values)
+ rows = self.cursor_to_dict(txn)
+
+ return rows
+
+ @cached()
+ def get_rooms_for_user(self, user_id):
+ return self.get_rooms_for_user_where_membership_is(
+ user_id, membership_list=[Membership.JOIN],
+ )
+
+ @defer.inlineCallbacks
+ def user_rooms_intersect(self, user_id_list):
+ """ Checks whether all the users whose IDs are given in a list share a
+ room.
+
+ This is a "hot path" function that's called a lot, e.g. by presence for
+ generating the event stream. As such, it is implemented locally by
+ wrapping logic around heavily-cached database queries.
+ """
+ if len(user_id_list) < 2:
+ defer.returnValue(True)
+
+ deferreds = [self.get_rooms_for_user(u) for u in user_id_list]
+
+ results = yield defer.DeferredList(deferreds, consumeErrors=True)
+
+ # A list of sets of strings giving room IDs for each user
+ room_id_lists = [set([r.room_id for r in result[1]]) for result in results]
+
+ # There isn't a set.intersection(*list_of_sets), so pop the first set and intersect the rest onto it
+ ret = len(room_id_lists.pop(0).intersection(*room_id_lists)) > 0
+
+ defer.returnValue(ret)
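
The closing intersection in user_rooms_intersect is plain set algebra; a
sketch with made-up room IDs:

# Illustrative: intersect every user's room set onto the first one.
room_id_lists = [
    {"!a:hs", "!b:hs"},  # rooms for user 1
    {"!b:hs", "!c:hs"},  # rooms for user 2
    {"!b:hs"},           # rooms for user 3
]
shared = room_id_lists.pop(0).intersection(*room_id_lists)
assert shared == {"!b:hs"}  # all three users share a room
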
diff --git a/synapse/storage/schema/delta/11/v11.sql b/synapse/storage/schema/delta/11/v11.sql
new file mode 100644
index 00000000..31359222
--- /dev/null
+++ b/synapse/storage/schema/delta/11/v11.sql
@@ -0,0 +1,16 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE INDEX IF NOT EXISTS sent_transaction_txn_id ON sent_transactions(transaction_id);
\ No newline at end of file
diff --git a/synapse/storage/schema/delta/12/v12.sql b/synapse/storage/schema/delta/12/v12.sql
new file mode 100644
index 00000000..878c3626
--- /dev/null
+++ b/synapse/storage/schema/delta/12/v12.sql
@@ -0,0 +1,63 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS rejections(
+ event_id TEXT NOT NULL,
+ reason TEXT NOT NULL,
+ last_check TEXT NOT NULL,
+ UNIQUE (event_id)
+);
+
+-- Push notification endpoints that users have configured
+CREATE TABLE IF NOT EXISTS pushers (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ user_name TEXT NOT NULL,
+ profile_tag VARCHAR(32) NOT NULL,
+ kind VARCHAR(8) NOT NULL,
+ app_id VARCHAR(64) NOT NULL,
+ app_display_name VARCHAR(64) NOT NULL,
+ device_display_name VARCHAR(128) NOT NULL,
+ pushkey VARBINARY(512) NOT NULL,
+ ts BIGINT UNSIGNED NOT NULL,
+ lang VARCHAR(8),
+ data LONGBLOB,
+ last_token TEXT,
+ last_success BIGINT UNSIGNED,
+ failing_since BIGINT UNSIGNED,
+ UNIQUE (app_id, pushkey)
+);
+
+CREATE TABLE IF NOT EXISTS push_rules (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ user_name TEXT NOT NULL,
+ rule_id TEXT NOT NULL,
+ priority_class TINYINT NOT NULL,
+ priority INTEGER NOT NULL DEFAULT 0,
+ conditions TEXT NOT NULL,
+ actions TEXT NOT NULL,
+ UNIQUE(user_name, rule_id)
+);
+
+CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name);
+
+CREATE TABLE IF NOT EXISTS user_filters(
+ user_id TEXT,
+ filter_id BIGINT UNSIGNED,
+ filter_json LONGBLOB
+);
+
+CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters(
+ user_id, filter_id
+);
diff --git a/synapse/storage/schema/delta/13/v13.sql b/synapse/storage/schema/delta/13/v13.sql
new file mode 100644
index 00000000..32659240
--- /dev/null
+++ b/synapse/storage/schema/delta/13/v13.sql
@@ -0,0 +1,31 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS application_services(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ url TEXT,
+ token TEXT,
+ hs_token TEXT,
+ sender TEXT,
+ UNIQUE(token)
+);
+
+CREATE TABLE IF NOT EXISTS application_services_regex(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ as_id BIGINT UNSIGNED NOT NULL,
+ namespace INTEGER, /* enum[room_id|room_alias|user_id] */
+ regex TEXT,
+ FOREIGN KEY(as_id) REFERENCES application_services(id)
+);
diff --git a/synapse/storage/schema/delta/14/upgrade_appservice_db.py b/synapse/storage/schema/delta/14/upgrade_appservice_db.py
new file mode 100644
index 00000000..61232f97
--- /dev/null
+++ b/synapse/storage/schema/delta/14/upgrade_appservice_db.py
@@ -0,0 +1,37 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def run_upgrade(cur, *args, **kwargs):
+ cur.execute("SELECT id, regex FROM application_services_regex")
+ for row in cur.fetchall():
+ try:
+ logger.debug("Checking %s...", row[0])
+ json.loads(row[1])
+ except ValueError:
+ # row isn't in json, make it so.
+ string_regex = row[1]
+ new_regex = json.dumps({
+ "regex": string_regex,
+ "exclusive": True
+ })
+ cur.execute(
+ "UPDATE application_services_regex SET regex=? WHERE id=?",
+ (new_regex, row[0])
+ )
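
For illustration, the before/after of one migrated row (the regex value is
invented):

import json

# Illustrative: the rewrite run_upgrade applies to each legacy row.
legacy = "@irc_.*:example\\.com"      # a made-up pre-JSON regex value
try:
    json.loads(legacy)                # legacy rows fail to parse as JSON...
    migrated = legacy                 # (already JSON: left untouched)
except ValueError:
    # ...and are rewritten as a JSON object with an explicit exclusive flag.
    migrated = json.dumps({"regex": legacy, "exclusive": True})
assert json.loads(migrated) == {"regex": legacy, "exclusive": True}
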
diff --git a/synapse/storage/schema/delta/14/v14.sql b/synapse/storage/schema/delta/14/v14.sql
new file mode 100644
index 00000000..1d09ad7a
--- /dev/null
+++ b/synapse/storage/schema/delta/14/v14.sql
@@ -0,0 +1,23 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS push_rules_enable (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ user_name TEXT NOT NULL,
+ rule_id TEXT NOT NULL,
+ enabled TINYINT,
+ UNIQUE(user_name, rule_id)
+);
+
+CREATE INDEX IF NOT EXISTS push_rules_enable_user_name on push_rules_enable (user_name);
diff --git a/synapse/storage/schema/delta/15/appservice_txns.sql b/synapse/storage/schema/delta/15/appservice_txns.sql
new file mode 100644
index 00000000..db2e7203
--- /dev/null
+++ b/synapse/storage/schema/delta/15/appservice_txns.sql
@@ -0,0 +1,31 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS application_services_state(
+ as_id TEXT PRIMARY KEY,
+ state VARCHAR(5),
+ last_txn INTEGER
+);
+
+CREATE TABLE IF NOT EXISTS application_services_txns(
+ as_id TEXT NOT NULL,
+ txn_id INTEGER NOT NULL,
+ event_ids TEXT NOT NULL,
+ UNIQUE(as_id, txn_id)
+);
+
+CREATE INDEX IF NOT EXISTS application_services_txns_id ON application_services_txns (
+ as_id
+);
diff --git a/synapse/storage/schema/delta/15/presence_indices.sql b/synapse/storage/schema/delta/15/presence_indices.sql
new file mode 100644
index 00000000..6b8d0f1c
--- /dev/null
+++ b/synapse/storage/schema/delta/15/presence_indices.sql
@@ -0,0 +1,2 @@
+
+CREATE INDEX IF NOT EXISTS presence_list_user_id ON presence_list (user_id);
diff --git a/synapse/storage/schema/delta/15/v15.sql b/synapse/storage/schema/delta/15/v15.sql
new file mode 100644
index 00000000..f5b2a08c
--- /dev/null
+++ b/synapse/storage/schema/delta/15/v15.sql
@@ -0,0 +1,25 @@
+-- Drop, copy & recreate pushers table to change unique key
+-- Also add access_token column at the same time
+CREATE TABLE IF NOT EXISTS pushers2 (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ user_name TEXT NOT NULL,
+ access_token INTEGER DEFAULT NULL,
+ profile_tag varchar(32) NOT NULL,
+ kind varchar(8) NOT NULL,
+ app_id varchar(64) NOT NULL,
+ app_display_name varchar(64) NOT NULL,
+ device_display_name varchar(128) NOT NULL,
+ pushkey blob NOT NULL,
+ ts BIGINT NOT NULL,
+ lang varchar(8),
+ data blob,
+ last_token TEXT,
+ last_success BIGINT,
+ failing_since BIGINT,
+ FOREIGN KEY(user_name) REFERENCES users(name),
+ UNIQUE (app_id, pushkey, user_name)
+);
+INSERT INTO pushers2 (id, user_name, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, ts, lang, data, last_token, last_success, failing_since)
+ SELECT id, user_name, profile_tag, kind, app_id, app_display_name, device_display_name, pushkey, ts, lang, data, last_token, last_success, failing_since FROM pushers;
+DROP TABLE pushers;
+ALTER TABLE pushers2 RENAME TO pushers;
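
SQLite cannot alter a table's constraints in place, hence the copy-and-rename
dance above; the same pattern on a toy table:

import sqlite3

# Illustrative: rebuild a table to change its UNIQUE key, as v15.sql does.
conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE t (a TEXT, b TEXT, UNIQUE (a));
    INSERT INTO t VALUES ('x', '1');

    CREATE TABLE t2 (a TEXT, b TEXT, UNIQUE (a, b));  -- new constraint
    INSERT INTO t2 SELECT a, b FROM t;                -- copy the rows over
    DROP TABLE t;
    ALTER TABLE t2 RENAME TO t;
""")
assert conn.execute("SELECT a, b FROM t").fetchall() == [("x", "1")]
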
diff --git a/synapse/storage/schema/delta/16/events_order_index.sql b/synapse/storage/schema/delta/16/events_order_index.sql
new file mode 100644
index 00000000..a48f2151
--- /dev/null
+++ b/synapse/storage/schema/delta/16/events_order_index.sql
@@ -0,0 +1,4 @@
+CREATE INDEX events_order ON events (topological_ordering, stream_ordering);
+CREATE INDEX events_order_room ON events (
+ room_id, topological_ordering, stream_ordering
+);
diff --git a/synapse/storage/schema/delta/16/remote_media_cache_index.sql b/synapse/storage/schema/delta/16/remote_media_cache_index.sql
new file mode 100644
index 00000000..7a15265c
--- /dev/null
+++ b/synapse/storage/schema/delta/16/remote_media_cache_index.sql
@@ -0,0 +1,2 @@
+CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id
+ ON remote_media_cache_thumbnails (media_id);
\ No newline at end of file
diff --git a/synapse/storage/schema/delta/16/remove_duplicates.sql b/synapse/storage/schema/delta/16/remove_duplicates.sql
new file mode 100644
index 00000000..65c97b5e
--- /dev/null
+++ b/synapse/storage/schema/delta/16/remove_duplicates.sql
@@ -0,0 +1,9 @@
+
+
+DELETE FROM event_to_state_groups WHERE state_group not in (
+ SELECT MAX(state_group) FROM event_to_state_groups GROUP BY event_id
+);
+
+DELETE FROM event_to_state_groups WHERE rowid not in (
+ SELECT MIN(rowid) FROM event_to_state_groups GROUP BY event_id
+);
diff --git a/synapse/storage/schema/delta/16/room_alias_index.sql b/synapse/storage/schema/delta/16/room_alias_index.sql
new file mode 100644
index 00000000..f8248613
--- /dev/null
+++ b/synapse/storage/schema/delta/16/room_alias_index.sql
@@ -0,0 +1,3 @@
+
+CREATE INDEX IF NOT EXISTS room_aliases_id ON room_aliases(room_id);
+CREATE INDEX IF NOT EXISTS room_alias_servers_alias ON room_alias_servers(room_alias);
diff --git a/synapse/storage/schema/delta/16/unique_constraints.sql b/synapse/storage/schema/delta/16/unique_constraints.sql
new file mode 100644
index 00000000..fecf1111
--- /dev/null
+++ b/synapse/storage/schema/delta/16/unique_constraints.sql
@@ -0,0 +1,80 @@
+
+-- We can use SQLite features here, since other db support was only added in v16
+
+--
+DELETE FROM current_state_events WHERE rowid not in (
+ SELECT MIN(rowid) FROM current_state_events GROUP BY event_id
+);
+
+DROP INDEX IF EXISTS current_state_events_event_id;
+CREATE UNIQUE INDEX current_state_events_event_id ON current_state_events(event_id);
+
+--
+DELETE FROM room_memberships WHERE rowid not in (
+ SELECT MIN(rowid) FROM room_memberships GROUP BY event_id
+);
+
+DROP INDEX IF EXISTS room_memberships_event_id;
+CREATE UNIQUE INDEX room_memberships_event_id ON room_memberships(event_id);
+
+--
+DELETE FROM feedback WHERE rowid not in (
+ SELECT MIN(rowid) FROM feedback GROUP BY event_id
+);
+
+DROP INDEX IF EXISTS feedback_event_id;
+CREATE UNIQUE INDEX feedback_event_id ON feedback(event_id);
+
+--
+DELETE FROM topics WHERE rowid not in (
+ SELECT MIN(rowid) FROM topics GROUP BY event_id
+);
+
+DROP INDEX IF EXISTS topics_event_id;
+CREATE UNIQUE INDEX topics_event_id ON topics(event_id);
+
+--
+DELETE FROM room_names WHERE rowid not in (
+ SELECT MIN(rowid) FROM room_names GROUP BY event_id
+);
+
+DROP INDEX IF EXISTS room_names_id;
+CREATE UNIQUE INDEX room_names_id ON room_names(event_id);
+
+--
+DELETE FROM presence WHERE rowid not in (
+ SELECT MIN(rowid) FROM presence GROUP BY user_id
+);
+
+DROP INDEX IF EXISTS presence_id;
+CREATE UNIQUE INDEX presence_id ON presence(user_id);
+
+--
+DELETE FROM presence_allow_inbound WHERE rowid not in (
+ SELECT MIN(rowid) FROM presence_allow_inbound
+ GROUP BY observed_user_id, observer_user_id
+);
+
+DROP INDEX IF EXISTS presence_allow_inbound_observers;
+CREATE UNIQUE INDEX presence_allow_inbound_observers ON presence_allow_inbound(
+ observed_user_id, observer_user_id
+);
+
+--
+DELETE FROM presence_list WHERE rowid not in (
+ SELECT MIN(rowid) FROM presence_list
+ GROUP BY user_id, observed_user_id
+);
+
+DROP INDEX IF EXISTS presence_list_observers;
+CREATE UNIQUE INDEX presence_list_observers ON presence_list(
+ user_id, observed_user_id
+);
+
+--
+DELETE FROM room_aliases WHERE rowid not in (
+ SELECT MIN(rowid) FROM room_aliases GROUP BY room_alias
+);
+
+DROP INDEX IF EXISTS room_aliases_id;
+CREATE INDEX room_aliases_id ON room_aliases(room_id);
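
The dedupe-then-unique-index idiom above leans on SQLite's implicit rowid;
a sketch on a toy topics table:

import sqlite3

# Illustrative: keep the earliest rowid per event_id, then enforce uniqueness.
conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE topics (event_id TEXT, topic TEXT);
    INSERT INTO topics VALUES ('$e1', 'kept'), ('$e1', 'dup'), ('$e2', 'x');

    DELETE FROM topics WHERE rowid NOT IN (
        SELECT MIN(rowid) FROM topics GROUP BY event_id
    );
    CREATE UNIQUE INDEX topics_event_id ON topics(event_id);
""")
assert conn.execute("SELECT COUNT(*) FROM topics").fetchone() == (2,)
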
diff --git a/synapse/storage/schema/delta/16/users.sql b/synapse/storage/schema/delta/16/users.sql
new file mode 100644
index 00000000..cd070925
--- /dev/null
+++ b/synapse/storage/schema/delta/16/users.sql
@@ -0,0 +1,56 @@
+-- Convert `access_tokens`.user from rowids to user strings.
+-- MUST BE DONE BEFORE REMOVING ID COLUMN FROM USERS TABLE BELOW
+CREATE TABLE IF NOT EXISTS new_access_tokens(
+ id BIGINT UNSIGNED PRIMARY KEY,
+ user_id TEXT NOT NULL,
+ device_id TEXT,
+ token TEXT NOT NULL,
+ last_used BIGINT UNSIGNED,
+ UNIQUE(token)
+);
+
+INSERT INTO new_access_tokens
+ SELECT a.id, u.name, a.device_id, a.token, a.last_used
+ FROM access_tokens as a
+ INNER JOIN users as u ON u.id = a.user_id;
+
+DROP TABLE access_tokens;
+
+ALTER TABLE new_access_tokens RENAME TO access_tokens;
+
+-- Remove ID column from `users` table
+CREATE TABLE IF NOT EXISTS new_users(
+ name TEXT,
+ password_hash TEXT,
+ creation_ts BIGINT UNSIGNED,
+ admin BOOL DEFAULT 0 NOT NULL,
+ UNIQUE(name)
+);
+
+INSERT INTO new_users SELECT name, password_hash, creation_ts, admin FROM users;
+
+DROP TABLE users;
+
+ALTER TABLE new_users RENAME TO users;
+
+
+-- Remove UNIQUE constraint from `user_ips` table
+CREATE TABLE IF NOT EXISTS new_user_ips (
+ user_id TEXT NOT NULL,
+ access_token TEXT NOT NULL,
+ device_id TEXT,
+ ip TEXT NOT NULL,
+ user_agent TEXT NOT NULL,
+ last_seen BIGINT UNSIGNED NOT NULL
+);
+
+INSERT INTO new_user_ips
+ SELECT user, access_token, device_id, ip, user_agent, last_seen FROM user_ips;
+
+DROP TABLE user_ips;
+
+ALTER TABLE new_user_ips RENAME TO user_ips;
+
+CREATE INDEX IF NOT EXISTS user_ips_user ON user_ips(user_id);
+CREATE INDEX IF NOT EXISTS user_ips_user_ip ON user_ips(user_id, access_token, ip);
+
diff --git a/synapse/storage/schema/delta/17/drop_indexes.sql b/synapse/storage/schema/delta/17/drop_indexes.sql
new file mode 100644
index 00000000..8eb3325a
--- /dev/null
+++ b/synapse/storage/schema/delta/17/drop_indexes.sql
@@ -0,0 +1,18 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+DROP INDEX IF EXISTS sent_transaction_dest;
+DROP INDEX IF EXISTS sent_transaction_sent;
+DROP INDEX IF EXISTS user_ips_user;
diff --git a/synapse/storage/schema/delta/17/server_keys.sql b/synapse/storage/schema/delta/17/server_keys.sql
new file mode 100644
index 00000000..513c30a7
--- /dev/null
+++ b/synapse/storage/schema/delta/17/server_keys.sql
@@ -0,0 +1,24 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS server_keys_json (
+ server_name TEXT, -- Server name.
+ key_id TEXT, -- Requested key id.
+ from_server TEXT, -- Which server the keys were fetched from.
+ ts_added_ms INTEGER, -- When the keys were fetched
+ ts_valid_until_ms INTEGER, -- When this version of the keys expires.
+ key_json bytea, -- JSON certificate for the remote server.
+ CONSTRAINT uniqueness UNIQUE (server_name, key_id, from_server)
+);
diff --git a/synapse/storage/schema/delta/17/user_threepids.sql b/synapse/storage/schema/delta/17/user_threepids.sql
new file mode 100644
index 00000000..c17715ac
--- /dev/null
+++ b/synapse/storage/schema/delta/17/user_threepids.sql
@@ -0,0 +1,9 @@
+CREATE TABLE user_threepids (
+ user_id TEXT NOT NULL,
+ medium TEXT NOT NULL,
+ address TEXT NOT NULL,
+ validated_at BIGINT NOT NULL,
+ added_at BIGINT NOT NULL,
+ CONSTRAINT user_medium_address UNIQUE (user_id, medium, address)
+);
+CREATE INDEX user_threepids_user_id ON user_threepids(user_id);
diff --git a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql
new file mode 100644
index 00000000..c0b0fdfb
--- /dev/null
+++ b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql
@@ -0,0 +1,32 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE TABLE IF NOT EXISTS new_server_keys_json (
+ server_name TEXT NOT NULL, -- Server name.
+ key_id TEXT NOT NULL, -- Requested key id.
+ from_server TEXT NOT NULL, -- Which server the keys were fetched from.
+ ts_added_ms BIGINT NOT NULL, -- When the keys were fetched
+ ts_valid_until_ms BIGINT NOT NULL, -- When this version of the keys expires.
+ key_json bytea NOT NULL, -- JSON certificate for the remote server.
+ CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server)
+);
+
+INSERT INTO new_server_keys_json
+ SELECT server_name, key_id, from_server, ts_added_ms, ts_valid_until_ms, key_json FROM server_keys_json;
+
+DROP TABLE server_keys_json;
+
+ALTER TABLE new_server_keys_json RENAME TO server_keys_json;
diff --git a/synapse/storage/schema/delta/19/event_index.sql b/synapse/storage/schema/delta/19/event_index.sql
new file mode 100644
index 00000000..3881fc98
--- /dev/null
+++ b/synapse/storage/schema/delta/19/event_index.sql
@@ -0,0 +1,19 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE INDEX events_order_topo_stream_room ON events(
+ topological_ordering, stream_ordering, room_id
+);
diff --git a/synapse/storage/schema/delta/20/dummy.sql b/synapse/storage/schema/delta/20/dummy.sql
new file mode 100644
index 00000000..e0ac49d1
--- /dev/null
+++ b/synapse/storage/schema/delta/20/dummy.sql
@@ -0,0 +1 @@
+SELECT 1;
diff --git a/synapse/storage/schema/delta/20/pushers.py b/synapse/storage/schema/delta/20/pushers.py
new file mode 100644
index 00000000..543e57bb
--- /dev/null
+++ b/synapse/storage/schema/delta/20/pushers.py
@@ -0,0 +1,76 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+The main purpose of this upgrade is to change the unique key on the
+pushers table again (it was missed when the v16 full schema was
+made), but it also changes the pushkey and data columns to text.
+When selecting a bytea column into a text column, postgres inserts
+the hex encoded data, and there's no portable way of getting the
+UTF-8 bytes, so we have to do it in Python.
+"""
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+ logger.info("Porting pushers table...")
+ cur.execute("""
+ CREATE TABLE IF NOT EXISTS pushers2 (
+ id BIGINT PRIMARY KEY,
+ user_name TEXT NOT NULL,
+ access_token BIGINT DEFAULT NULL,
+ profile_tag VARCHAR(32) NOT NULL,
+ kind VARCHAR(8) NOT NULL,
+ app_id VARCHAR(64) NOT NULL,
+ app_display_name VARCHAR(64) NOT NULL,
+ device_display_name VARCHAR(128) NOT NULL,
+ pushkey TEXT NOT NULL,
+ ts BIGINT NOT NULL,
+ lang VARCHAR(8),
+ data TEXT,
+ last_token TEXT,
+ last_success BIGINT,
+ failing_since BIGINT,
+ UNIQUE (app_id, pushkey, user_name)
+ )
+ """)
+ cur.execute("""SELECT
+ id, user_name, access_token, profile_tag, kind,
+ app_id, app_display_name, device_display_name,
+ pushkey, ts, lang, data, last_token, last_success,
+ failing_since
+ FROM pushers
+ """)
+ count = 0
+ for row in cur.fetchall():
+ row = list(row)
+ row[8] = bytes(row[8]).decode("utf-8")
+ row[11] = bytes(row[11]).decode("utf-8")
+ cur.execute(database_engine.convert_param_style("""
+ INSERT into pushers2 (
+ id, user_name, access_token, profile_tag, kind,
+ app_id, app_display_name, device_display_name,
+ pushkey, ts, lang, data, last_token, last_success,
+ failing_since
+ ) values (%s)""" % (','.join(['?' for _ in range(len(row))]))),
+ row
+ )
+ count += 1
+ cur.execute("DROP TABLE pushers")
+ cur.execute("ALTER TABLE pushers2 RENAME TO pushers")
+ logger.info("Moved %d pushers to new table", count)
diff --git a/synapse/storage/schema/delta/21/end_to_end_keys.sql b/synapse/storage/schema/delta/21/end_to_end_keys.sql
new file mode 100644
index 00000000..8b4a380d
--- /dev/null
+++ b/synapse/storage/schema/delta/21/end_to_end_keys.sql
@@ -0,0 +1,34 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE TABLE IF NOT EXISTS e2e_device_keys_json (
+ user_id TEXT NOT NULL, -- The user these keys are for.
+ device_id TEXT NOT NULL, -- Which of the user's devices these keys are for.
+ ts_added_ms BIGINT NOT NULL, -- When the keys were uploaded.
+ key_json TEXT NOT NULL, -- The keys for the device as a JSON blob.
+ CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id)
+);
+
+
+CREATE TABLE IF NOT EXISTS e2e_one_time_keys_json (
+ user_id TEXT NOT NULL, -- The user this one-time key is for.
+ device_id TEXT NOT NULL, -- The device this one-time key is for.
+ algorithm TEXT NOT NULL, -- Which algorithm this one-time key is for.
+ key_id TEXT NOT NULL, -- An id for suppressing duplicate uploads.
+ ts_added_ms BIGINT NOT NULL, -- When this key was uploaded.
+ key_json TEXT NOT NULL, -- The key as a JSON blob.
+ CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id)
+);
diff --git a/synapse/storage/schema/delta/21/receipts.sql b/synapse/storage/schema/delta/21/receipts.sql
new file mode 100644
index 00000000..2f64d609
--- /dev/null
+++ b/synapse/storage/schema/delta/21/receipts.sql
@@ -0,0 +1,38 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE TABLE IF NOT EXISTS receipts_graph(
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_ids TEXT NOT NULL,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id)
+);
+
+CREATE TABLE IF NOT EXISTS receipts_linearized (
+ stream_id BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id)
+);
+
+CREATE INDEX receipts_linearized_id ON receipts_linearized(
+ stream_id
+);
diff --git a/synapse/storage/schema/delta/22/receipts_index.sql b/synapse/storage/schema/delta/22/receipts_index.sql
new file mode 100644
index 00000000..b182b2b6
--- /dev/null
+++ b/synapse/storage/schema/delta/22/receipts_index.sql
@@ -0,0 +1,18 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized(
+ room_id, stream_id
+);
diff --git a/synapse/storage/schema/delta/22/user_threepids_unique.sql b/synapse/storage/schema/delta/22/user_threepids_unique.sql
new file mode 100644
index 00000000..87edfa45
--- /dev/null
+++ b/synapse/storage/schema/delta/22/user_threepids_unique.sql
@@ -0,0 +1,19 @@
+CREATE TABLE IF NOT EXISTS user_threepids2 (
+ user_id TEXT NOT NULL,
+ medium TEXT NOT NULL,
+ address TEXT NOT NULL,
+ validated_at BIGINT NOT NULL,
+ added_at BIGINT NOT NULL,
+ CONSTRAINT medium_address UNIQUE (medium, address)
+);
+
+INSERT INTO user_threepids2
+ SELECT * FROM user_threepids WHERE added_at IN (
+ SELECT max(added_at) FROM user_threepids GROUP BY medium, address
+ )
+;
+
+DROP TABLE user_threepids;
+ALTER TABLE user_threepids2 RENAME TO user_threepids;
+
+CREATE INDEX user_threepids_user_id ON user_threepids(user_id);
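
The added_at subquery keeps only the most recent binding per (medium, address)
before the new unique constraint lands; a sketch on a cut-down toy table:

import sqlite3

# Illustrative: two users claimed the same email; the newer binding survives.
conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE user_threepids (user_id TEXT, medium TEXT, address TEXT, added_at BIGINT);
    INSERT INTO user_threepids VALUES
        ('@old:hs', 'email', 'a@b.c', 100),
        ('@new:hs', 'email', 'a@b.c', 200);
""")
rows = conn.execute("""
    SELECT user_id FROM user_threepids WHERE added_at IN (
        SELECT max(added_at) FROM user_threepids GROUP BY medium, address
    )
""").fetchall()
assert rows == [("@new:hs",)]
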
diff --git a/synapse/storage/schema/delta/23/drop_state_index.sql b/synapse/storage/schema/delta/23/drop_state_index.sql
new file mode 100644
index 00000000..07d0ea5c
--- /dev/null
+++ b/synapse/storage/schema/delta/23/drop_state_index.sql
@@ -0,0 +1,16 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+DROP INDEX IF EXISTS state_groups_state_tuple;
diff --git a/synapse/storage/schema/delta/23/refresh_tokens.sql b/synapse/storage/schema/delta/23/refresh_tokens.sql
new file mode 100644
index 00000000..437b1ac1
--- /dev/null
+++ b/synapse/storage/schema/delta/23/refresh_tokens.sql
@@ -0,0 +1,21 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS refresh_tokens(
+ id INTEGER PRIMARY KEY,
+ token TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ UNIQUE (token)
+);
diff --git a/synapse/storage/schema/delta/24/stats_reporting.sql b/synapse/storage/schema/delta/24/stats_reporting.sql
new file mode 100644
index 00000000..e9165d29
--- /dev/null
+++ b/synapse/storage/schema/delta/24/stats_reporting.sql
@@ -0,0 +1,22 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Should only ever contain one row
+CREATE TABLE IF NOT EXISTS stats_reporting(
+ -- The stream ordering token which was most recently reported as stats
+ reported_stream_token INTEGER,
+ -- The time (seconds since epoch) stats were most recently reported
+ reported_time BIGINT
+);
diff --git a/synapse/storage/schema/delta/25/00background_updates.sql b/synapse/storage/schema/delta/25/00background_updates.sql
new file mode 100644
index 00000000..41a9b59b
--- /dev/null
+++ b/synapse/storage/schema/delta/25/00background_updates.sql
@@ -0,0 +1,21 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE TABLE IF NOT EXISTS background_updates(
+ update_name TEXT NOT NULL, -- The name of the background update.
+ progress_json TEXT NOT NULL, -- The current progress of the update as JSON.
+ CONSTRAINT background_updates_uniqueness UNIQUE (update_name)
+);
diff --git a/synapse/storage/schema/delta/25/fts.py b/synapse/storage/schema/delta/25/fts.py
new file mode 100644
index 00000000..5239d690
--- /dev/null
+++ b/synapse/storage/schema/delta/25/fts.py
@@ -0,0 +1,78 @@
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.storage.prepare_database import get_statements
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+
+import ujson
+
+logger = logging.getLogger(__name__)
+
+
+POSTGRES_TABLE = """
+CREATE TABLE IF NOT EXISTS event_search (
+ event_id TEXT,
+ room_id TEXT,
+ sender TEXT,
+ key TEXT,
+ vector tsvector
+);
+
+CREATE INDEX event_search_fts_idx ON event_search USING gin(vector);
+CREATE INDEX event_search_ev_idx ON event_search(event_id);
+CREATE INDEX event_search_ev_ridx ON event_search(room_id);
+"""
+
+
+SQLITE_TABLE = (
+ "CREATE VIRTUAL TABLE IF NOT EXISTS event_search"
+ " USING fts4 ( event_id, room_id, sender, key, value )"
+)
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+ if isinstance(database_engine, PostgresEngine):
+ for statement in get_statements(POSTGRES_TABLE.splitlines()):
+ cur.execute(statement)
+ elif isinstance(database_engine, Sqlite3Engine):
+ cur.execute(SQLITE_TABLE)
+ else:
+ raise Exception("Unrecognized database engine")
+
+ cur.execute("SELECT MIN(stream_ordering) FROM events")
+ rows = cur.fetchall()
+ min_stream_id = rows[0][0]
+
+ cur.execute("SELECT MAX(stream_ordering) FROM events")
+ rows = cur.fetchall()
+ max_stream_id = rows[0][0]
+
+ if min_stream_id is not None and max_stream_id is not None:
+ progress = {
+ "target_min_stream_id_inclusive": min_stream_id,
+ "max_stream_id_exclusive": max_stream_id + 1,
+ "rows_inserted": 0,
+ }
+ progress_json = ujson.dumps(progress)
+
+ sql = (
+ "INSERT into background_updates (update_name, progress_json)"
+ " VALUES (?, ?)"
+ )
+
+ sql = database_engine.convert_param_style(sql)
+
+ cur.execute(sql, ("event_search", progress_json))
diff --git a/synapse/storage/schema/delta/25/guest_access.sql b/synapse/storage/schema/delta/25/guest_access.sql
new file mode 100644
index 00000000..bdb90e71
--- /dev/null
+++ b/synapse/storage/schema/delta/25/guest_access.sql
@@ -0,0 +1,25 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This is a manual index of guest_access content of state events,
+ * so that we can join on them in SELECT statements.
+ */
+CREATE TABLE IF NOT EXISTS guest_access(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ guest_access TEXT NOT NULL,
+ UNIQUE (event_id)
+);
diff --git a/synapse/storage/schema/delta/25/history_visibility.sql b/synapse/storage/schema/delta/25/history_visibility.sql
new file mode 100644
index 00000000..532cb051
--- /dev/null
+++ b/synapse/storage/schema/delta/25/history_visibility.sql
@@ -0,0 +1,25 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This is a manual index of history_visibility content of state events,
+ * so that we can join on them in SELECT statements.
+ */
+CREATE TABLE IF NOT EXISTS history_visibility(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ history_visibility TEXT NOT NULL,
+ UNIQUE (event_id)
+);
diff --git a/synapse/storage/schema/delta/25/tags.sql b/synapse/storage/schema/delta/25/tags.sql
new file mode 100644
index 00000000..527424c9
--- /dev/null
+++ b/synapse/storage/schema/delta/25/tags.sql
@@ -0,0 +1,38 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE TABLE IF NOT EXISTS room_tags(
+ user_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ tag TEXT NOT NULL, -- The name of the tag.
+ content TEXT NOT NULL, -- The JSON content of the tag.
+ CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag)
+);
+
+CREATE TABLE IF NOT EXISTS room_tags_revisions (
+ user_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ stream_id BIGINT NOT NULL, -- The current version of the room tags.
+ CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id)
+);
+
+CREATE TABLE IF NOT EXISTS private_user_data_max_stream_id(
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ stream_id BIGINT NOT NULL,
+ CHECK (Lock='X')
+);
+
+INSERT INTO private_user_data_max_stream_id (stream_id) VALUES (0);
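
The Lock/CHECK idiom pins private_user_data_max_stream_id to a single row; a
sketch of the failure mode for a second insert:

import sqlite3

# Illustrative: UNIQUE on the constant Lock column rejects any second row.
conn = sqlite3.connect(":memory:")
conn.execute("""
    CREATE TABLE one_row (
        Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE,
        stream_id BIGINT NOT NULL,
        CHECK (Lock='X')
    )
""")
conn.execute("INSERT INTO one_row (stream_id) VALUES (0)")
try:
    conn.execute("INSERT INTO one_row (stream_id) VALUES (1)")
except sqlite3.IntegrityError:
    pass  # the table stays at one row; callers must UPDATE, not INSERT
assert conn.execute("SELECT COUNT(*) FROM one_row").fetchone() == (1,)
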
diff --git a/synapse/storage/schema/full_schemas/11/event_edges.sql b/synapse/storage/schema/full_schemas/11/event_edges.sql
new file mode 100644
index 00000000..f7020f77
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/event_edges.sql
@@ -0,0 +1,89 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS event_forward_extremities(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ UNIQUE (event_id, room_id)
+);
+
+CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id);
+CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_backward_extremities(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ UNIQUE (event_id, room_id)
+);
+
+CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id);
+CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_edges(
+ event_id TEXT NOT NULL,
+ prev_event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ is_state BOOL NOT NULL,
+ UNIQUE (event_id, prev_event_id, room_id, is_state)
+);
+
+CREATE INDEX ev_edges_id ON event_edges(event_id);
+CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id);
+
+
+CREATE TABLE IF NOT EXISTS room_depth(
+ room_id TEXT NOT NULL,
+ min_depth INTEGER NOT NULL,
+ UNIQUE (room_id)
+);
+
+CREATE INDEX room_depth_room ON room_depth(room_id);
+
+
+create TABLE IF NOT EXISTS event_destinations(
+ event_id TEXT NOT NULL,
+ destination TEXT NOT NULL,
+ delivered_ts BIGINT DEFAULT 0, -- or 0 if not delivered
+ UNIQUE (event_id, destination)
+);
+
+CREATE INDEX event_destinations_id ON event_destinations(event_id);
+
+
+CREATE TABLE IF NOT EXISTS state_forward_extremities(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ UNIQUE (event_id, room_id)
+);
+
+CREATE INDEX st_extrem_keys ON state_forward_extremities(
+ room_id, type, state_key
+);
+CREATE INDEX st_extrem_id ON state_forward_extremities(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_auth(
+ event_id TEXT NOT NULL,
+ auth_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ UNIQUE (event_id, auth_id, room_id)
+);
+
+CREATE INDEX evauth_edges_id ON event_auth(event_id);
+CREATE INDEX evauth_edges_auth_id ON event_auth(auth_id);
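+
+-- Illustrative only: the extremity tables above are shaped for lookups such
+-- as
+--   SELECT event_id FROM event_forward_extremities WHERE room_id = ?;
+-- i.e. fetching the current dangling tips of a room's event graph, which is
+-- what the ev_extrem_room index serves.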
diff --git a/synapse/storage/schema/full_schemas/11/event_signatures.sql b/synapse/storage/schema/full_schemas/11/event_signatures.sql
new file mode 100644
index 00000000..636b2d33
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/event_signatures.sql
@@ -0,0 +1,55 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS event_content_hashes (
+ event_id TEXT,
+ algorithm TEXT,
+ hash bytea,
+ UNIQUE (event_id, algorithm)
+);
+
+CREATE INDEX event_content_hashes_id ON event_content_hashes(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_reference_hashes (
+ event_id TEXT,
+ algorithm TEXT,
+ hash bytea,
+ UNIQUE (event_id, algorithm)
+);
+
+CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_signatures (
+ event_id TEXT,
+ signature_name TEXT,
+ key_id TEXT,
+ signature bytea,
+ UNIQUE (event_id, signature_name, key_id)
+);
+
+CREATE INDEX event_signatures_id ON event_signatures(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_edge_hashes(
+ event_id TEXT,
+ prev_event_id TEXT,
+ algorithm TEXT,
+ hash bytea,
+ UNIQUE (event_id, prev_event_id, algorithm)
+);
+
+CREATE INDEX event_edge_hashes_id ON event_edge_hashes(event_id);
diff --git a/synapse/storage/schema/full_schemas/11/im.sql b/synapse/storage/schema/full_schemas/11/im.sql
new file mode 100644
index 00000000..1901654a
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/im.sql
@@ -0,0 +1,123 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS events(
+ stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT,
+ topological_ordering BIGINT NOT NULL,
+ event_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ content TEXT NOT NULL,
+ unrecognized_keys TEXT,
+ processed BOOL NOT NULL,
+ outlier BOOL NOT NULL,
+ depth BIGINT DEFAULT 0 NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX events_stream_ordering ON events (stream_ordering);
+CREATE INDEX events_topological_ordering ON events (topological_ordering);
+CREATE INDEX events_room_id ON events (room_id);
+
+
+CREATE TABLE IF NOT EXISTS event_json(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ internal_metadata TEXT NOT NULL,
+ json TEXT NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX event_json_room_id ON event_json(room_id);
+
+
+CREATE TABLE IF NOT EXISTS state_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ prev_state TEXT,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX state_events_room_id ON state_events (room_id);
+CREATE INDEX state_events_type ON state_events (type);
+CREATE INDEX state_events_state_key ON state_events (state_key);
+
+
+CREATE TABLE IF NOT EXISTS current_state_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ UNIQUE (room_id, type, state_key)
+);
+
+CREATE INDEX curr_events_event_id ON current_state_events (event_id);
+CREATE INDEX current_state_events_room_id ON current_state_events (room_id);
+CREATE INDEX current_state_events_type ON current_state_events (type);
+CREATE INDEX current_state_events_state_key ON current_state_events (state_key);
+
+CREATE TABLE IF NOT EXISTS room_memberships(
+ event_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ sender TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ membership TEXT NOT NULL
+);
+
+CREATE INDEX room_memberships_event_id ON room_memberships (event_id);
+CREATE INDEX room_memberships_room_id ON room_memberships (room_id);
+CREATE INDEX room_memberships_user_id ON room_memberships (user_id);
+
+CREATE TABLE IF NOT EXISTS feedback(
+ event_id TEXT NOT NULL,
+ feedback_type TEXT,
+ target_event_id TEXT,
+ sender TEXT,
+ room_id TEXT
+);
+
+CREATE TABLE IF NOT EXISTS topics(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ topic TEXT NOT NULL
+);
+
+CREATE INDEX topics_event_id ON topics(event_id);
+CREATE INDEX topics_room_id ON topics(room_id);
+
+CREATE TABLE IF NOT EXISTS room_names(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ name TEXT NOT NULL
+);
+
+CREATE INDEX room_names_event_id ON room_names(event_id);
+CREATE INDEX room_names_room_id ON room_names(room_id);
+
+CREATE TABLE IF NOT EXISTS rooms(
+ room_id TEXT PRIMARY KEY NOT NULL,
+ is_public BOOL,
+ creator TEXT
+);
+
+CREATE TABLE IF NOT EXISTS room_hosts(
+ room_id TEXT NOT NULL,
+ host TEXT NOT NULL,
+ UNIQUE (room_id, host)
+);
+
+CREATE INDEX room_hosts_room_id ON room_hosts (room_id);
diff --git a/synapse/storage/schema/full_schemas/11/keys.sql b/synapse/storage/schema/full_schemas/11/keys.sql
new file mode 100644
index 00000000..afc14204
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/keys.sql
@@ -0,0 +1,31 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS server_tls_certificates(
+ server_name TEXT, -- Server name.
+ fingerprint TEXT, -- Certificate fingerprint.
+ from_server TEXT, -- Which key server the certificate was fetched from.
+ ts_added_ms BIGINT, -- When the certificate was added.
+ tls_certificate bytea, -- DER encoded x509 certificate.
+ UNIQUE (server_name, fingerprint)
+);
+
+CREATE TABLE IF NOT EXISTS server_signature_keys(
+ server_name TEXT, -- Server name.
+ key_id TEXT, -- Key version.
+ from_server TEXT, -- Which key server the key was fetched from.
+ ts_added_ms BIGINT, -- When the key was added.
+ verify_key bytea, -- NaCl verification key.
+ UNIQUE (server_name, key_id)
+);
diff --git a/synapse/storage/schema/full_schemas/11/media_repository.sql b/synapse/storage/schema/full_schemas/11/media_repository.sql
new file mode 100644
index 00000000..e927e581
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/media_repository.sql
@@ -0,0 +1,65 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS local_media_repository (
+ media_id TEXT, -- The id used to refer to the media.
+ media_type TEXT, -- The MIME-type of the media.
+ media_length INTEGER, -- Length of the media in bytes.
+ created_ts BIGINT, -- When the content was uploaded, in ms.
+ upload_name TEXT, -- The name the media was uploaded with.
+ user_id TEXT, -- The user who uploaded the file.
+ UNIQUE (media_id)
+);
+
+CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
+ media_id TEXT, -- The id used to refer to the media.
+ thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
+ thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+ thumbnail_type TEXT, -- The MIME-type of the thumbnail.
+ thumbnail_method TEXT, -- The method used to make the thumbnail.
+ thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+ UNIQUE (
+ media_id, thumbnail_width, thumbnail_height, thumbnail_type
+ )
+);
+
+CREATE INDEX local_media_repository_thumbnails_media_id
+ ON local_media_repository_thumbnails (media_id);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache (
+ media_origin TEXT, -- The remote HS the media came from.
+ media_id TEXT, -- The id used to refer to the media on that server.
+ media_type TEXT, -- The MIME-type of the media.
+ created_ts BIGINT, -- When the content was uploaded, in ms.
+ upload_name TEXT, -- The name the media was uploaded with.
+ media_length INTEGER, -- Length of the media in bytes.
+ filesystem_id TEXT, -- The name used to store the media on disk.
+ UNIQUE (media_origin, media_id)
+);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
+ media_origin TEXT, -- The remote HS the media came from.
+ media_id TEXT, -- The id used to refer to the media.
+ thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
+ thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+ thumbnail_method TEXT, -- The method used to make the thumbnail.
+ thumbnail_type TEXT, -- The MIME-type of the thumbnail.
+ thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+ filesystem_id TEXT, -- The name used to store the media on disk.
+ UNIQUE (
+ media_origin, media_id, thumbnail_width, thumbnail_height,
+ thumbnail_type
+ )
+);
diff --git a/synapse/storage/schema/full_schemas/11/presence.sql b/synapse/storage/schema/full_schemas/11/presence.sql
new file mode 100644
index 00000000..d8d82e9f
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/presence.sql
@@ -0,0 +1,35 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS presence(
+ user_id TEXT NOT NULL,
+ state VARCHAR(20),
+ status_msg TEXT,
+ mtime BIGINT -- milliseconds since last state change
+);
+
+-- For each of /my/ users which possibly-remote users are allowed to see their
+-- presence state
+CREATE TABLE IF NOT EXISTS presence_allow_inbound(
+ observed_user_id TEXT NOT NULL,
+ observer_user_id TEXT NOT NULL -- a UserID
+);
+
+-- For each of /my/ users (watcher), which possibly-remote users are they
+-- watching?
+CREATE TABLE IF NOT EXISTS presence_list(
+ user_id TEXT NOT NULL,
+ observed_user_id TEXT NOT NULL, -- a UserID
+ accepted BOOLEAN NOT NULL
+);
diff --git a/synapse/storage/schema/full_schemas/11/profiles.sql b/synapse/storage/schema/full_schemas/11/profiles.sql
new file mode 100644
index 00000000..26e42044
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/profiles.sql
@@ -0,0 +1,19 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS profiles(
+ user_id TEXT NOT NULL,
+ displayname TEXT,
+ avatar_url TEXT
+);
diff --git a/synapse/storage/schema/full_schemas/11/redactions.sql b/synapse/storage/schema/full_schemas/11/redactions.sql
new file mode 100644
index 00000000..69621955
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/redactions.sql
@@ -0,0 +1,22 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS redactions (
+ event_id TEXT NOT NULL,
+ redacts TEXT NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX redactions_event_id ON redactions (event_id);
+CREATE INDEX redactions_redacts ON redactions (redacts);
diff --git a/synapse/storage/schema/full_schemas/11/room_aliases.sql b/synapse/storage/schema/full_schemas/11/room_aliases.sql
new file mode 100644
index 00000000..5027b1e3
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/room_aliases.sql
@@ -0,0 +1,24 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS room_aliases(
+ room_alias TEXT NOT NULL,
+ room_id TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS room_alias_servers(
+ room_alias TEXT NOT NULL,
+ server TEXT NOT NULL
+);
diff --git a/synapse/storage/schema/full_schemas/11/state.sql b/synapse/storage/schema/full_schemas/11/state.sql
new file mode 100644
index 00000000..ffd164ab
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/state.sql
@@ -0,0 +1,40 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS state_groups(
+ id INTEGER PRIMARY KEY,
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS state_groups_state(
+ state_group INTEGER NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS event_to_state_groups(
+ event_id TEXT NOT NULL,
+ state_group INTEGER NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX state_groups_id ON state_groups(id);
+
+CREATE INDEX state_groups_state_id ON state_groups_state(state_group);
+CREATE INDEX state_groups_state_tuple ON state_groups_state(room_id, type, state_key);
+CREATE INDEX event_to_state_groups_id ON event_to_state_groups(event_id);
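+
+-- Illustrative only: the state of an event is assumed to be recovered with a
+-- two-step lookup through these tables, roughly:
+--   SELECT s.type, s.state_key, s.event_id
+--   FROM event_to_state_groups AS e
+--   JOIN state_groups_state AS s ON s.state_group = e.state_group
+--   WHERE e.event_id = ?;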
diff --git a/synapse/storage/schema/full_schemas/11/transactions.sql b/synapse/storage/schema/full_schemas/11/transactions.sql
new file mode 100644
index 00000000..cc5b54f5
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/transactions.sql
@@ -0,0 +1,63 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-- Stores what transaction ids we have received and what our response was
+CREATE TABLE IF NOT EXISTS received_transactions(
+ transaction_id TEXT,
+ origin TEXT,
+ ts BIGINT,
+ response_code INTEGER,
+ response_json bytea,
+ has_been_referenced SMALLINT DEFAULT 0, -- Whether this has been referenced by a prev_tx
+ UNIQUE (transaction_id, origin)
+);
+
+CREATE INDEX transactions_have_ref ON received_transactions(origin, has_been_referenced); -- WHERE has_been_referenced = 0;
+
+
+-- Stores what transactions we've sent, what their response was (if we got one) and whether we have
+-- since referenced the transaction in another outgoing transaction
+CREATE TABLE IF NOT EXISTS sent_transactions(
+ id INTEGER PRIMARY KEY AUTOINCREMENT, -- This is used to apply insertion ordering
+ transaction_id TEXT,
+ destination TEXT,
+ response_code INTEGER DEFAULT 0,
+ response_json TEXT,
+ ts BIGINT
+);
+
+CREATE INDEX sent_transaction_dest ON sent_transactions(destination);
+CREATE INDEX sent_transaction_txn_id ON sent_transactions(transaction_id);
+-- So that we can do an efficient lookup of all transactions that have yet to
+-- be successfully sent.
+CREATE INDEX sent_transaction_sent ON sent_transactions(response_code);
+
+
+-- For sent transactions only.
+CREATE TABLE IF NOT EXISTS transaction_id_to_pdu(
+ transaction_id INTEGER,
+ destination TEXT,
+ pdu_id TEXT,
+ pdu_origin TEXT
+);
+
+CREATE INDEX transaction_id_to_pdu_tx ON transaction_id_to_pdu(transaction_id, destination);
+CREATE INDEX transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination);
+
+-- To track destination health
+CREATE TABLE IF NOT EXISTS destinations(
+ destination TEXT PRIMARY KEY,
+ retry_last_ts BIGINT,
+ retry_interval INTEGER
+);
diff --git a/synapse/storage/schema/full_schemas/11/users.sql b/synapse/storage/schema/full_schemas/11/users.sql
new file mode 100644
index 00000000..eec3da3c
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/11/users.sql
@@ -0,0 +1,43 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS users(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT,
+ password_hash TEXT,
+ creation_ts BIGINT,
+ admin SMALLINT DEFAULT 0 NOT NULL,
+ UNIQUE(name)
+);
+
+CREATE TABLE IF NOT EXISTS access_tokens(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ user_id TEXT NOT NULL,
+ device_id TEXT,
+ token TEXT NOT NULL,
+ last_used BIGINT,
+ UNIQUE(token)
+);
+
+CREATE TABLE IF NOT EXISTS user_ips (
+ user TEXT NOT NULL,
+ access_token TEXT NOT NULL,
+ device_id TEXT,
+ ip TEXT NOT NULL,
+ user_agent TEXT NOT NULL,
+ last_seen BIGINT NOT NULL,
+ UNIQUE (user, access_token, ip, user_agent)
+);
+
+CREATE INDEX user_ips_user ON user_ips(user);
diff --git a/synapse/storage/schema/full_schemas/16/application_services.sql b/synapse/storage/schema/full_schemas/16/application_services.sql
new file mode 100644
index 00000000..d382d63f
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/application_services.sql
@@ -0,0 +1,48 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS application_services(
+ id BIGINT PRIMARY KEY,
+ url TEXT,
+ token TEXT,
+ hs_token TEXT,
+ sender TEXT,
+ UNIQUE(token)
+);
+
+CREATE TABLE IF NOT EXISTS application_services_regex(
+ id BIGINT PRIMARY KEY,
+ as_id BIGINT NOT NULL,
+ namespace INTEGER, /* enum[room_id|room_alias|user_id] */
+ regex TEXT,
+ FOREIGN KEY(as_id) REFERENCES application_services(id)
+);
+
+CREATE TABLE IF NOT EXISTS application_services_state(
+ as_id TEXT PRIMARY KEY,
+ state VARCHAR(5),
+ last_txn INTEGER
+);
+
+CREATE TABLE IF NOT EXISTS application_services_txns(
+ as_id TEXT NOT NULL,
+ txn_id INTEGER NOT NULL,
+ event_ids TEXT NOT NULL,
+ UNIQUE(as_id, txn_id)
+);
+
+CREATE INDEX application_services_txns_id ON application_services_txns (
+ as_id
+);
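+
+-- Illustrative only: the namespace integer is an index into the enum noted
+-- above (room_id, room_alias or user_id); the concrete mapping lives in the
+-- application services code, not in this schema.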
diff --git a/synapse/storage/schema/full_schemas/16/event_edges.sql b/synapse/storage/schema/full_schemas/16/event_edges.sql
new file mode 100644
index 00000000..f7020f77
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/event_edges.sql
@@ -0,0 +1,89 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS event_forward_extremities(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ UNIQUE (event_id, room_id)
+);
+
+CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id);
+CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_backward_extremities(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ UNIQUE (event_id, room_id)
+);
+
+CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id);
+CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_edges(
+ event_id TEXT NOT NULL,
+ prev_event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ is_state BOOL NOT NULL,
+ UNIQUE (event_id, prev_event_id, room_id, is_state)
+);
+
+CREATE INDEX ev_edges_id ON event_edges(event_id);
+CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id);
+
+
+CREATE TABLE IF NOT EXISTS room_depth(
+ room_id TEXT NOT NULL,
+ min_depth INTEGER NOT NULL,
+ UNIQUE (room_id)
+);
+
+CREATE INDEX room_depth_room ON room_depth(room_id);
+
+
+CREATE TABLE IF NOT EXISTS event_destinations(
+ event_id TEXT NOT NULL,
+ destination TEXT NOT NULL,
+ delivered_ts BIGINT DEFAULT 0, -- Timestamp of delivery, or 0 if not delivered
+ UNIQUE (event_id, destination)
+);
+
+CREATE INDEX event_destinations_id ON event_destinations(event_id);
+
+
+CREATE TABLE IF NOT EXISTS state_forward_extremities(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ UNIQUE (event_id, room_id)
+);
+
+CREATE INDEX st_extrem_keys ON state_forward_extremities(
+ room_id, type, state_key
+);
+CREATE INDEX st_extrem_id ON state_forward_extremities(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_auth(
+ event_id TEXT NOT NULL,
+ auth_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ UNIQUE (event_id, auth_id, room_id)
+);
+
+CREATE INDEX evauth_edges_id ON event_auth(event_id);
+CREATE INDEX evauth_edges_auth_id ON event_auth(auth_id);
diff --git a/synapse/storage/schema/full_schemas/16/event_signatures.sql b/synapse/storage/schema/full_schemas/16/event_signatures.sql
new file mode 100644
index 00000000..636b2d33
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/event_signatures.sql
@@ -0,0 +1,55 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS event_content_hashes (
+ event_id TEXT,
+ algorithm TEXT,
+ hash bytea,
+ UNIQUE (event_id, algorithm)
+);
+
+CREATE INDEX event_content_hashes_id ON event_content_hashes(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_reference_hashes (
+ event_id TEXT,
+ algorithm TEXT,
+ hash bytea,
+ UNIQUE (event_id, algorithm)
+);
+
+CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_signatures (
+ event_id TEXT,
+ signature_name TEXT,
+ key_id TEXT,
+ signature bytea,
+ UNIQUE (event_id, signature_name, key_id)
+);
+
+CREATE INDEX event_signatures_id ON event_signatures(event_id);
+
+
+CREATE TABLE IF NOT EXISTS event_edge_hashes(
+ event_id TEXT,
+ prev_event_id TEXT,
+ algorithm TEXT,
+ hash bytea,
+ UNIQUE (event_id, prev_event_id, algorithm)
+);
+
+CREATE INDEX event_edge_hashes_id ON event_edge_hashes(event_id);
diff --git a/synapse/storage/schema/full_schemas/16/im.sql b/synapse/storage/schema/full_schemas/16/im.sql
new file mode 100644
index 00000000..576653a3
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/im.sql
@@ -0,0 +1,128 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS events(
+ stream_ordering INTEGER PRIMARY KEY,
+ topological_ordering BIGINT NOT NULL,
+ event_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ content TEXT NOT NULL,
+ unrecognized_keys TEXT,
+ processed BOOL NOT NULL,
+ outlier BOOL NOT NULL,
+ depth BIGINT DEFAULT 0 NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX events_stream_ordering ON events (stream_ordering);
+CREATE INDEX events_topological_ordering ON events (topological_ordering);
+CREATE INDEX events_order ON events (topological_ordering, stream_ordering);
+CREATE INDEX events_room_id ON events (room_id);
+CREATE INDEX events_order_room ON events (
+ room_id, topological_ordering, stream_ordering
+);
+
+
+CREATE TABLE IF NOT EXISTS event_json(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ internal_metadata TEXT NOT NULL,
+ json TEXT NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX event_json_room_id ON event_json(room_id);
+
+
+CREATE TABLE IF NOT EXISTS state_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ prev_state TEXT,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX state_events_room_id ON state_events (room_id);
+CREATE INDEX state_events_type ON state_events (type);
+CREATE INDEX state_events_state_key ON state_events (state_key);
+
+
+CREATE TABLE IF NOT EXISTS current_state_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ UNIQUE (event_id),
+ UNIQUE (room_id, type, state_key)
+);
+
+CREATE INDEX current_state_events_room_id ON current_state_events (room_id);
+CREATE INDEX current_state_events_type ON current_state_events (type);
+CREATE INDEX current_state_events_state_key ON current_state_events (state_key);
+
+CREATE TABLE IF NOT EXISTS room_memberships(
+ event_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ sender TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ membership TEXT NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX room_memberships_room_id ON room_memberships (room_id);
+CREATE INDEX room_memberships_user_id ON room_memberships (user_id);
+
+CREATE TABLE IF NOT EXISTS feedback(
+ event_id TEXT NOT NULL,
+ feedback_type TEXT,
+ target_event_id TEXT,
+ sender TEXT,
+ room_id TEXT,
+ UNIQUE (event_id)
+);
+
+CREATE TABLE IF NOT EXISTS topics(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ topic TEXT NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX topics_room_id ON topics(room_id);
+
+CREATE TABLE IF NOT EXISTS room_names(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ name TEXT NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX room_names_room_id ON room_names(room_id);
+
+CREATE TABLE IF NOT EXISTS rooms(
+ room_id TEXT PRIMARY KEY NOT NULL,
+ is_public BOOL,
+ creator TEXT
+);
+
+CREATE TABLE IF NOT EXISTS room_hosts(
+ room_id TEXT NOT NULL,
+ host TEXT NOT NULL,
+ UNIQUE (room_id, host)
+);
+
+CREATE INDEX room_hosts_room_id ON room_hosts (room_id);
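+
+-- Illustrative only: events_order_room is shaped for room pagination queries
+-- such as
+--   SELECT event_id FROM events WHERE room_id = ?
+--   ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?;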
diff --git a/synapse/storage/schema/full_schemas/16/keys.sql b/synapse/storage/schema/full_schemas/16/keys.sql
new file mode 100644
index 00000000..afc14204
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/keys.sql
@@ -0,0 +1,31 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS server_tls_certificates(
+ server_name TEXT, -- Server name.
+ fingerprint TEXT, -- Certificate fingerprint.
+ from_server TEXT, -- Which key server the certificate was fetched from.
+ ts_added_ms BIGINT, -- When the certificate was added.
+ tls_certificate bytea, -- DER encoded x509 certificate.
+ UNIQUE (server_name, fingerprint)
+);
+
+CREATE TABLE IF NOT EXISTS server_signature_keys(
+ server_name TEXT, -- Server name.
+ key_id TEXT, -- Key version.
+ from_server TEXT, -- Which key server the key was fetched from.
+ ts_added_ms BIGINT, -- When the key was added.
+ verify_key bytea, -- NaCl verification key.
+ UNIQUE (server_name, key_id)
+);
diff --git a/synapse/storage/schema/full_schemas/16/media_repository.sql b/synapse/storage/schema/full_schemas/16/media_repository.sql
new file mode 100644
index 00000000..dacbda40
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/media_repository.sql
@@ -0,0 +1,68 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS local_media_repository (
+ media_id TEXT, -- The id used to refer to the media.
+ media_type TEXT, -- The MIME-type of the media.
+ media_length INTEGER, -- Length of the media in bytes.
+ created_ts BIGINT, -- When the content was uploaded, in ms.
+ upload_name TEXT, -- The name the media was uploaded with.
+ user_id TEXT, -- The user who uploaded the file.
+ UNIQUE (media_id)
+);
+
+CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
+ media_id TEXT, -- The id used to refer to the media.
+ thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
+ thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+ thumbnail_type TEXT, -- The MIME-type of the thumbnail.
+ thumbnail_method TEXT, -- The method used to make the thumbnail.
+ thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+ UNIQUE (
+ media_id, thumbnail_width, thumbnail_height, thumbnail_type
+ )
+);
+
+CREATE INDEX local_media_repository_thumbnails_media_id
+ ON local_media_repository_thumbnails (media_id);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache (
+ media_origin TEXT, -- The remote HS the media came from.
+ media_id TEXT, -- The id used to refer to the media on that server.
+ media_type TEXT, -- The MIME-type of the media.
+ created_ts BIGINT, -- When the content was uploaded, in ms.
+ upload_name TEXT, -- The name the media was uploaded with.
+ media_length INTEGER, -- Length of the media in bytes.
+ filesystem_id TEXT, -- The name used to store the media on disk.
+ UNIQUE (media_origin, media_id)
+);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
+ media_origin TEXT, -- The remote HS the media came from.
+ media_id TEXT, -- The id used to refer to the media.
+ thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
+ thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+ thumbnail_method TEXT, -- The method used to make the thumbnail.
+ thumbnail_type TEXT, -- The MIME-type of the thumbnail.
+ thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+ filesystem_id TEXT, -- The name used to store the media on disk.
+ UNIQUE (
+ media_origin, media_id, thumbnail_width, thumbnail_height,
+ thumbnail_type
+ )
+);
+
+CREATE INDEX remote_media_cache_thumbnails_media_id
+ ON remote_media_cache_thumbnails (media_id);
diff --git a/synapse/storage/schema/full_schemas/16/presence.sql b/synapse/storage/schema/full_schemas/16/presence.sql
new file mode 100644
index 00000000..80088413
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/presence.sql
@@ -0,0 +1,40 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS presence(
+ user_id TEXT NOT NULL,
+ state VARCHAR(20),
+ status_msg TEXT,
+ mtime BIGINT, -- milliseconds since last state change
+ UNIQUE (user_id)
+);
+
+-- For each of /my/ users which possibly-remote users are allowed to see their
+-- presence state
+CREATE TABLE IF NOT EXISTS presence_allow_inbound(
+ observed_user_id TEXT NOT NULL,
+ observer_user_id TEXT NOT NULL, -- a UserID
+ UNIQUE (observed_user_id, observer_user_id)
+);
+
+-- For each of /my/ users (watcher), which possibly-remote users are they
+-- watching?
+CREATE TABLE IF NOT EXISTS presence_list(
+ user_id TEXT NOT NULL,
+ observed_user_id TEXT NOT NULL, -- a UserID
+ accepted BOOLEAN NOT NULL,
+ UNIQUE (user_id, observed_user_id)
+);
+
+CREATE INDEX presence_list_user_id ON presence_list (user_id);
diff --git a/synapse/storage/schema/full_schemas/16/profiles.sql b/synapse/storage/schema/full_schemas/16/profiles.sql
new file mode 100644
index 00000000..934be865
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/profiles.sql
@@ -0,0 +1,20 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS profiles(
+ user_id TEXT NOT NULL,
+ displayname TEXT,
+ avatar_url TEXT,
+ UNIQUE(user_id)
+);
diff --git a/synapse/storage/schema/full_schemas/16/push.sql b/synapse/storage/schema/full_schemas/16/push.sql
new file mode 100644
index 00000000..9387f920
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/push.sql
@@ -0,0 +1,74 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS rejections(
+ event_id TEXT NOT NULL,
+ reason TEXT NOT NULL,
+ last_check TEXT NOT NULL,
+ UNIQUE (event_id)
+);
+
+-- Push notification endpoints that users have configured
+CREATE TABLE IF NOT EXISTS pushers (
+ id BIGINT PRIMARY KEY,
+ user_name TEXT NOT NULL,
+ access_token BIGINT DEFAULT NULL,
+ profile_tag VARCHAR(32) NOT NULL,
+ kind VARCHAR(8) NOT NULL,
+ app_id VARCHAR(64) NOT NULL,
+ app_display_name VARCHAR(64) NOT NULL,
+ device_display_name VARCHAR(128) NOT NULL,
+ pushkey bytea NOT NULL,
+ ts BIGINT NOT NULL,
+ lang VARCHAR(8),
+ data bytea,
+ last_token TEXT,
+ last_success BIGINT,
+ failing_since BIGINT,
+ UNIQUE (app_id, pushkey)
+);
+
+CREATE TABLE IF NOT EXISTS push_rules (
+ id BIGINT PRIMARY KEY,
+ user_name TEXT NOT NULL,
+ rule_id TEXT NOT NULL,
+ priority_class SMALLINT NOT NULL,
+ priority INTEGER NOT NULL DEFAULT 0,
+ conditions TEXT NOT NULL,
+ actions TEXT NOT NULL,
+ UNIQUE(user_name, rule_id)
+);
+
+CREATE INDEX push_rules_user_name ON push_rules (user_name);
+
+CREATE TABLE IF NOT EXISTS user_filters(
+ user_id TEXT,
+ filter_id BIGINT,
+ filter_json bytea
+);
+
+CREATE INDEX user_filters_by_user_id_filter_id ON user_filters(
+ user_id, filter_id
+);
+
+CREATE TABLE IF NOT EXISTS push_rules_enable (
+ id BIGINT PRIMARY KEY,
+ user_name TEXT NOT NULL,
+ rule_id TEXT NOT NULL,
+ enabled SMALLINT,
+ UNIQUE(user_name, rule_id)
+);
+
+CREATE INDEX push_rules_enable_user_name ON push_rules_enable (user_name);
diff --git a/synapse/storage/schema/full_schemas/16/redactions.sql b/synapse/storage/schema/full_schemas/16/redactions.sql
new file mode 100644
index 00000000..69621955
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/redactions.sql
@@ -0,0 +1,22 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS redactions (
+ event_id TEXT NOT NULL,
+ redacts TEXT NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX redactions_event_id ON redactions (event_id);
+CREATE INDEX redactions_redacts ON redactions (redacts);
diff --git a/synapse/storage/schema/full_schemas/16/room_aliases.sql b/synapse/storage/schema/full_schemas/16/room_aliases.sql
new file mode 100644
index 00000000..412bb97f
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/room_aliases.sql
@@ -0,0 +1,29 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS room_aliases(
+ room_alias TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ UNIQUE (room_alias)
+);
+
+CREATE INDEX room_aliases_id ON room_aliases(room_id);
+
+CREATE TABLE IF NOT EXISTS room_alias_servers(
+ room_alias TEXT NOT NULL,
+ server TEXT NOT NULL
+);
+
+CREATE INDEX room_alias_servers_alias ON room_alias_servers(room_alias);
diff --git a/synapse/storage/schema/full_schemas/16/state.sql b/synapse/storage/schema/full_schemas/16/state.sql
new file mode 100644
index 00000000..705cac6c
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/state.sql
@@ -0,0 +1,40 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS state_groups(
+ id BIGINT PRIMARY KEY,
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS state_groups_state(
+ state_group BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS event_to_state_groups(
+ event_id TEXT NOT NULL,
+ state_group BIGINT NOT NULL,
+ UNIQUE (event_id)
+);
+
+CREATE INDEX state_groups_id ON state_groups(id);
+
+CREATE INDEX state_groups_state_id ON state_groups_state(state_group);
+CREATE INDEX state_groups_state_tuple ON state_groups_state(room_id, type, state_key);
+CREATE INDEX event_to_state_groups_id ON event_to_state_groups(event_id);
diff --git a/synapse/storage/schema/full_schemas/16/transactions.sql b/synapse/storage/schema/full_schemas/16/transactions.sql
new file mode 100644
index 00000000..1ab77cdb
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/transactions.sql
@@ -0,0 +1,63 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-- Stores what transaction ids we have received and what our response was
+CREATE TABLE IF NOT EXISTS received_transactions(
+ transaction_id TEXT,
+ origin TEXT,
+ ts BIGINT,
+ response_code INTEGER,
+ response_json bytea,
+ has_been_referenced SMALLINT DEFAULT 0, -- Whether this has been referenced by a prev_tx
+ UNIQUE (transaction_id, origin)
+);
+
+CREATE INDEX transactions_have_ref ON received_transactions(origin, has_been_referenced); -- WHERE has_been_referenced = 0;
+
+
+-- Stores what transactions we've sent, what their response was (if we got one) and whether we have
+-- since referenced the transaction in another outgoing transaction
+CREATE TABLE IF NOT EXISTS sent_transactions(
+ id BIGINT PRIMARY KEY, -- This is used to apply insertion ordering
+ transaction_id TEXT,
+ destination TEXT,
+ response_code INTEGER DEFAULT 0,
+ response_json TEXT,
+ ts BIGINT
+);
+
+CREATE INDEX sent_transaction_dest ON sent_transactions(destination);
+CREATE INDEX sent_transaction_txn_id ON sent_transactions(transaction_id);
+-- So that we can do an efficient lookup of all transactions that have yet to
+-- be successfully sent.
+CREATE INDEX sent_transaction_sent ON sent_transactions(response_code);
+
+
+-- For sent transactions only.
+CREATE TABLE IF NOT EXISTS transaction_id_to_pdu(
+ transaction_id INTEGER,
+ destination TEXT,
+ pdu_id TEXT,
+ pdu_origin TEXT,
+ UNIQUE (transaction_id, destination)
+);
+
+CREATE INDEX transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination);
+
+-- To track destination health
+CREATE TABLE IF NOT EXISTS destinations(
+ destination TEXT PRIMARY KEY,
+ retry_last_ts BIGINT,
+ retry_interval INTEGER
+);
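+
+-- Illustrative only: received_transactions doubles as a dedup table; a query
+-- along the lines of
+--   SELECT response_code, response_json FROM received_transactions
+--   WHERE transaction_id = ? AND origin = ?;
+-- lets the server replay its earlier response when a transaction is resent.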
diff --git a/synapse/storage/schema/full_schemas/16/users.sql b/synapse/storage/schema/full_schemas/16/users.sql
new file mode 100644
index 00000000..d2fa3122
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/16/users.sql
@@ -0,0 +1,42 @@
+/* Copyright 2014, 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS users(
+ name TEXT,
+ password_hash TEXT,
+ creation_ts BIGINT,
+ admin SMALLINT DEFAULT 0 NOT NULL,
+ UNIQUE(name)
+);
+
+CREATE TABLE IF NOT EXISTS access_tokens(
+ id BIGINT PRIMARY KEY,
+ user_id TEXT NOT NULL,
+ device_id TEXT,
+ token TEXT NOT NULL,
+ last_used BIGINT,
+ UNIQUE(token)
+);
+
+CREATE TABLE IF NOT EXISTS user_ips (
+ user_id TEXT NOT NULL,
+ access_token TEXT NOT NULL,
+ device_id TEXT,
+ ip TEXT NOT NULL,
+ user_agent TEXT NOT NULL,
+ last_seen BIGINT NOT NULL
+);
+
+CREATE INDEX user_ips_user ON user_ips(user_id);
+CREATE INDEX user_ips_user_ip ON user_ips(user_id, access_token, ip);
diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/schema_version.sql
new file mode 100644
index 00000000..d682608a
--- /dev/null
+++ b/synapse/storage/schema/schema_version.sql
@@ -0,0 +1,27 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS schema_version(
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ version INTEGER NOT NULL,
+ upgraded BOOL NOT NULL, -- Whether we reached this version from an upgrade or an initial schema.
+ CHECK (Lock='X')
+);
+
+CREATE TABLE IF NOT EXISTS applied_schema_deltas(
+ version INTEGER NOT NULL,
+ file TEXT NOT NULL,
+ UNIQUE(version, file)
+);
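+
+-- Illustrative only: with the single-row Lock pattern the version is assumed
+-- to be read and bumped roughly as
+--   SELECT version, upgraded FROM schema_version;
+--   INSERT OR REPLACE INTO schema_version (version, upgraded) VALUES (?, ?);
+-- (INSERT OR REPLACE is SQLite syntax; the PostgreSQL path differs.)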
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
new file mode 100644
index 00000000..380270b0
--- /dev/null
+++ b/synapse/storage/search.py
@@ -0,0 +1,307 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from .background_updates import BackgroundUpdateStore
+from synapse.api.errors import SynapseError
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class SearchStore(BackgroundUpdateStore):
+
+ EVENT_SEARCH_UPDATE_NAME = "event_search"
+
+ def __init__(self, hs):
+ super(SearchStore, self).__init__(hs)
+ self.register_background_update_handler(
+ self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
+ )
+
+ @defer.inlineCallbacks
+ def _background_reindex_search(self, progress, batch_size):
+ target_min_stream_id = progress["target_min_stream_id_inclusive"]
+ max_stream_id = progress["max_stream_id_exclusive"]
+ rows_inserted = progress.get("rows_inserted", 0)
+
+ INSERT_CLUMP_SIZE = 1000
+ TYPES = ["m.room.name", "m.room.message", "m.room.topic"]
+
+ def reindex_search_txn(txn):
+ sql = (
+ "SELECT stream_ordering, event_id FROM events"
+ " WHERE ? <= stream_ordering AND stream_ordering < ?"
+ " AND (%s)"
+ " ORDER BY stream_ordering DESC"
+ " LIMIT ?"
+ ) % (" OR ".join("type = '%s'" % (t,) for t in TYPES),)
+
+ txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
+
+ rows = txn.fetchall()
+ if not rows:
+ return 0
+
+ min_stream_id = rows[-1][0]
+ event_ids = [row[1] for row in rows]
+
+ events = self._get_events_txn(txn, event_ids)
+
+ event_search_rows = []
+ for event in events:
+ try:
+ event_id = event.event_id
+ room_id = event.room_id
+ content = event.content
+ if event.type == "m.room.message":
+ key = "content.body"
+ value = content["body"]
+ elif event.type == "m.room.topic":
+ key = "content.topic"
+ value = content["topic"]
+ elif event.type == "m.room.name":
+ key = "content.name"
+ value = content["name"]
+ except (KeyError, AttributeError):
+ # If the event is missing a necessary field then
+ # skip over it.
+ continue
+
+ event_search_rows.append((event_id, room_id, key, value))
+
+ if isinstance(self.database_engine, PostgresEngine):
+ sql = (
+ "INSERT INTO event_search (event_id, room_id, key, vector)"
+ " VALUES (?,?,?,to_tsvector('english', ?))"
+ )
+ elif isinstance(self.database_engine, Sqlite3Engine):
+ sql = (
+ "INSERT INTO event_search (event_id, room_id, key, value)"
+ " VALUES (?,?,?,?)"
+ )
+ else:
+ # This should be unreachable.
+ raise Exception("Unrecognized database engine")
+
+ for index in range(0, len(event_search_rows), INSERT_CLUMP_SIZE):
+ clump = event_search_rows[index:index + INSERT_CLUMP_SIZE]
+ txn.executemany(sql, clump)
+
+ progress = {
+ "target_min_stream_id_inclusive": target_min_stream_id,
+ "max_stream_id_exclusive": min_stream_id,
+ "rows_inserted": rows_inserted + len(event_search_rows)
+ }
+
+ self._background_update_progress_txn(
+ txn, self.EVENT_SEARCH_UPDATE_NAME, progress
+ )
+
+ return len(event_search_rows)
+
+ result = yield self.runInteraction(
+ self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn
+ )
+
+ if not result:
+ yield self._end_background_update(self.EVENT_SEARCH_UPDATE_NAME)
+
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def search_msgs(self, room_ids, search_term, keys):
+ """Performs a full text search over events with given keys.
+
+ Args:
+ room_ids (list): List of room ids to search in
+ search_term (str): Search term to search for
+ keys (list): List of keys to search in, currently supports
+ "content.body", "content.name", "content.topic"
+
+ Returns:
+ list of dicts
+ """
+ clauses = []
+ args = []
+
+ # Make sure we don't explode because the person is in too many rooms.
+ # We filter the results below regardless.
+ if len(room_ids) < 500:
+ clauses.append(
+ "room_id IN (%s)" % (",".join(["?"] * len(room_ids)),)
+ )
+ args.extend(room_ids)
+
+ local_clauses = []
+ for key in keys:
+ local_clauses.append("key = ?")
+ args.append(key)
+
+ clauses.append(
+ "(%s)" % (" OR ".join(local_clauses),)
+ )
+
+ if isinstance(self.database_engine, PostgresEngine):
+ sql = (
+ "SELECT ts_rank_cd(vector, query) AS rank, room_id, event_id"
+ " FROM plainto_tsquery('english', ?) as query, event_search"
+ " WHERE vector @@ query"
+ )
+ elif isinstance(self.database_engine, Sqlite3Engine):
+ sql = (
+ "SELECT rank(matchinfo(event_search)) as rank, room_id, event_id"
+ " FROM event_search"
+ " WHERE value MATCH ?"
+ )
+ else:
+ # This should be unreachable.
+ raise Exception("Unrecognized database engine")
+
+ for clause in clauses:
+ sql += " AND " + clause
+
+ # We add an arbitrary limit here to ensure we don't try to pull the
+ # entire table from the database.
+ sql += " ORDER BY rank DESC LIMIT 500"
+
+ results = yield self._execute(
+ "search_msgs", self.cursor_to_dict, sql, *([search_term] + args)
+ )
+
+ results = filter(lambda row: row["room_id"] in room_ids, results)
+
+ events = yield self._get_events([r["event_id"] for r in results])
+
+ event_map = {
+ ev.event_id: ev
+ for ev in events
+ }
+
+ defer.returnValue([
+ {
+ "event": event_map[r["event_id"]],
+ "rank": r["rank"],
+ }
+ for r in results
+ if r["event_id"] in event_map
+ ])
+
+ @defer.inlineCallbacks
+ def search_room(self, room_id, search_term, keys, limit, pagination_token=None):
+ """Performs a full text search over events with given keys.
+
+ Args:
+ room_id (str): The room_id to search in
+ search_term (str): Search term to search for
+ keys (list): List of keys to search in, currently supports
+ "content.body", "content.name", "content.topic"
+ pagination_token (str): A pagination token previously returned
+
+ Returns:
+ list of dicts
+ """
+ clauses = []
+ args = [search_term, room_id]
+
+ local_clauses = []
+ for key in keys:
+ local_clauses.append("key = ?")
+ args.append(key)
+
+ clauses.append(
+ "(%s)" % (" OR ".join(local_clauses),)
+ )
+
+ if pagination_token:
+ try:
+ topo, stream = pagination_token.split(",")
+ topo = int(topo)
+ stream = int(stream)
+ except ValueError:
+ raise SynapseError(400, "Invalid pagination token")
+
+ clauses.append(
+ "(topological_ordering < ?"
+ " OR (topological_ordering = ? AND stream_ordering < ?))"
+ )
+ args.extend([topo, topo, stream])
+
+ if isinstance(self.database_engine, PostgresEngine):
+ sql = (
+ "SELECT ts_rank_cd(vector, query) as rank,"
+ " topological_ordering, stream_ordering, room_id, event_id"
+ " FROM plainto_tsquery('english', ?) as query, event_search"
+ " NATURAL JOIN events"
+ " WHERE vector @@ query AND room_id = ?"
+ )
+ elif isinstance(self.database_engine, Sqlite3Engine):
+ # We use CROSS JOIN here to ensure we use the right indexes.
+ # https://sqlite.org/optoverview.html#crossjoin
+ #
+ # We want to use the full text search index on event_search to
+ # extract all possible matches first, then lookup those matches
+ # in the events table to get the topological ordering. We need
+ # to use the indexes in this order because sqlite refuses to
+ # MATCH unless it uses the full text search index
+ sql = (
+ "SELECT rank(matchinfo) as rank, room_id, event_id,"
+ " topological_ordering, stream_ordering"
+ " FROM (SELECT key, event_id, matchinfo(event_search) as matchinfo"
+ " FROM event_search"
+ " WHERE value MATCH ?"
+ " )"
+ " CROSS JOIN events USING (event_id)"
+ " WHERE room_id = ?"
+ )
+ else:
+ # This should be unreachable.
+ raise Exception("Unrecognized database engine")
+
+ for clause in clauses:
+ sql += " AND " + clause
+
+ # We add an arbitrary limit here to ensure we don't try to pull the
+ # entire table from the database.
+ sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
+
+ args.append(limit)
+
+ results = yield self._execute(
+ "search_rooms", self.cursor_to_dict, sql, *args
+ )
+
+ events = yield self._get_events([r["event_id"] for r in results])
+
+ event_map = {
+ ev.event_id: ev
+ for ev in events
+ }
+
+ defer.returnValue([
+ {
+ "event": event_map[r["event_id"]],
+ "rank": r["rank"],
+ "pagination_token": "%s,%s" % (
+ r["topological_ordering"], r["stream_ordering"]
+ ),
+ }
+ for r in results
+ if r["event_id"] in event_map
+ ])
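+
+ # Editorial sketch, not part of the original patch: paginating search
+ # results within a room. The token returned with each result can be fed
+ # back in as `pagination_token` to fetch the next, strictly older, page:
+ #
+ # results = yield store.search_room(
+ # room_id, "pizza", keys=["content.body"], limit=10,
+ # )
+ # if results:
+ # older = yield store.search_room(
+ # room_id, "pizza", keys=["content.body"], limit=10,
+ # pagination_token=results[-1]["pagination_token"],
+ # )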
diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py
new file mode 100644
index 00000000..b070be50
--- /dev/null
+++ b/synapse/storage/signatures.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from ._base import SQLBaseStore
+
+from unpaddedbase64 import encode_base64
+from synapse.crypto.event_signing import compute_event_reference_hash
+
+
+class SignatureStore(SQLBaseStore):
+ """Persistence for event signatures and hashes"""
+
+ def get_event_reference_hashes(self, event_ids):
+ def f(txn):
+ return [
+ self._get_event_reference_hashes_txn(txn, ev)
+ for ev in event_ids
+ ]
+
+ return self.runInteraction(
+ "get_event_reference_hashes",
+ f
+ )
+
+ @defer.inlineCallbacks
+ def add_event_hashes(self, event_ids):
+ hashes = yield self.get_event_reference_hashes(
+ event_ids
+ )
+ hashes = [
+ {
+ k: encode_base64(v) for k, v in h.items()
+ if k == "sha256"
+ }
+ for h in hashes
+ ]
+
+ defer.returnValue(zip(event_ids, hashes))
+
+ def _get_event_reference_hashes_txn(self, txn, event_id):
+ """Get all the hashes for a given PDU.
+ Args:
+ txn (cursor):
+ event_id (str): Id for the Event.
+ Returns:
+ A dict of algorithm -> hash.
+ """
+ query = (
+ "SELECT algorithm, hash"
+ " FROM event_reference_hashes"
+ " WHERE event_id = ?"
+ )
+ txn.execute(query, (event_id, ))
+ return {k: v for k, v in txn.fetchall()}
+
+ def _store_event_reference_hashes_txn(self, txn, events):
+ """Store a hash for a PDU
+ Args:
+ txn (cursor):
+ events (list): list of Events.
+ """
+
+ vals = []
+ for event in events:
+ ref_alg, ref_hash_bytes = compute_event_reference_hash(event)
+ vals.append({
+ "event_id": event.event_id,
+ "algorithm": ref_alg,
+ "hash": buffer(ref_hash_bytes),
+ })
+
+ self._simple_insert_many_txn(
+ txn,
+ table="event_reference_hashes",
+ values=vals,
+ )
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
new file mode 100644
index 00000000..80e9b63f
--- /dev/null
+++ b/synapse/storage/state.py
@@ -0,0 +1,441 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import (
+ cached, cachedInlineCallbacks, cachedList
+)
+
+from twisted.internet import defer
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class StateStore(SQLBaseStore):
+ """ Keeps track of the state at a given event.
+
+ This is done via the concept of `state groups`. Every event is assigned
+ a state group (identified by an arbitrary string), which references a
+ collection of state events. The current state of an event is then the
+ collection of state events referenced by the event's state group.
+
+ Hence, every change in the current state causes a new state group to be
+ generated. However, if no change happens (e.g., we get a message event
+ with only one parent), the event inherits the state group of its parent.
+
+ There are three tables:
+ * `state_groups`: Stores the group id, the first event in the group
+ and the room id.
+ * `event_to_state_groups`: Maps events to state groups.
+ * `state_groups_state`: Maps state group to state events.
+ """
+
+ @defer.inlineCallbacks
+ def get_state_groups(self, room_id, event_ids):
+ """ Get the state groups for the given list of event_ids
+
+ The return value is a dict mapping group names to lists of events.
+ """
+ if not event_ids:
+ defer.returnValue({})
+
+ event_to_groups = yield self._get_state_group_for_events(
+ event_ids,
+ )
+
+ groups = set(event_to_groups.values())
+ group_to_state = yield self._get_state_for_groups(groups)
+
+ defer.returnValue({
+ group: state_map.values()
+ for group, state_map in group_to_state.items()
+ })
+
+ def _store_state_groups_txn(self, txn, event, context):
+ return self._store_mult_state_groups_txn(txn, [(event, context)])
+
+ def _store_mult_state_groups_txn(self, txn, events_and_contexts):
+ state_groups = {}
+ for event, context in events_and_contexts:
+ if context.current_state is None:
+ continue
+
+ if context.state_group is not None:
+ state_groups[event.event_id] = context.state_group
+ continue
+
+ state_events = dict(context.current_state)
+
+ if event.is_state():
+ state_events[(event.type, event.state_key)] = event
+
+ state_group = self._state_groups_id_gen.get_next_txn(txn)
+ self._simple_insert_txn(
+ txn,
+ table="state_groups",
+ values={
+ "id": state_group,
+ "room_id": event.room_id,
+ "event_id": event.event_id,
+ },
+ )
+
+ self._simple_insert_many_txn(
+ txn,
+ table="state_groups_state",
+ values=[
+ {
+ "state_group": state_group,
+ "room_id": state.room_id,
+ "type": state.type,
+ "state_key": state.state_key,
+ "event_id": state.event_id,
+ }
+ for state in state_events.values()
+ ],
+ )
+ state_groups[event.event_id] = state_group
+
+ self._simple_insert_many_txn(
+ txn,
+ table="event_to_state_groups",
+ values=[
+ {
+ "state_group": state_groups[event.event_id],
+ "event_id": event.event_id,
+ }
+ for event, context in events_and_contexts
+ if context.current_state is not None
+ ],
+ )
+
+ @defer.inlineCallbacks
+ def get_current_state(self, room_id, event_type=None, state_key=""):
+ if event_type and state_key is not None:
+ result = yield self.get_current_state_for_key(
+ room_id, event_type, state_key
+ )
+ defer.returnValue(result)
+
+ def f(txn):
+ sql = (
+ "SELECT event_id FROM current_state_events"
+ " WHERE room_id = ? "
+ )
+
+ if event_type and state_key is not None:
+ sql += " AND type = ? AND state_key = ? "
+ args = (room_id, event_type, state_key)
+ elif event_type:
+ sql += " AND type = ?"
+ args = (room_id, event_type)
+ else:
+ args = (room_id, )
+
+ txn.execute(sql, args)
+ results = txn.fetchall()
+
+ return [r[0] for r in results]
+
+ event_ids = yield self.runInteraction("get_current_state", f)
+ events = yield self._get_events(event_ids, get_prev_content=False)
+ defer.returnValue(events)
+
+ @cachedInlineCallbacks(num_args=3)
+ def get_current_state_for_key(self, room_id, event_type, state_key):
+ def f(txn):
+ sql = (
+ "SELECT event_id FROM current_state_events"
+ " WHERE room_id = ? AND type = ? AND state_key = ?"
+ )
+
+ args = (room_id, event_type, state_key)
+ txn.execute(sql, args)
+ results = txn.fetchall()
+ return [r[0] for r in results]
+ event_ids = yield self.runInteraction("get_current_state_for_key", f)
+ events = yield self._get_events(event_ids, get_prev_content=False)
+ defer.returnValue(events)
+
+ def _get_state_groups_from_groups(self, groups_and_types):
+ """Returns dictionary state_group -> state event ids
+
+ Args:
+ groups_and_types (list): list of 2-tuples of (`group`, `types`)
+ """
+ def f(txn):
+ results = {}
+ for group, types in groups_and_types:
+ if types is not None:
+ where_clause = "AND (%s)" % (
+ " OR ".join(["(type = ? AND state_key = ?)"] * len(types)),
+ )
+ else:
+ where_clause = ""
+
+ sql = (
+ "SELECT event_id FROM state_groups_state WHERE"
+ " state_group = ? %s"
+ ) % (where_clause,)
+
+ args = [group]
+ if types is not None:
+ args.extend([i for typ in types for i in typ])
+
+ txn.execute(sql, args)
+
+ results[group] = [r[0] for r in txn.fetchall()]
+
+ return results
+
+ return self.runInteraction(
+ "_get_state_groups_from_groups",
+ f,
+ )
+
+ @defer.inlineCallbacks
+ def get_state_for_events(self, event_ids, types):
+ """Given a list of event_ids and type tuples, return a list of state
+ dicts for each event. The state dicts will only have the type/state_keys
+ that are in the `types` list.
+
+ Args:
+ event_ids (list)
+ types (list): List of (type, state_key) tuples which are used to
+ filter the state fetched. `state_key` may be None, which matches
+ any `state_key`
+
+ Returns:
+ deferred: A dict from event_id to the state dict for that event.
+ Each state dict is a mapping from (type, state_key) -> state_event
+ """
+ event_to_groups = yield self._get_state_group_for_events(
+ event_ids,
+ )
+
+ groups = set(event_to_groups.values())
+ group_to_state = yield self._get_state_for_groups(groups, types)
+
+ event_to_state = {
+ event_id: group_to_state[group]
+ for event_id, group in event_to_groups.items()
+ }
+
+ defer.returnValue({event: event_to_state[event] for event in event_ids})
+
+ @defer.inlineCallbacks
+ def get_state_for_event(self, event_id, types=None):
+ """
+ Get the state dict corresponding to a particular event
+
+ :param str event_id: event whose state should be returned
+ :param list[(str, str)]|None types: List of (type, state_key) tuples
+ which are used to filter the state fetched. May be None, which
+ matches any key
+ :return: a deferred dict from (type, state_key) -> state_event
+ """
+ state_map = yield self.get_state_for_events([event_id], types)
+ defer.returnValue(state_map[event_id])
+
+ @cached(num_args=2, lru=True, max_entries=10000)
+ def _get_state_group_for_event(self, room_id, event_id):
+ return self._simple_select_one_onecol(
+ table="event_to_state_groups",
+ keyvalues={
+ "event_id": event_id,
+ },
+ retcol="state_group",
+ allow_none=True,
+ desc="_get_state_group_for_event",
+ )
+
+ @cachedList(cache=_get_state_group_for_event.cache, list_name="event_ids",
+ num_args=1)
+ def _get_state_group_for_events(self, event_ids):
+ """Returns mapping event_id -> state_group
+ """
+ def f(txn):
+ results = {}
+ for event_id in event_ids:
+ results[event_id] = self._simple_select_one_onecol_txn(
+ txn,
+ table="event_to_state_groups",
+ keyvalues={
+ "event_id": event_id,
+ },
+ retcol="state_group",
+ allow_none=True,
+ )
+
+ return results
+
+ return self.runInteraction("_get_state_group_for_events", f)
+
+ def _get_some_state_from_cache(self, group, types):
+ """Checks if group is in cache. See `_get_state_for_groups`
+
+ Returns 3-tuple (`state_dict`, `missing_types`, `got_all`).
+ `missing_types` is the list of types that aren't in the cache for that
+ group. `got_all` is a bool indicating whether we successfully retrieved
+ all of the requested state from the cache; if False we need to query the
+ DB for the missing state.
+
+ Args:
+ group: The state group to lookup
+ types (list): List of 2-tuples of the form (`type`, `state_key`),
+ where a `state_key` of `None` matches all state_keys for the
+ `type`.
+ """
+ is_all, state_dict = self._state_group_cache.get(group)
+
+ type_to_key = {}
+ missing_types = set()
+ for typ, state_key in types:
+ if state_key is None:
+ type_to_key[typ] = None
+ missing_types.add((typ, state_key))
+ else:
+ if type_to_key.get(typ, object()) is not None:
+ type_to_key.setdefault(typ, set()).add(state_key)
+
+ if (typ, state_key) not in state_dict:
+ missing_types.add((typ, state_key))
+
+ sentinel = object()
+
+ def include(typ, state_key):
+ valid_state_keys = type_to_key.get(typ, sentinel)
+ if valid_state_keys is sentinel:
+ return False
+ if valid_state_keys is None:
+ return True
+ if state_key in valid_state_keys:
+ return True
+ return False
+
+ got_all = not (missing_types or types is None)
+
+ return {
+ k: v for k, v in state_dict.items()
+ if include(k[0], k[1])
+ }, missing_types, got_all
+
+ def _get_all_state_from_cache(self, group):
+ """Checks if group is in cache. See `_get_state_for_groups`
+
+ Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool
+ indicating whether we successfully retrieved all of the requested state
+ from the cache; if False we need to query the DB for the missing state.
+
+ Args:
+ group: The state group to lookup
+ """
+ is_all, state_dict = self._state_group_cache.get(group)
+ return state_dict, is_all
+
+ @defer.inlineCallbacks
+ def _get_state_for_groups(self, groups, types=None):
+ """Given list of groups returns dict of group -> list of state events
+ with matching types. `types` is a list of `(type, state_key)`, where
+ a `state_key` of None matches all state_keys. If `types` is None then
+ all events are returned.
+ """
+ results = {}
+ missing_groups_and_types = []
+ if types is not None:
+ for group in set(groups):
+ state_dict, missing_types, got_all = self._get_some_state_from_cache(
+ group, types
+ )
+ results[group] = state_dict
+
+ if not got_all:
+ missing_groups_and_types.append((group, missing_types))
+ else:
+ for group in set(groups):
+ state_dict, got_all = self._get_all_state_from_cache(
+ group
+ )
+ results[group] = state_dict
+
+ if not got_all:
+ missing_groups_and_types.append((group, None))
+
+ if not missing_groups_and_types:
+ defer.returnValue({
+ group: {
+ type_tuple: event
+ for type_tuple, event in state.items()
+ if event
+ }
+ for group, state in results.items()
+ })
+
+ # Okay, so we have some missing_types, lets fetch them.
+ cache_seq_num = self._state_group_cache.sequence
+
+ group_state_dict = yield self._get_state_groups_from_groups(
+ missing_groups_and_types
+ )
+
+ state_events = yield self._get_events(
+ [e_id for l in group_state_dict.values() for e_id in l],
+ get_prev_content=False
+ )
+
+ state_events = {e.event_id: e for e in state_events}
+
+ # Now we want to update the cache with all the things we fetched
+ # from the database.
+ for group, state_ids in group_state_dict.items():
+ if types:
+ # We deliberately put key -> None mappings into the cache to
+ # cache absence of the key, on the assumption that if we've
+ # explicitly asked for some types then we will probably ask
+ # for them again.
+ state_dict = {key: None for key in types}
+ state_dict.update(results[group])
+ results[group] = state_dict
+ else:
+ state_dict = results[group]
+
+ for event_id in state_ids:
+ try:
+ state_event = state_events[event_id]
+ state_dict[(state_event.type, state_event.state_key)] = state_event
+ except KeyError:
+ # Hmm. So we don't have that state event? Interesting.
+ logger.warn(
+ "Can't find state event %r for state group %r",
+ event_id, group,
+ )
+
+ self._state_group_cache.update(
+ cache_seq_num,
+ key=group,
+ value=state_dict,
+ full=(types is None),
+ )
+
+ # Remove all the entries with None values. The None values were just
+ # used for bookkeeping in the cache.
+ for group, state_dict in results.items():
+ results[group] = {
+ key: event for key, event in state_dict.items() if event
+ }
+
+ defer.returnValue(results)
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
new file mode 100644
index 00000000..be8ba76a
--- /dev/null
+++ b/synapse/storage/stream.py
@@ -0,0 +1,602 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" This module is responsible for getting events from the DB for pagination
+and event streaming.
+
+The order it returns events in depends on whether we are streaming forwards
+or paginating backwards. We do this because we want to handle out-of-order
+messages nicely, while still returning them in the correct order when we
+paginate backwards.
+
+This is implemented by keeping two ordering columns: stream_ordering and
+topological_ordering. Stream ordering is basically insertion/received order
+(except for events from backfill requests). The topological_ordering is a
+weak ordering of events based on the pdu graph.
+
+This means that we have to have two different types of tokens, depending on
+what sort order was used:
+ - stream tokens are of the form: "s%d", which maps directly to the column
+ - topological tokens: "t%d-%d", where the integers map to the topological
+ and stream ordering columns respectively.
+"""
+
+from twisted.internet import defer
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.api.constants import EventTypes
+from synapse.types import RoomStreamToken
+from synapse.util.logutils import log_function
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+MAX_STREAM_SIZE = 1000
+
+
+_STREAM_TOKEN = "stream"
+_TOPOLOGICAL_TOKEN = "topological"
+
+
+def lower_bound(token):
+ if token.topological is None:
+ return "(%d < %s)" % (token.stream, "stream_ordering")
+ else:
+ return "(%d < %s OR (%d = %s AND %d < %s))" % (
+ token.topological, "topological_ordering",
+ token.topological, "topological_ordering",
+ token.stream, "stream_ordering",
+ )
+
+
+def upper_bound(token):
+ if token.topological is None:
+ return "(%d >= %s)" % (token.stream, "stream_ordering")
+ else:
+ return "(%d > %s OR (%d = %s AND %d >= %s))" % (
+ token.topological, "topological_ordering",
+ token.topological, "topological_ordering",
+ token.stream, "stream_ordering",
+ )
+
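+# Editorial sketch, not part of the original patch: for RoomStreamToken(5, 42)
+# upper_bound() produces the clause
+# "(5 > topological_ordering OR (5 = topological_ordering AND 42 >= stream_ordering))",
+# i.e. everything at or before that position when paginating backwards.
+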
+
+class StreamStore(SQLBaseStore):
+
+ @defer.inlineCallbacks
+ def get_appservice_room_stream(self, service, from_key, to_key, limit=0):
+ # NB this lives here instead of appservice.py so we can reuse the
+ # 'private' StreamToken class in this file.
+ if limit:
+ # Cap the limit so we never return more than MAX_STREAM_SIZE rows.
+ limit = min(limit, MAX_STREAM_SIZE)
+ else:
+ limit = MAX_STREAM_SIZE
+
+ # The from and to keys should be integer stream orderings.
+ from_id = RoomStreamToken.parse_stream_token(from_key)
+ to_id = RoomStreamToken.parse_stream_token(to_key)
+
+ if from_key == to_key:
+ defer.returnValue(([], to_key))
+ return
+
+ # select all the events between from/to with a sensible limit
+ sql = (
+ "SELECT e.event_id, e.room_id, e.type, s.state_key, "
+ "e.stream_ordering FROM events AS e "
+ "LEFT JOIN state_events as s ON "
+ "e.event_id = s.event_id "
+ "WHERE e.stream_ordering > ? AND e.stream_ordering <= ? "
+ "ORDER BY stream_ordering ASC LIMIT %(limit)d "
+ ) % {
+ "limit": limit
+ }
+
+ def f(txn):
+ # pull out all the events between the tokens
+ txn.execute(sql, (from_id.stream, to_id.stream,))
+ rows = self.cursor_to_dict(txn)
+
+ # Logic:
+ # - We want ALL events which match the AS room_id regex
+ # - We want ALL events which match the rooms represented by the AS
+ # room_alias regex
+ # - We want ALL events for rooms that AS users have joined.
+ # This is currently supported via get_app_service_rooms (which is
+ # used for the Notifier listener rooms). We can't reasonably make a
+ # SQL query for these room IDs, so we'll pull all the events between
+ # from/to and filter in python.
+ rooms_for_as = self._get_app_service_rooms_txn(txn, service)
+ room_ids_for_as = [r.room_id for r in rooms_for_as]
+
+ def app_service_interested(row):
+ if row["room_id"] in room_ids_for_as:
+ return True
+
+ if row["type"] == EventTypes.Member:
+ if service.is_interested_in_user(row.get("state_key")):
+ return True
+ return False
+
+ ret = self._get_events_txn(
+ txn,
+ # apply the filter on the room id list
+ [
+ r["event_id"] for r in rows
+ if app_service_interested(r)
+ ],
+ get_prev_content=True
+ )
+
+ self._set_before_and_after(ret, rows)
+
+ if rows:
+ key = "s%d" % max(r["stream_ordering"] for r in rows)
+ else:
+ # Assume we didn't get anything because there was nothing to
+ # get.
+ key = to_key
+
+ return ret, key
+
+ results = yield self.runInteraction("get_appservice_room_stream", f)
+ defer.returnValue(results)
+
+ @log_function
+ def get_room_events_stream(
+ self,
+ user_id,
+ from_key,
+ to_key,
+ limit=0,
+ is_guest=False,
+ room_ids=None
+ ):
+ room_ids = room_ids or []
+ room_ids = [r for r in room_ids]
+ if is_guest:
+ current_room_membership_sql = (
+ "SELECT c.room_id FROM history_visibility AS h"
+ " INNER JOIN current_state_events AS c"
+ " ON h.event_id = c.event_id"
+ " WHERE c.room_id IN (%s) AND h.history_visibility = 'world_readable'" % (
+ ",".join(map(lambda _: "?", room_ids))
+ )
+ )
+ current_room_membership_args = room_ids
+ else:
+ current_room_membership_sql = (
+ "SELECT m.room_id FROM room_memberships as m "
+ " INNER JOIN current_state_events as c"
+ " ON m.event_id = c.event_id AND c.state_key = m.user_id"
+ " WHERE m.user_id = ? AND m.membership = 'join'"
+ )
+ current_room_membership_args = [user_id]
+ if room_ids:
+ current_room_membership_sql += " AND m.room_id in (%s)" % (
+ ",".join(map(lambda _: "?", room_ids))
+ )
+ current_room_membership_args = [user_id] + room_ids
+
+ # We also want to get any membership events about that user, e.g.
+ # invites or leave notifications.
+ membership_sql = (
+ "SELECT m.event_id FROM room_memberships as m "
+ "INNER JOIN current_state_events as c ON m.event_id = c.event_id "
+ "WHERE m.user_id = ? "
+ )
+ membership_args = [user_id]
+
+ if limit:
+ # Cap the limit so we never return more than MAX_STREAM_SIZE rows.
+ limit = min(limit, MAX_STREAM_SIZE)
+ else:
+ limit = MAX_STREAM_SIZE
+
+ # The from and to keys should be integer stream orderings.
+ from_id = RoomStreamToken.parse_stream_token(from_key)
+ to_id = RoomStreamToken.parse_stream_token(to_key)
+
+ if from_key == to_key:
+ return defer.succeed(([], to_key))
+
+ sql = (
+ "SELECT e.event_id, e.stream_ordering FROM events AS e WHERE "
+ "(e.outlier = ? AND (room_id IN (%(current)s)) OR "
+ "(event_id IN (%(invites)s))) "
+ "AND e.stream_ordering > ? AND e.stream_ordering <= ? "
+ "ORDER BY stream_ordering ASC LIMIT %(limit)d "
+ ) % {
+ "current": current_room_membership_sql,
+ "invites": membership_sql,
+ "limit": limit
+ }
+
+ def f(txn):
+ args = ([False] + current_room_membership_args + membership_args +
+ [from_id.stream, to_id.stream])
+ txn.execute(sql, args)
+
+ rows = self.cursor_to_dict(txn)
+
+ ret = self._get_events_txn(
+ txn,
+ [r["event_id"] for r in rows],
+ get_prev_content=True
+ )
+
+ self._set_before_and_after(ret, rows)
+
+ if rows:
+ key = "s%d" % max(r["stream_ordering"] for r in rows)
+ else:
+ # Assume we didn't get anything because there was nothing to
+ # get.
+ key = to_key
+
+ return ret, key
+
+ return self.runInteraction("get_room_events_stream", f)
+
+ @defer.inlineCallbacks
+ def paginate_room_events(self, room_id, from_key, to_key=None,
+ direction='b', limit=-1):
+ # Tokens really represent positions between elements, but we use
+ # the convention of pointing to the event before the gap. Hence
+ # we have a bit of asymmetry when it comes to equalities.
+ args = [False, room_id]
+ if direction == 'b':
+ order = "DESC"
+ bounds = upper_bound(RoomStreamToken.parse(from_key))
+ if to_key:
+ bounds = "%s AND %s" % (
+ bounds, lower_bound(RoomStreamToken.parse(to_key))
+ )
+ else:
+ order = "ASC"
+ bounds = lower_bound(RoomStreamToken.parse(from_key))
+ if to_key:
+ bounds = "%s AND %s" % (
+ bounds, upper_bound(RoomStreamToken.parse(to_key))
+ )
+
+ if int(limit) > 0:
+ args.append(int(limit))
+ limit_str = " LIMIT ?"
+ else:
+ limit_str = ""
+
+ sql = (
+ "SELECT * FROM events"
+ " WHERE outlier = ? AND room_id = ? AND %(bounds)s"
+ " ORDER BY topological_ordering %(order)s,"
+ " stream_ordering %(order)s %(limit)s"
+ ) % {
+ "bounds": bounds,
+ "order": order,
+ "limit": limit_str
+ }
+
+ def f(txn):
+ txn.execute(sql, args)
+
+ rows = self.cursor_to_dict(txn)
+
+ if rows:
+ topo = rows[-1]["topological_ordering"]
+ toke = rows[-1]["stream_ordering"]
+ if direction == 'b':
+ # Tokens are positions between events.
+ # This token points *after* the last event in the chunk.
+ # We need it to point to the event before it in the chunk
+ # when we are going backwards so we subtract one from the
+ # stream part.
+ toke -= 1
+ next_token = str(RoomStreamToken(topo, toke))
+ else:
+ # TODO (erikj): We should work out what to do here instead.
+ next_token = to_key if to_key else from_key
+
+ return rows, next_token,
+
+ rows, token = yield self.runInteraction("paginate_room_events", f)
+
+ events = yield self._get_events(
+ [r["event_id"] for r in rows],
+ get_prev_content=True
+ )
+
+ self._set_before_and_after(events, rows)
+
+ defer.returnValue((events, token))
+
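+ # Editorial sketch, not part of the original patch: walking a room
+ # backwards one page at a time, feeding each returned token back in:
+ #
+ # events, next_token = yield store.paginate_room_events(
+ # room_id, from_key, direction='b', limit=20,
+ # )
+ # while events:
+ # events, next_token = yield store.paginate_room_events(
+ # room_id, next_token, direction='b', limit=20,
+ # )
+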
+ @cachedInlineCallbacks(num_args=4)
+ def get_recent_events_for_room(self, room_id, limit, end_token, from_token=None):
+
+ end_token = RoomStreamToken.parse_stream_token(end_token)
+
+ if from_token is None:
+ sql = (
+ "SELECT stream_ordering, topological_ordering, event_id"
+ " FROM events"
+ " WHERE room_id = ? AND stream_ordering <= ? AND outlier = ?"
+ " ORDER BY topological_ordering DESC, stream_ordering DESC"
+ " LIMIT ?"
+ )
+ else:
+ from_token = RoomStreamToken.parse_stream_token(from_token)
+ sql = (
+ "SELECT stream_ordering, topological_ordering, event_id"
+ " FROM events"
+ " WHERE room_id = ? AND stream_ordering > ?"
+ " AND stream_ordering <= ? AND outlier = ?"
+ " ORDER BY topological_ordering DESC, stream_ordering DESC"
+ " LIMIT ?"
+ )
+
+ def get_recent_events_for_room_txn(txn):
+ if from_token is None:
+ txn.execute(sql, (room_id, end_token.stream, False, limit,))
+ else:
+ txn.execute(sql, (
+ room_id, from_token.stream, end_token.stream, False, limit
+ ))
+
+ rows = self.cursor_to_dict(txn)
+
+ rows.reverse() # As we selected with reverse ordering
+
+ if rows:
+ # Tokens are positions between events.
+ # This token points *after* the last event in the chunk.
+ # We need it to point to the event before it in the chunk
+ # since we are going backwards so we subtract one from the
+ # stream part.
+ topo = rows[0]["topological_ordering"]
+ toke = rows[0]["stream_ordering"] - 1
+ start_token = str(RoomStreamToken(topo, toke))
+
+ token = (start_token, str(end_token))
+ else:
+ token = (str(end_token), str(end_token))
+
+ return rows, token
+
+ rows, token = yield self.runInteraction(
+ "get_recent_events_for_room", get_recent_events_for_room_txn
+ )
+
+ logger.debug("stream before")
+ events = yield self._get_events(
+ [r["event_id"] for r in rows],
+ get_prev_content=True
+ )
+ logger.debug("stream after")
+
+ self._set_before_and_after(events, rows)
+
+ defer.returnValue((events, token))
+
+ @defer.inlineCallbacks
+ def get_room_events_max_id(self, direction='f'):
+ token = yield self._stream_id_gen.get_max_token(self)
+ if direction != 'b':
+ defer.returnValue("s%d" % (token,))
+ else:
+ topo = yield self.runInteraction(
+ "_get_max_topological_txn", self._get_max_topological_txn
+ )
+ defer.returnValue("t%d-%d" % (topo, token))
+
+ def get_stream_token_for_event(self, event_id):
+ """The stream token for an event
+ Args:
+ event_id(str): The id of the event to look up a stream token for.
+ Raises:
+ StoreError if the event wasn't in the database.
+ Returns:
+ A deferred "s%d" stream token.
+ """
+ return self._simple_select_one_onecol(
+ table="events",
+ keyvalues={"event_id": event_id},
+ retcol="stream_ordering",
+ ).addCallback(lambda row: "s%d" % (row,))
+
+ def get_topological_token_for_event(self, event_id):
+ """The stream token for an event
+ Args:
+ event_id(str): The id of the event to look up a stream token for.
+ Raises:
+ StoreError if the event wasn't in the database.
+ Returns:
+ A deferred "t%d-%d" topological token.
+ """
+ return self._simple_select_one(
+ table="events",
+ keyvalues={"event_id": event_id},
+ retcols=("stream_ordering", "topological_ordering"),
+ ).addCallback(lambda row: "t%d-%d" % (
+ row["topological_ordering"], row["stream_ordering"],)
+ )
+
+ def _get_max_topological_txn(self, txn):
+ txn.execute(
+ "SELECT MAX(topological_ordering) FROM events"
+ " WHERE outlier = ?",
+ (False,)
+ )
+
+ rows = txn.fetchall()
+ return rows[0][0] if rows else 0
+
+ @defer.inlineCallbacks
+ def _get_min_token(self):
+ row = yield self._execute(
+ "_get_min_token", None, "SELECT MIN(stream_ordering) FROM events"
+ )
+
+ self.min_token = row[0][0] if row and row[0] and row[0][0] else -1
+ self.min_token = min(self.min_token, -1)
+
+ logger.debug("min_token is: %s", self.min_token)
+
+ defer.returnValue(self.min_token)
+
+ @staticmethod
+ def _set_before_and_after(events, rows):
+ for event, row in zip(events, rows):
+ stream = row["stream_ordering"]
+ topo = event.depth
+ internal = event.internal_metadata
+ internal.before = str(RoomStreamToken(topo, stream - 1))
+ internal.after = str(RoomStreamToken(topo, stream))
+
+ @defer.inlineCallbacks
+ def get_events_around(self, room_id, event_id, before_limit, after_limit):
+ """Retrieve events and pagination tokens around a given event in a
+ room.
+
+ Args:
+ room_id (str)
+ event_id (str)
+ before_limit (int)
+ after_limit (int)
+
+ Returns:
+ dict
+ """
+
+ results = yield self.runInteraction(
+ "get_events_around", self._get_events_around_txn,
+ room_id, event_id, before_limit, after_limit
+ )
+
+ events_before = yield self._get_events(
+ [e for e in results["before"]["event_ids"]],
+ get_prev_content=True
+ )
+
+ events_after = yield self._get_events(
+ [e for e in results["after"]["event_ids"]],
+ get_prev_content=True
+ )
+
+ defer.returnValue({
+ "events_before": events_before,
+ "events_after": events_after,
+ "start": results["before"]["token"],
+ "end": results["after"]["token"],
+ })
+
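+ # Editorial sketch, not part of the original patch: fetching context
+ # around a search hit. "start" and "end" are pagination tokens that can
+ # be handed to paginate_room_events to widen the window:
+ #
+ # ctx = yield store.get_events_around(room_id, event_id, 5, 5)
+ # surrounding = ctx["events_before"] + ctx["events_after"]
+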
+ def _get_events_around_txn(self, txn, room_id, event_id, before_limit, after_limit):
+ """Retrieves event_ids and pagination tokens around a given event in a
+ room.
+
+ Args:
+ room_id (str)
+ event_id (str)
+ before_limit (int)
+ after_limit (int)
+
+ Returns:
+ dict
+ """
+
+ results = self._simple_select_one_txn(
+ txn,
+ "events",
+ keyvalues={
+ "event_id": event_id,
+ "room_id": room_id,
+ },
+ retcols=["stream_ordering", "topological_ordering"],
+ )
+
+ stream_ordering = results["stream_ordering"]
+ topological_ordering = results["topological_ordering"]
+
+ query_before = (
+ "SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND (topological_ordering < ?"
+ " OR (topological_ordering = ? AND stream_ordering < ?))"
+ " ORDER BY topological_ordering DESC, stream_ordering DESC"
+ " LIMIT ?"
+ )
+
+ query_after = (
+ "SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND (topological_ordering > ?"
+ " OR (topological_ordering = ? AND stream_ordering > ?))"
+ " ORDER BY topological_ordering ASC, stream_ordering ASC"
+ " LIMIT ?"
+ )
+
+ txn.execute(
+ query_before,
+ (
+ room_id, topological_ordering, topological_ordering,
+ stream_ordering, before_limit,
+ )
+ )
+
+ rows = self.cursor_to_dict(txn)
+ events_before = [r["event_id"] for r in rows]
+
+ if rows:
+ start_token = str(RoomStreamToken(
+ rows[0]["topological_ordering"],
+ rows[0]["stream_ordering"] - 1,
+ ))
+ else:
+ start_token = str(RoomStreamToken(
+ topological_ordering,
+ stream_ordering - 1,
+ ))
+
+ txn.execute(
+ query_after,
+ (
+ room_id, topological_ordering, topological_ordering,
+ stream_ordering, after_limit,
+ )
+ )
+
+ rows = self.cursor_to_dict(txn)
+ events_after = [r["event_id"] for r in rows]
+
+ if rows:
+ end_token = str(RoomStreamToken(
+ rows[-1]["topological_ordering"],
+ rows[-1]["stream_ordering"],
+ ))
+ else:
+ end_token = str(RoomStreamToken(
+ topological_ordering,
+ stream_ordering,
+ ))
+
+ return {
+ "before": {
+ "event_ids": events_before,
+ "token": start_token,
+ },
+ "after": {
+ "event_ids": events_after,
+ "token": end_token,
+ },
+ }
diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py
new file mode 100644
index 00000000..bf695b78
--- /dev/null
+++ b/synapse/storage/tags.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached
+from twisted.internet import defer
+from .util.id_generators import StreamIdGenerator
+
+import ujson as json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class TagsStore(SQLBaseStore):
+ def __init__(self, hs):
+ super(TagsStore, self).__init__(hs)
+
+ self._private_user_data_id_gen = StreamIdGenerator(
+ "private_user_data_max_stream_id", "stream_id"
+ )
+
+ def get_max_private_user_data_stream_id(self):
+ """Get the current max stream id for the private user data stream
+
+ Returns:
+ A deferred int.
+ """
+ return self._private_user_data_id_gen.get_max_token(self)
+
+ @cached()
+ def get_tags_for_user(self, user_id):
+ """Get all the tags for a user.
+
+
+ Args:
+ user_id(str): The user to get the tags for.
+ Returns:
+ A deferred dict mapping from room_id strings to lists of tag
+ strings.
+ """
+
+ deferred = self._simple_select_list(
+ "room_tags", {"user_id": user_id}, ["room_id", "tag", "content"]
+ )
+
+ @deferred.addCallback
+ def tags_by_room(rows):
+ tags_by_room = {}
+ for row in rows:
+ room_tags = tags_by_room.setdefault(row["room_id"], {})
+ room_tags[row["tag"]] = json.loads(row["content"])
+ return tags_by_room
+
+ return deferred
+
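+ # Editorial sketch, not part of the original patch: the resolved value
+ # maps room ids to {tag_name: content} dicts, e.g.:
+ #
+ # {
+ # "!abc:example.com": {"m.favourite": {"order": 0.5}},
+ # }
+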
+ @defer.inlineCallbacks
+ def get_updated_tags(self, user_id, stream_id):
+ """Get all the tags for the rooms where the tags have changed since the
+ given version
+
+ Args:
+ user_id(str): The user to get the tags for.
+ stream_id(int): The earliest update to get for the user.
+ Returns:
+ A deferred dict mapping from room_id strings to tag dicts for all
+ the rooms whose tags changed since the stream_id token.
+ """
+ def get_updated_tags_txn(txn):
+ sql = (
+ "SELECT room_id from room_tags_revisions"
+ " WHERE user_id = ? AND stream_id > ?"
+ )
+ txn.execute(sql, (user_id, stream_id))
+ room_ids = [row[0] for row in txn.fetchall()]
+ return room_ids
+
+ room_ids = yield self.runInteraction(
+ "get_updated_tags", get_updated_tags_txn
+ )
+
+ results = {}
+ if room_ids:
+ tags_by_room = yield self.get_tags_for_user(user_id)
+ for room_id in room_ids:
+ results[room_id] = tags_by_room.get(room_id, {})
+
+ defer.returnValue(results)
+
+ def get_tags_for_room(self, user_id, room_id):
+ """Get all the tags for the given room
+ Args:
+ user_id(str): The user to get tags for
+ room_id(str): The room to get tags for
+ Returns:
+ A deferred dict mapping from tag strings to tag content.
+ """
+ return self._simple_select_list(
+ table="room_tags",
+ keyvalues={"user_id": user_id, "room_id": room_id},
+ retcols=("tag", "content"),
+ desc="get_tags_for_room",
+ ).addCallback(lambda rows: {
+ row["tag"]: json.loads(row["content"]) for row in rows
+ })
+
+ @defer.inlineCallbacks
+ def add_tag_to_room(self, user_id, room_id, tag, content):
+ """Add a tag to a room for a user.
+ Args:
+ user_id(str): The user to add a tag for.
+ room_id(str): The room to add a tag for.
+ tag(str): The tag name to add.
+ content(dict): A json object to associate with the tag.
+ Returns:
+ A deferred that completes once the tag has been added.
+ """
+ content_json = json.dumps(content)
+
+ def add_tag_txn(txn, next_id):
+ self._simple_upsert_txn(
+ txn,
+ table="room_tags",
+ keyvalues={
+ "user_id": user_id,
+ "room_id": room_id,
+ "tag": tag,
+ },
+ values={
+ "content": content_json,
+ }
+ )
+ self._update_revision_txn(txn, user_id, room_id, next_id)
+
+ with (yield self._private_user_data_id_gen.get_next(self)) as next_id:
+ yield self.runInteraction("add_tag", add_tag_txn, next_id)
+
+ self.get_tags_for_user.invalidate((user_id,))
+
+ result = yield self._private_user_data_id_gen.get_max_token(self)
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def remove_tag_from_room(self, user_id, room_id, tag):
+ """Remove a tag from a room for a user.
+ Returns:
+ A deferred that completes once the tag has been removed
+ """
+ def remove_tag_txn(txn, next_id):
+ sql = (
+ "DELETE FROM room_tags "
+ " WHERE user_id = ? AND room_id = ? AND tag = ?"
+ )
+ txn.execute(sql, (user_id, room_id, tag))
+ self._update_revision_txn(txn, user_id, room_id, next_id)
+
+ with (yield self._private_user_data_id_gen.get_next(self)) as next_id:
+ yield self.runInteraction("remove_tag", remove_tag_txn, next_id)
+
+ self.get_tags_for_user.invalidate((user_id,))
+
+ result = yield self._private_user_data_id_gen.get_max_token(self)
+ defer.returnValue(result)
+
+ def _update_revision_txn(self, txn, user_id, room_id, next_id):
+ """Update the latest revision of the tags for the given user and room.
+
+ Args:
+ txn: The database cursor
+ user_id(str): The ID of the user.
+ room_id(str): The ID of the room.
+ next_id(int): The revision to advance to.
+ """
+
+ update_max_id_sql = (
+ "UPDATE private_user_data_max_stream_id"
+ " SET stream_id = ?"
+ " WHERE stream_id < ?"
+ )
+ txn.execute(update_max_id_sql, (next_id, next_id))
+
+ update_sql = (
+ "UPDATE room_tags_revisions"
+ " SET stream_id = ?"
+ " WHERE user_id = ?"
+ " AND room_id = ?"
+ )
+ txn.execute(update_sql, (next_id, user_id, room_id))
+
+ if txn.rowcount == 0:
+ insert_sql = (
+ "INSERT INTO room_tags_revisions (user_id, room_id, stream_id)"
+ " VALUES (?, ?, ?)"
+ )
+ try:
+ txn.execute(insert_sql, (user_id, room_id, next_id))
+ except self.database_engine.module.IntegrityError:
+ # Ignore insertion errors. It doesn't matter if the row wasn't
+ # inserted because if two updates happened concurrently the one
+ # with the higher stream_id will not be reported to a client
+ # unless the previous update has completed. It doesn't matter
+ # which stream_id ends up in the table, as long as it is higher
+ # than the id that the client has.
+ pass
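+
+ # Editorial sketch, not part of the original patch: tagging a room and
+ # reading back the resulting stream id:
+ #
+ # stream_id = yield store.add_tag_to_room(
+ # user_id, room_id, "m.favourite", {"order": 0.5},
+ # )
+ # stream_id = yield store.remove_tag_from_room(
+ # user_id, room_id, "m.favourite",
+ # )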
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
new file mode 100644
index 00000000..ad099775
--- /dev/null
+++ b/synapse/storage/transactions.py
@@ -0,0 +1,355 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached
+
+from collections import namedtuple
+
+from canonicaljson import encode_canonical_json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class TransactionStore(SQLBaseStore):
+ """A collection of queries for handling PDUs.
+ """
+
+ def get_received_txn_response(self, transaction_id, origin):
+ """For an incoming transaction from a given origin, check if we have
+ already responded to it. If so, return the response code and response
+ body (as a dict).
+
+ Args:
+ transaction_id (str)
+ origin(str)
+
+ Returns:
+ None if we have not previously responded to this transaction, or
+ a 2-tuple of (response code, response body dict) if we have.
+ """
+
+ return self.runInteraction(
+ "get_received_txn_response",
+ self._get_received_txn_response, transaction_id, origin
+ )
+
+ def _get_received_txn_response(self, txn, transaction_id, origin):
+ result = self._simple_select_one_txn(
+ txn,
+ table=ReceivedTransactionsTable.table_name,
+ keyvalues={
+ "transaction_id": transaction_id,
+ "origin": origin,
+ },
+ retcols=ReceivedTransactionsTable.fields,
+ allow_none=True,
+ )
+
+ if result and result["response_code"]:
+ return result["response_code"], result["response_json"]
+ else:
+ return None
+
+ def set_received_txn_response(self, transaction_id, origin, code,
+ response_dict):
+ """Persist the response we returened for an incoming transaction, and
+ should return for subsequent transactions with the same transaction_id
+ and origin.
+
+ Args:
+ transaction_id (str)
+ origin (str)
+ code (int)
+ response_dict (dict)
+ """
+
+ return self._simple_insert(
+ table=ReceivedTransactionsTable.table_name,
+ values={
+ "transaction_id": transaction_id,
+ "origin": origin,
+ "response_code": code,
+ "response_json": buffer(encode_canonical_json(response_dict)),
+ },
+ or_ignore=True,
+ desc="set_received_txn_response",
+ )
+
+ def prep_send_transaction(self, transaction_id, destination,
+ origin_server_ts):
+ """Persists an outgoing transaction and calculates the values for the
+ previous transaction id list.
+
+ This should be called before sending the transaction so that it has the
+ correct value for the `prev_ids` key.
+
+ Args:
+ transaction_id (str)
+ destination (str)
+ origin_server_ts (int)
+
+ Returns:
+ list: A list of previous transaction ids.
+ """
+
+ return self.runInteraction(
+ "prep_send_transaction",
+ self._prep_send_transaction,
+ transaction_id, destination, origin_server_ts
+ )
+
+ def _prep_send_transaction(self, txn, transaction_id, destination,
+ origin_server_ts):
+
+ next_id = self._transaction_id_gen.get_next_txn(txn)
+
+ # First we find out what the prev_txns should be.
+ # Since we know that we are only sending one transaction at a time,
+ # we can simply take the last one.
+ query = (
+ "SELECT * FROM sent_transactions"
+ " WHERE destination = ?"
+ " ORDER BY id DESC LIMIT 1"
+ )
+
+ txn.execute(query, (destination,))
+ results = self.cursor_to_dict(txn)
+
+ prev_txns = [r["transaction_id"] for r in results]
+
+ # Actually add the new transaction to the sent_transactions table.
+
+ self._simple_insert_txn(
+ txn,
+ table=SentTransactions.table_name,
+ values={
+ "id": next_id,
+ "transaction_id": transaction_id,
+ "destination": destination,
+ "ts": origin_server_ts,
+ "response_code": 0,
+ "response_json": None,
+ }
+ )
+
+ # TODO Update the tx id -> pdu id mapping
+
+ return prev_txns
+
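+ # Editorial sketch, not part of the original patch: the intended send
+ # flow. prep_send_transaction runs before the transaction is sent so
+ # the `prev_ids` key is correct, and delivered_txn records the outcome:
+ #
+ # prev_ids = yield store.prep_send_transaction(
+ # txn_id, destination, origin_server_ts,
+ # )
+ # # ... send the transaction over federation ...
+ # yield store.delivered_txn(txn_id, destination, 200, response_dict)
+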
+ def delivered_txn(self, transaction_id, destination, code, response_dict):
+ """Persists the response for an outgoing transaction.
+
+ Args:
+ transaction_id (str)
+ destination (str)
+ code (int)
+ response_dict (dict)
+ """
+ return self.runInteraction(
+ "delivered_txn",
+ self._delivered_txn,
+ transaction_id, destination, code,
+ buffer(encode_canonical_json(response_dict)),
+ )
+
+ def _delivered_txn(self, txn, transaction_id, destination,
+ code, response_json):
+ self._simple_update_one_txn(
+ txn,
+ table=SentTransactions.table_name,
+ keyvalues={
+ "transaction_id": transaction_id,
+ "destination": destination,
+ },
+ updatevalues={
+ "response_code": code,
+ "response_json": None, # For now, don't persist response_json
+ }
+ )
+
+ def get_transactions_after(self, transaction_id, destination):
+ """Get all transactions after a given local transaction_id.
+
+ Args:
+ transaction_id (str)
+ destination (str)
+
+ Returns:
+ list: A list of dicts
+ """
+ return self.runInteraction(
+ "get_transactions_after",
+ self._get_transactions_after, transaction_id, destination
+ )
+
+ def _get_transactions_after(self, txn, transaction_id, destination):
+ query = (
+ "SELECT * FROM sent_transactions"
+ " WHERE destination = ? AND id >"
+ " ("
+ " SELECT id FROM sent_transactions"
+ " WHERE transaction_id = ? AND destination = ?"
+ " )"
+ )
+
+ txn.execute(query, (destination, transaction_id, destination))
+
+ return self.cursor_to_dict(txn)
+
+ @cached()
+ def get_destination_retry_timings(self, destination):
+ """Gets the current retry timings (if any) for a given destination.
+
+ Args:
+ destination (str)
+
+ Returns:
+ None if not retrying.
+ Otherwise a dict describing the retry scheme.
+ """
+ return self.runInteraction(
+ "get_destination_retry_timings",
+ self._get_destination_retry_timings, destination)
+
+ def _get_destination_retry_timings(self, txn, destination):
+ result = self._simple_select_one_txn(
+ txn,
+ table=DestinationsTable.table_name,
+ keyvalues={
+ "destination": destination,
+ },
+ retcols=DestinationsTable.fields,
+ allow_none=True,
+ )
+
+ if result and result["retry_last_ts"] > 0:
+ return result
+ else:
+ return None
+
+ def set_destination_retry_timings(self, destination,
+ retry_last_ts, retry_interval):
+ """Sets the current retry timings for a given destination.
+ Both timings should be zero if retrying is no longer occurring.
+
+ Args:
+ destination (str)
+ retry_last_ts (int) - time of last retry attempt in unix epoch ms
+ retry_interval (int) - how long until next retry in ms
+ """
+
+ # XXX: we could choose to not bother persisting this if our cache thinks
+ # this is a NOOP
+ return self.runInteraction(
+ "set_destination_retry_timings",
+ self._set_destination_retry_timings,
+ destination,
+ retry_last_ts,
+ retry_interval,
+ )
+
+ def _set_destination_retry_timings(self, txn, destination,
+ retry_last_ts, retry_interval):
+ txn.call_after(self.get_destination_retry_timings.invalidate, (destination,))
+
+ self._simple_upsert_txn(
+ txn,
+ "destinations",
+ keyvalues={
+ "destination": destination,
+ },
+ values={
+ "retry_last_ts": retry_last_ts,
+ "retry_interval": retry_interval,
+ },
+ insertion_values={
+ "destination": destination,
+ "retry_last_ts": retry_last_ts,
+ "retry_interval": retry_interval,
+ }
+ )
+
+ def get_destinations_needing_retry(self):
+ """Get all destinations which are due a retry for sending a transaction.
+
+ Returns:
+ list: A list of dicts
+ """
+
+ return self.runInteraction(
+ "get_destinations_needing_retry",
+ self._get_destinations_needing_retry
+ )
+
+ def _get_destinations_needing_retry(self, txn):
+ query = (
+ "SELECT * FROM destinations"
+ " WHERE retry_last_ts > 0 and retry_next_ts < ?"
+ )
+
+ txn.execute(query, (self._clock.time_msec(),))
+ return self.cursor_to_dict(txn)
+
+
+class ReceivedTransactionsTable(object):
+ table_name = "received_transactions"
+
+ fields = [
+ "transaction_id",
+ "origin",
+ "ts",
+ "response_code",
+ "response_json",
+ "has_been_referenced",
+ ]
+
+
+class SentTransactions(object):
+ table_name = "sent_transactions"
+
+ fields = [
+ "id",
+ "transaction_id",
+ "destination",
+ "ts",
+ "response_code",
+ "response_json",
+ ]
+
+ EntryType = namedtuple("SentTransactionsEntry", fields)
+
+
+class TransactionsToPduTable(object):
+ table_name = "transaction_id_to_pdu"
+
+ fields = [
+ "transaction_id",
+ "destination",
+ "pdu_id",
+ "pdu_origin",
+ ]
+
+
+class DestinationsTable(object):
+ table_name = "destinations"
+
+ fields = [
+ "destination",
+ "retry_last_ts",
+ "retry_interval",
+ ]
diff --git a/synapse/storage/util/__init__.py b/synapse/storage/util/__init__.py
new file mode 100644
index 00000000..c488b10d
--- /dev/null
+++ b/synapse/storage/util/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
new file mode 100644
index 00000000..e956df62
--- /dev/null
+++ b/synapse/storage/util/id_generators.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from collections import deque
+import contextlib
+import threading
+
+
+class IdGenerator(object):
+ def __init__(self, table, column, store):
+ self.table = table
+ self.column = column
+ self.store = store
+ self._lock = threading.Lock()
+ self._next_id = None
+
+ @defer.inlineCallbacks
+ def get_next(self):
+ if self._next_id is None:
+ yield self.store.runInteraction(
+ "IdGenerator_%s" % (self.table,),
+ self.get_next_txn,
+ )
+
+ with self._lock:
+ i = self._next_id
+ self._next_id += 1
+ defer.returnValue(i)
+
+ def get_next_txn(self, txn):
+ with self._lock:
+ if self._next_id:
+ i = self._next_id
+ self._next_id += 1
+ return i
+ else:
+ txn.execute(
+ "SELECT MAX(%s) FROM %s" % (self.column, self.table,)
+ )
+
+ val, = txn.fetchone()
+ cur = val or 0
+ cur += 1
+ self._next_id = cur + 1
+
+ return cur
+
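+ # Editorial sketch, not part of the original patch: hypothetical usage.
+ # The first call lazily seeds the counter from MAX(column); later calls
+ # are served from memory under the lock:
+ #
+ # gen = IdGenerator("received_transactions", "id", store)
+ # new_id = yield gen.get_next()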
+
+class StreamIdGenerator(object):
+ """Used to generate new stream ids when persisting events while keeping
+ track of which transactions have been completed.
+
+ This allows us to get the "current" stream id, i.e. the stream id such that
+ all ids less than or equal to it have completed. This handles the fact that
+ persistence of events can complete out of order.
+
+ Usage:
+ with (yield stream_id_gen.get_next(store)) as stream_id:
+ # ... persist event ...
+ """
+ def __init__(self, table, column):
+ self.table = table
+ self.column = column
+
+ self._lock = threading.Lock()
+
+ self._current_max = None
+ self._unfinished_ids = deque()
+
+ @defer.inlineCallbacks
+ def get_next(self, store):
+ """
+ Usage:
+ with (yield stream_id_gen.get_next(store)) as stream_id:
+ # ... persist event ...
+ """
+ if not self._current_max:
+ yield store.runInteraction(
+ "_compute_current_max",
+ self._get_or_compute_current_max,
+ )
+
+ with self._lock:
+ self._current_max += 1
+ next_id = self._current_max
+
+ self._unfinished_ids.append(next_id)
+
+ @contextlib.contextmanager
+ def manager():
+ try:
+ yield next_id
+ finally:
+ with self._lock:
+ self._unfinished_ids.remove(next_id)
+
+ defer.returnValue(manager())
+
+ @defer.inlineCallbacks
+ def get_next_mult(self, store, n):
+ """
+ Usage:
+ with (yield stream_id_gen.get_next_mult(store, n)) as stream_ids:
+ # ... persist events ...
+ """
+ if not self._current_max:
+ yield store.runInteraction(
+ "_compute_current_max",
+ self._get_or_compute_current_max,
+ )
+
+ with self._lock:
+ next_ids = range(self._current_max + 1, self._current_max + n + 1)
+ self._current_max += n
+
+ for next_id in next_ids:
+ self._unfinished_ids.append(next_id)
+
+ @contextlib.contextmanager
+ def manager():
+ try:
+ yield next_ids
+ finally:
+ with self._lock:
+ for next_id in next_ids:
+ self._unfinished_ids.remove(next_id)
+
+ defer.returnValue(manager())
+
+ @defer.inlineCallbacks
+ def get_max_token(self, store):
+ """Returns the maximum stream id such that all stream ids less than or
+ equal to it have been successfully persisted.
+ """
+ if not self._current_max:
+ yield store.runInteraction(
+ "_compute_current_max",
+ self._get_or_compute_current_max,
+ )
+
+ with self._lock:
+ if self._unfinished_ids:
+ defer.returnValue(self._unfinished_ids[0] - 1)
+
+ defer.returnValue(self._current_max)
+
+ def _get_or_compute_current_max(self, txn):
+ with self._lock:
+ txn.execute("SELECT MAX(%s) FROM %s" % (self.column, self.table))
+ rows = txn.fetchall()
+ val, = rows[0]
+
+ self._current_max = int(val) if val else 1
+
+ return self._current_max
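+
+ # Editorial sketch, not part of the original patch: why get_max_token
+ # tracks unfinished ids. If writers hold ids 11 and 12 and the writer
+ # for 12 finishes first, get_max_token keeps reporting 10 until 11
+ # completes, so readers never observe a gap in the stream:
+ #
+ # with (yield stream_id_gen.get_next(store)) as stream_id:
+ # yield store.runInteraction("persist", persist_txn, stream_id)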
diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py
new file mode 100644
index 00000000..c488b10d
--- /dev/null
+++ b/synapse/streams/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/synapse/streams/config.py b/synapse/streams/config.py
new file mode 100644
index 00000000..167bfe0d
--- /dev/null
+++ b/synapse/streams/config.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.api.errors import SynapseError
+from synapse.types import StreamToken
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class SourcePaginationConfig(object):
+
+ """A configuration object which stores pagination parameters for a
+ specific event source."""
+
+ def __init__(self, from_key=None, to_key=None, direction='f',
+ limit=None):
+ self.from_key = from_key
+ self.to_key = to_key
+ self.direction = 'f' if direction == 'f' else 'b'
+ self.limit = int(limit) if limit is not None else None
+
+ def __repr__(self):
+ return (
+ "StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)"
+ ) % (self.from_key, self.to_key, self.direction, self.limit)
+
+
+class PaginationConfig(object):
+
+ """A configuration object which stores pagination parameters."""
+
+ def __init__(self, from_token=None, to_token=None, direction='f',
+ limit=None):
+ self.from_token = from_token
+ self.to_token = to_token
+ self.direction = 'f' if direction == 'f' else 'b'
+ self.limit = int(limit) if limit is not None else None
+
+ @classmethod
+ def from_request(cls, request, raise_invalid_params=True,
+ default_limit=None):
+ def get_param(name, default=None):
+ lst = request.args.get(name, [])
+ if len(lst) > 1:
+ raise SynapseError(
+ 400, "%s must be specified only once" % (name,)
+ )
+ elif len(lst) == 1:
+ return lst[0]
+ else:
+ return default
+
+ direction = get_param("dir", 'f')
+ if direction not in ['f', 'b']:
+ raise SynapseError(400, "'dir' parameter is invalid.")
+
+ from_tok = get_param("from")
+ to_tok = get_param("to")
+
+ try:
+ if from_tok == "END":
+ from_tok = None # For backwards compat.
+ elif from_tok:
+ from_tok = StreamToken.from_string(from_tok)
+ except:
+ raise SynapseError(400, "'from' paramater is invalid")
+
+ try:
+ if to_tok:
+ to_tok = StreamToken.from_string(to_tok)
+ except:
+ raise SynapseError(400, "'to' paramater is invalid")
+
+ limit = get_param("limit", None)
+ if limit is not None and not limit.isdigit():
+ raise SynapseError(400, "'limit' parameter must be an integer.")
+
+ if limit is None:
+ limit = default_limit
+
+ try:
+ return PaginationConfig(from_tok, to_tok, direction, limit)
+ except:
+ logger.exception("Failed to create pagination config")
+ raise SynapseError(400, "Invalid request.")
+
+ def __repr__(self):
+ return (
+ "PaginationConfig(from_tok=%r, to_tok=%r,"
+ " direction=%r, limit=%r)"
+ ) % (self.from_token, self.to_token, self.direction, self.limit)
+
+ def get_source_config(self, source_name):
+ keyname = "%s_key" % source_name
+
+ return SourcePaginationConfig(
+ from_key=getattr(self.from_token, keyname),
+ to_key=getattr(self.to_token, keyname) if self.to_token else None,
+ direction=self.direction,
+ limit=self.limit,
+ )
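
A sketch of from_request in use; only the request's .args mapping of query parameters to value lists is assumed (as Twisted provides), and FakeRequest is an illustrative stand-in, not a real class:

    from synapse.streams.config import PaginationConfig

    class FakeRequest(object):
        # as if the URL ended in ?dir=b&limit=10&from=END
        args = {"dir": ["b"], "limit": ["10"], "from": ["END"]}

    config = PaginationConfig.from_request(FakeRequest(), default_limit=20)
    # config.direction == 'b' and config.limit == 10; "END" is accepted
    # for backwards compatibility and treated as "no from token" (None).
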
diff --git a/synapse/streams/events.py b/synapse/streams/events.py
new file mode 100644
index 00000000..f0d68b5b
--- /dev/null
+++ b/synapse/streams/events.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.types import StreamToken
+
+from synapse.handlers.presence import PresenceEventSource
+from synapse.handlers.room import RoomEventSource
+from synapse.handlers.typing import TypingNotificationEventSource
+from synapse.handlers.receipts import ReceiptEventSource
+from synapse.handlers.private_user_data import PrivateUserDataEventSource
+
+
+class EventSources(object):
+ SOURCE_TYPES = {
+ "room": RoomEventSource,
+ "presence": PresenceEventSource,
+ "typing": TypingNotificationEventSource,
+ "receipt": ReceiptEventSource,
+ "private_user_data": PrivateUserDataEventSource,
+ }
+
+ def __init__(self, hs):
+ self.sources = {
+ name: cls(hs)
+ for name, cls in EventSources.SOURCE_TYPES.items()
+ }
+
+ @defer.inlineCallbacks
+ def get_current_token(self, direction='f'):
+ token = StreamToken(
+ room_key=(
+ yield self.sources["room"].get_current_key(direction)
+ ),
+ presence_key=(
+ yield self.sources["presence"].get_current_key()
+ ),
+ typing_key=(
+ yield self.sources["typing"].get_current_key()
+ ),
+ receipt_key=(
+ yield self.sources["receipt"].get_current_key()
+ ),
+ private_user_data_key=(
+ yield self.sources["private_user_data"].get_current_key()
+ ),
+ )
+ defer.returnValue(token)
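
get_current_token is asynchronous, so callers yield on it inside defer.inlineCallbacks. A sketch, assuming hs is the HomeServer dependency container used throughout the codebase:

    from twisted.internet import defer
    from synapse.streams.events import EventSources

    @defer.inlineCallbacks
    def snapshot_now(hs):
        # The token records each source's current position; clients use it
        # as their "now" when asking for incremental updates later.
        token = yield EventSources(hs).get_current_token()
        defer.returnValue(token.to_string())
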
diff --git a/synapse/types.py b/synapse/types.py
new file mode 100644
index 00000000..28344d8b
--- /dev/null
+++ b/synapse/types.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.api.errors import SynapseError
+
+from collections import namedtuple
+
+
+class DomainSpecificString(
+ namedtuple("DomainSpecificString", ("localpart", "domain"))
+):
+ """Common base class among ID/name strings that have a local part and a
+ domain name, prefixed with a sigil.
+
+ Has the fields:
+
+ 'localpart' : The local part of the name (without the leading sigil)
+ 'domain' : The domain part of the name
+ """
+
+ # Deny iteration because it will bite you if you try to create a singleton
+ # set by:
+ # users = set(user)
+ def __iter__(self):
+ raise ValueError("Attempted to iterate a %s" % (type(self).__name__,))
+
+    # Because this class is a namedtuple of strings, it is deeply
+ # immutable.
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
+ @classmethod
+ def from_string(cls, s):
+ """Parse the string given by 's' into a structure object."""
+ if len(s) < 1 or s[0] != cls.SIGIL:
+ raise SynapseError(400, "Expected %s string to start with '%s'" % (
+ cls.__name__, cls.SIGIL,
+ ))
+
+ parts = s[1:].split(':', 1)
+ if len(parts) != 2:
+ raise SynapseError(
+ 400, "Expected %s of the form '%slocalname:domain'" % (
+ cls.__name__, cls.SIGIL,
+ )
+ )
+
+ domain = parts[1]
+
+ # This code will need changing if we want to support multiple domain
+ # names on one HS
+ return cls(localpart=parts[0], domain=domain)
+
+ def to_string(self):
+ """Return a string encoding the fields of the structure object."""
+ return "%s%s:%s" % (self.SIGIL, self.localpart, self.domain)
+
+ __str__ = to_string
+
+ @classmethod
+ def create(cls, localpart, domain,):
+ return cls(localpart=localpart, domain=domain)
+
+
+class UserID(DomainSpecificString):
+ """Structure representing a user ID."""
+ SIGIL = "@"
+
+
+class RoomAlias(DomainSpecificString):
+ """Structure representing a room name."""
+ SIGIL = "#"
+
+
+class RoomID(DomainSpecificString):
+ """Structure representing a room id. """
+ SIGIL = "!"
+
+
+class EventID(DomainSpecificString):
+ """Structure representing an event id. """
+ SIGIL = "$"
+
+
+class StreamToken(
+ namedtuple("Token", (
+ "room_key",
+ "presence_key",
+ "typing_key",
+ "receipt_key",
+ "private_user_data_key",
+ ))
+):
+ _SEPARATOR = "_"
+
+ @classmethod
+ def from_string(cls, string):
+ try:
+ keys = string.split(cls._SEPARATOR)
+ while len(keys) < len(cls._fields):
+ # i.e. old token from before receipt_key
+ keys.append("0")
+ return cls(*keys)
+ except:
+ raise SynapseError(400, "Invalid Token")
+
+ def to_string(self):
+ return self._SEPARATOR.join([str(k) for k in self])
+
+ @property
+ def room_stream_id(self):
+ # TODO(markjh): Awful hack to work around hacks in the presence tests
+ # which assume that the keys are integers.
+ if type(self.room_key) is int:
+ return self.room_key
+ else:
+ return int(self.room_key[1:].split("-")[-1])
+
+ def is_after(self, other):
+ """Does this token contain events that the other doesn't?"""
+ return (
+ (other.room_stream_id < self.room_stream_id)
+ or (int(other.presence_key) < int(self.presence_key))
+ or (int(other.typing_key) < int(self.typing_key))
+ or (int(other.receipt_key) < int(self.receipt_key))
+ or (int(other.private_user_data_key) < int(self.private_user_data_key))
+ )
+
+ def copy_and_advance(self, key, new_value):
+ """Advance the given key in the token to a new value if and only if the
+ new value is after the old value.
+ """
+ new_token = self.copy_and_replace(key, new_value)
+ if key == "room_key":
+ new_id = new_token.room_stream_id
+ old_id = self.room_stream_id
+ else:
+ new_id = int(getattr(new_token, key))
+ old_id = int(getattr(self, key))
+ if old_id < new_id:
+ return new_token
+ else:
+ return self
+
+ def copy_and_replace(self, key, new_value):
+ d = self._asdict()
+ d[key] = new_value
+ return StreamToken(**d)
+
+
+class RoomStreamToken(namedtuple("_StreamToken", "topological stream")):
+ """Tokens are positions between events. The token "s1" comes after event 1.
+
+ s0 s1
+ | |
+ [0] V [1] V [2]
+
+ Tokens can either be a point in the live event stream or a cursor going
+ through historic events.
+
+ When traversing the live event stream events are ordered by when they
+ arrived at the homeserver.
+
+ When traversing historic events the events are ordered by their depth in
+ the event graph "topological_ordering" and then by when they arrived at the
+ homeserver "stream_ordering".
+
+ Live tokens start with an "s" followed by the "stream_ordering" id of the
+ event it comes after. Historic tokens start with a "t" followed by the
+ "topological_ordering" id of the event it comes after, followed by "-",
+ followed by the "stream_ordering" id of the event it comes after.
+ """
+ __slots__ = []
+
+ @classmethod
+ def parse(cls, string):
+ try:
+ if string[0] == 's':
+ return cls(topological=None, stream=int(string[1:]))
+ if string[0] == 't':
+ parts = string[1:].split('-', 1)
+ return cls(topological=int(parts[0]), stream=int(parts[1]))
+ except:
+ pass
+ raise SynapseError(400, "Invalid token %r" % (string,))
+
+ @classmethod
+ def parse_stream_token(cls, string):
+ try:
+ if string[0] == 's':
+ return cls(topological=None, stream=int(string[1:]))
+ except:
+ pass
+ raise SynapseError(400, "Invalid token %r" % (string,))
+
+ def __str__(self):
+ if self.topological is not None:
+ return "t%d-%d" % (self.topological, self.stream)
+ else:
+ return "s%d" % (self.stream,)
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
new file mode 100644
index 00000000..d69c7cb9
--- /dev/null
+++ b/synapse/util/__init__.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+
+from twisted.internet import defer, reactor, task
+
+import time
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def unwrapFirstError(failure):
+ # defer.gatherResults and DeferredLists wrap failures.
+ failure.trap(defer.FirstError)
+ return failure.value.subFailure
+
+
+class Clock(object):
+ """A small utility that obtains current time-of-day so that time may be
+ mocked during unit-tests.
+
+    TODO(paul): Also move the sleep() functionality into it
+ """
+
+ def time(self):
+ """Returns the current system time in seconds since epoch."""
+ return time.time()
+
+ def time_msec(self):
+ """Returns the current system time in miliseconds since epoch."""
+ return self.time() * 1000
+
+ def looping_call(self, f, msec):
+ l = task.LoopingCall(f)
+ l.start(msec/1000.0, now=False)
+ return l
+
+ def stop_looping_call(self, loop):
+ loop.stop()
+
+ def call_later(self, delay, callback, *args, **kwargs):
+ """Call something later
+
+ Args:
+ delay(float): How long to wait in seconds.
+ callback(function): Function to call
+            *args: Positional arguments to pass to function.
+            **kwargs: Keyword arguments to pass to function.
+ """
+ current_context = LoggingContext.current_context()
+
+ def wrapped_callback(*args, **kwargs):
+ with PreserveLoggingContext():
+ LoggingContext.thread_local.current_context = current_context
+ callback(*args, **kwargs)
+
+ with PreserveLoggingContext():
+ return reactor.callLater(delay, wrapped_callback, *args, **kwargs)
+
+ def cancel_call_later(self, timer, ignore_errs=False):
+ try:
+ timer.cancel()
+ except:
+ if not ignore_errs:
+ raise
+
+ def time_bound_deferred(self, given_deferred, time_out):
+ if given_deferred.called:
+ return given_deferred
+
+ ret_deferred = defer.Deferred()
+
+ def timed_out_fn():
+ try:
+ ret_deferred.errback(RuntimeError("Timed out"))
+ except:
+ pass
+
+ try:
+ given_deferred.cancel()
+ except:
+ pass
+
+ timer = None
+
+ def cancel(res):
+ try:
+ self.cancel_call_later(timer)
+ except:
+ pass
+ return res
+
+ ret_deferred.addBoth(cancel)
+
+        def success(res):
+ try:
+ ret_deferred.callback(res)
+ except:
+ pass
+
+ return res
+
+ def err(res):
+ try:
+ ret_deferred.errback(res)
+ except:
+ pass
+
+        given_deferred.addCallbacks(callback=success, errback=err)
+
+ timer = self.call_later(time_out, timed_out_fn)
+
+ return ret_deferred
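
A sketch of time_bound_deferred in use, assuming a running Twisted reactor: the returned deferred mirrors the original's result, or errbacks with RuntimeError("Timed out") and cancels the original if the timer fires first.

    from twisted.internet import defer
    from synapse.util import Clock

    clock = Clock()
    slow = defer.Deferred()          # stands in for a deferred that stalls
    bounded = clock.time_bound_deferred(slow, time_out=5)

    def on_timeout(failure):
        failure.trap(RuntimeError)   # "Timed out", roughly 5 seconds later
    bounded.addErrback(on_timeout)
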
diff --git a/synapse/util/async.py b/synapse/util/async.py
new file mode 100644
index 00000000..7bf2d38b
--- /dev/null
+++ b/synapse/util/async.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer, reactor
+
+from .logcontext import preserve_context_over_deferred
+
+
+def sleep(seconds):
+ d = defer.Deferred()
+ reactor.callLater(seconds, d.callback, seconds)
+ return preserve_context_over_deferred(d)
+
+
+def run_on_reactor():
+ """ This will cause the rest of the function to be invoked upon the next
+ iteration of the main loop
+ """
+ return sleep(0)
+
+
+class ObservableDeferred(object):
+ """Wraps a deferred object so that we can add observer deferreds. These
+ observer deferreds do not affect the callback chain of the original
+ deferred.
+
+ If consumeErrors is true errors will be captured from the origin deferred.
+
+ Cancelling or otherwise resolving an observer will not affect the original
+ ObservableDeferred.
+ """
+
+ __slots__ = ["_deferred", "_observers", "_result"]
+
+ def __init__(self, deferred, consumeErrors=False):
+ object.__setattr__(self, "_deferred", deferred)
+ object.__setattr__(self, "_result", None)
+ object.__setattr__(self, "_observers", set())
+
+ def callback(r):
+ object.__setattr__(self, "_result", (True, r))
+ while self._observers:
+ try:
+ self._observers.pop().callback(r)
+ except:
+ pass
+ return r
+
+ def errback(f):
+ object.__setattr__(self, "_result", (False, f))
+ while self._observers:
+ try:
+ self._observers.pop().errback(f)
+ except:
+ pass
+
+ if consumeErrors:
+ return None
+ else:
+ return f
+
+ deferred.addCallbacks(callback, errback)
+
+ def observe(self):
+ if not self._result:
+ d = defer.Deferred()
+
+ def remove(r):
+ self._observers.discard(d)
+ return r
+ d.addBoth(remove)
+
+ self._observers.add(d)
+ return d
+ else:
+ success, res = self._result
+ return defer.succeed(res) if success else defer.fail(res)
+
+ def observers(self):
+ return self._observers
+
+ def __getattr__(self, name):
+ return getattr(self._deferred, name)
+
+ def __setattr__(self, name, value):
+ setattr(self._deferred, name, value)
+
+ def __repr__(self):
+ return "<ObservableDeferred object at %s, result=%r, _deferred=%r>" % (
+ id(self), self._result, self._deferred,
+ )
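
Each observe() call returns an independent deferred, so resolving, failing or cancelling one observer leaves the others and the original untouched. A minimal example:

    from twisted.internet import defer
    from synapse.util.async import ObservableDeferred

    origin = defer.Deferred()
    observable = ObservableDeferred(origin, consumeErrors=True)

    results = []
    observable.observe().addCallback(results.append)
    observable.observe().addCallback(results.append)

    origin.callback("hello")
    # Observers added after resolution fire immediately with the result:
    observable.observe().addCallback(results.append)
    assert results == ["hello", "hello", "hello"]
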
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
new file mode 100644
index 00000000..da0e06a4
--- /dev/null
+++ b/synapse/util/caches/__init__.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import synapse.metrics
+
+DEBUG_CACHES = False
+
+metrics = synapse.metrics.get_metrics_for("synapse.util.caches")
+
+caches_by_name = {}
+cache_counter = metrics.register_cache(
+ "cache",
+ lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()},
+ labels=["name"],
+)
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
new file mode 100644
index 00000000..362944bc
--- /dev/null
+++ b/synapse/util/caches/descriptors.py
@@ -0,0 +1,377 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from synapse.util.async import ObservableDeferred
+from synapse.util import unwrapFirstError
+from synapse.util.caches.lrucache import LruCache
+
+from . import caches_by_name, DEBUG_CACHES, cache_counter
+
+from twisted.internet import defer
+
+from collections import OrderedDict
+
+import functools
+import inspect
+import threading
+
+logger = logging.getLogger(__name__)
+
+
+_CacheSentinel = object()
+
+
+class Cache(object):
+
+ def __init__(self, name, max_entries=1000, keylen=1, lru=True):
+ if lru:
+ self.cache = LruCache(max_size=max_entries)
+ self.max_entries = None
+ else:
+ self.cache = OrderedDict()
+ self.max_entries = max_entries
+
+ self.name = name
+ self.keylen = keylen
+ self.sequence = 0
+ self.thread = None
+ caches_by_name[name] = self.cache
+
+ def check_thread(self):
+ expected_thread = self.thread
+ if expected_thread is None:
+ self.thread = threading.current_thread()
+ else:
+ if expected_thread is not threading.current_thread():
+ raise ValueError(
+ "Cache objects can only be accessed from the main thread"
+ )
+
+ def get(self, key, default=_CacheSentinel):
+ val = self.cache.get(key, _CacheSentinel)
+ if val is not _CacheSentinel:
+ cache_counter.inc_hits(self.name)
+ return val
+
+ cache_counter.inc_misses(self.name)
+
+ if default is _CacheSentinel:
+ raise KeyError()
+ else:
+ return default
+
+ def update(self, sequence, key, value):
+ self.check_thread()
+ if self.sequence == sequence:
+            # Only update the cache if the cache's sequence number matches the
+ # number that the cache had before the SELECT was started (SYN-369)
+ self.prefill(key, value)
+
+ def prefill(self, key, value):
+ if self.max_entries is not None:
+ while len(self.cache) >= self.max_entries:
+ self.cache.popitem(last=False)
+
+ self.cache[key] = value
+
+ def invalidate(self, key):
+ self.check_thread()
+ if not isinstance(key, tuple):
+ raise TypeError(
+ "The cache key must be a tuple not %r" % (type(key),)
+ )
+
+ # Increment the sequence number so that any SELECT statements that
+ # raced with the INSERT don't update the cache (SYN-369)
+ self.sequence += 1
+ self.cache.pop(key, None)
+
+ def invalidate_all(self):
+ self.check_thread()
+ self.sequence += 1
+ self.cache.clear()
+
+
+class CacheDescriptor(object):
+ """ A method decorator that applies a memoizing cache around the function.
+
+ This caches deferreds, rather than the results themselves. Deferreds that
+ fail are removed from the cache.
+
+ The function is presumed to take zero or more arguments, which are used in
+ a tuple as the key for the cache. Hits are served directly from the cache;
+ misses use the function body to generate the value.
+
+ The wrapped function has an additional member, a callable called
+ "invalidate". This can be used to remove individual entries from the cache.
+
+ The wrapped function has another additional callable, called "prefill",
+ which can be used to insert values into the cache specifically, without
+ calling the calculation function.
+ """
+ def __init__(self, orig, max_entries=1000, num_args=1, lru=True,
+ inlineCallbacks=False):
+ self.orig = orig
+
+ if inlineCallbacks:
+ self.function_to_call = defer.inlineCallbacks(orig)
+ else:
+ self.function_to_call = orig
+
+ self.max_entries = max_entries
+ self.num_args = num_args
+ self.lru = lru
+
+ self.arg_names = inspect.getargspec(orig).args[1:num_args+1]
+
+ if len(self.arg_names) < self.num_args:
+ raise Exception(
+ "Not enough explicit positional arguments to key off of for %r."
+ " (@cached cannot key off of *args or **kwars)"
+ % (orig.__name__,)
+ )
+
+ self.cache = Cache(
+ name=self.orig.__name__,
+ max_entries=self.max_entries,
+ keylen=self.num_args,
+ lru=self.lru,
+ )
+
+ def __get__(self, obj, objtype=None):
+
+ @functools.wraps(self.orig)
+ def wrapped(*args, **kwargs):
+ arg_dict = inspect.getcallargs(self.orig, obj, *args, **kwargs)
+ cache_key = tuple(arg_dict[arg_nm] for arg_nm in self.arg_names)
+ try:
+ cached_result_d = self.cache.get(cache_key)
+
+ observer = cached_result_d.observe()
+ if DEBUG_CACHES:
+ @defer.inlineCallbacks
+ def check_result(cached_result):
+ actual_result = yield self.function_to_call(obj, *args, **kwargs)
+ if actual_result != cached_result:
+ logger.error(
+ "Stale cache entry %s%r: cached: %r, actual %r",
+ self.orig.__name__, cache_key,
+ cached_result, actual_result,
+ )
+ raise ValueError("Stale cache entry")
+ defer.returnValue(cached_result)
+ observer.addCallback(check_result)
+
+ return observer
+ except KeyError:
+ # Get the sequence number of the cache before reading from the
+ # database so that we can tell if the cache is invalidated
+ # while the SELECT is executing (SYN-369)
+ sequence = self.cache.sequence
+
+ ret = defer.maybeDeferred(
+ self.function_to_call,
+ obj, *args, **kwargs
+ )
+
+ def onErr(f):
+ self.cache.invalidate(cache_key)
+ return f
+
+ ret.addErrback(onErr)
+
+ ret = ObservableDeferred(ret, consumeErrors=True)
+ self.cache.update(sequence, cache_key, ret)
+
+ return ret.observe()
+
+ wrapped.invalidate = self.cache.invalidate
+ wrapped.invalidate_all = self.cache.invalidate_all
+ wrapped.prefill = self.cache.prefill
+
+ obj.__dict__[self.orig.__name__] = wrapped
+
+ return wrapped
+
+
+class CacheListDescriptor(object):
+ """Wraps an existing cache to support bulk fetching of keys.
+
+ Given a list of keys it looks in the cache to find any hits, then passes
+    the list of missing keys to the wrapped function.
+ """
+
+ def __init__(self, orig, cache, list_name, num_args=1, inlineCallbacks=False):
+ """
+ Args:
+ orig (function)
+ cache (Cache)
+ list_name (str): Name of the argument which is the bulk lookup list
+ num_args (int)
+ inlineCallbacks (bool): Whether orig is a generator that should
+ be wrapped by defer.inlineCallbacks
+ """
+ self.orig = orig
+
+ if inlineCallbacks:
+ self.function_to_call = defer.inlineCallbacks(orig)
+ else:
+ self.function_to_call = orig
+
+ self.num_args = num_args
+ self.list_name = list_name
+
+ self.arg_names = inspect.getargspec(orig).args[1:num_args+1]
+ self.list_pos = self.arg_names.index(self.list_name)
+
+ self.cache = cache
+
+ self.sentinel = object()
+
+ if len(self.arg_names) < self.num_args:
+ raise Exception(
+ "Not enough explicit positional arguments to key off of for %r."
+ " (@cached cannot key off of *args or **kwars)"
+ % (orig.__name__,)
+ )
+
+ if self.list_name not in self.arg_names:
+ raise Exception(
+ "Couldn't see arguments %r for %r."
+ % (self.list_name, cache.name,)
+ )
+
+ def __get__(self, obj, objtype=None):
+
+ @functools.wraps(self.orig)
+ def wrapped(*args, **kwargs):
+ arg_dict = inspect.getcallargs(self.orig, obj, *args, **kwargs)
+ keyargs = [arg_dict[arg_nm] for arg_nm in self.arg_names]
+ list_args = arg_dict[self.list_name]
+
+ # cached is a dict arg -> deferred, where deferred results in a
+ # 2-tuple (`arg`, `result`)
+ cached = {}
+ missing = []
+ for arg in list_args:
+ key = list(keyargs)
+ key[self.list_pos] = arg
+
+ try:
+ res = self.cache.get(tuple(key)).observe()
+ res.addCallback(lambda r, arg: (arg, r), arg)
+ cached[arg] = res
+ except KeyError:
+ missing.append(arg)
+
+ if missing:
+ sequence = self.cache.sequence
+ args_to_call = dict(arg_dict)
+ args_to_call[self.list_name] = missing
+
+ ret_d = defer.maybeDeferred(
+ self.function_to_call,
+ **args_to_call
+ )
+
+ ret_d = ObservableDeferred(ret_d)
+
+ # We need to create deferreds for each arg in the list so that
+ # we can insert the new deferred into the cache.
+ for arg in missing:
+ observer = ret_d.observe()
+ observer.addCallback(lambda r, arg: r.get(arg, None), arg)
+
+ observer = ObservableDeferred(observer)
+
+ key = list(keyargs)
+ key[self.list_pos] = arg
+ self.cache.update(sequence, tuple(key), observer)
+
+ def invalidate(f, key):
+ self.cache.invalidate(key)
+ return f
+ observer.addErrback(invalidate, tuple(key))
+
+ res = observer.observe()
+ res.addCallback(lambda r, arg: (arg, r), arg)
+
+ cached[arg] = res
+
+ return defer.gatherResults(
+ cached.values(),
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError).addCallback(lambda res: dict(res))
+
+ obj.__dict__[self.orig.__name__] = wrapped
+
+ return wrapped
+
+
+def cached(max_entries=1000, num_args=1, lru=True):
+ return lambda orig: CacheDescriptor(
+ orig,
+ max_entries=max_entries,
+ num_args=num_args,
+ lru=lru
+ )
+
+
+def cachedInlineCallbacks(max_entries=1000, num_args=1, lru=False):
+ return lambda orig: CacheDescriptor(
+ orig,
+ max_entries=max_entries,
+ num_args=num_args,
+ lru=lru,
+ inlineCallbacks=True,
+ )
+
+
+def cachedList(cache, list_name, num_args=1, inlineCallbacks=False):
+ """Creates a descriptor that wraps a function in a `CacheListDescriptor`.
+
+ Used to do batch lookups for an already created cache. A single argument
+ is specified as a list that is iterated through to lookup keys in the
+ original cache. A new list consisting of the keys that weren't in the cache
+ get passed to the original function, the result of which is stored in the
+ cache.
+
+ Args:
+ cache (Cache): The underlying cache to use.
+ list_name (str): The name of the argument that is the list to use to
+ do batch lookups in the cache.
+ num_args (int): Number of arguments to use as the key in the cache.
+ inlineCallbacks (bool): Should the function be wrapped in an
+ `defer.inlineCallbacks`?
+
+ Example:
+
+ class Example(object):
+ @cached(num_args=2)
+            def do_something(self, first_arg, second_arg):
+ ...
+
+ @cachedList(do_something.cache, list_name="second_args", num_args=2)
+ def batch_do_something(self, first_arg, second_args):
+ ...
+ """
+ return lambda orig: CacheListDescriptor(
+ orig,
+ cache=cache,
+ list_name=list_name,
+ num_args=num_args,
+ inlineCallbacks=inlineCallbacks,
+ )
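
The wrapped method also exposes the invalidate and prefill callables described in CacheDescriptor's docstring; writers must evict stale entries themselves. A sketch (the Store class and _db_lookup helper are illustrative, not real Synapse APIs; note that cache keys are always tuples):

    from twisted.internet import defer
    from synapse.util.caches.descriptors import cachedInlineCallbacks

    class Store(object):
        @cachedInlineCallbacks()
        def get_display_name(self, user_id):
            name = yield self._db_lookup(user_id)   # illustrative helper
            defer.returnValue(name)

        def set_display_name(self, user_id, name):
            # ... write to the database, then evict the stale entry;
            # the key is a tuple of the keyed arguments:
            self.get_display_name.invalidate((user_id,))
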
diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py
new file mode 100644
index 00000000..e69adf62
--- /dev/null
+++ b/synapse/util/caches/dictionary_cache.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util.caches.lrucache import LruCache
+from collections import namedtuple
+from . import caches_by_name, cache_counter
+import threading
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+DictionaryEntry = namedtuple("DictionaryEntry", ("full", "value"))
+
+
+class DictionaryCache(object):
+ """Caches key -> dictionary lookups, supporting caching partial dicts, i.e.
+ fetching a subset of dictionary keys for a particular key.
+ """
+
+ def __init__(self, name, max_entries=1000):
+ self.cache = LruCache(max_size=max_entries)
+
+ self.name = name
+ self.sequence = 0
+ self.thread = None
+ # caches_by_name[name] = self.cache
+
+ class Sentinel(object):
+ __slots__ = []
+
+ self.sentinel = Sentinel()
+ caches_by_name[name] = self.cache
+
+ def check_thread(self):
+ expected_thread = self.thread
+ if expected_thread is None:
+ self.thread = threading.current_thread()
+ else:
+ if expected_thread is not threading.current_thread():
+ raise ValueError(
+ "Cache objects can only be accessed from the main thread"
+ )
+
+ def get(self, key, dict_keys=None):
+ entry = self.cache.get(key, self.sentinel)
+ if entry is not self.sentinel:
+ cache_counter.inc_hits(self.name)
+
+ if dict_keys is None:
+ return DictionaryEntry(entry.full, dict(entry.value))
+ else:
+ return DictionaryEntry(entry.full, {
+ k: entry.value[k]
+ for k in dict_keys
+ if k in entry.value
+ })
+
+ cache_counter.inc_misses(self.name)
+ return DictionaryEntry(False, {})
+
+ def invalidate(self, key):
+ self.check_thread()
+
+ # Increment the sequence number so that any SELECT statements that
+ # raced with the INSERT don't update the cache (SYN-369)
+ self.sequence += 1
+ self.cache.pop(key, None)
+
+ def invalidate_all(self):
+ self.check_thread()
+ self.sequence += 1
+ self.cache.clear()
+
+ def update(self, sequence, key, value, full=False):
+ self.check_thread()
+ if self.sequence == sequence:
+            # Only update the cache if the cache's sequence number matches the
+ # number that the cache had before the SELECT was started (SYN-369)
+ if full:
+ self._insert(key, value)
+ else:
+ self._update_or_insert(key, value)
+
+ def _update_or_insert(self, key, value):
+ entry = self.cache.setdefault(key, DictionaryEntry(False, {}))
+ entry.value.update(value)
+
+ def _insert(self, key, value):
+ self.cache[key] = DictionaryEntry(True, value)
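
For example, caching a per-room dictionary and serving partial reads from it (the key names are illustrative):

    from synapse.util.caches.dictionary_cache import DictionaryCache

    cache = DictionaryCache("state", max_entries=100)

    # In real use the sequence is read before the database query so that a
    # racing invalidation can veto the update (SYN-369); here they match:
    cache.update(cache.sequence, "!room:hs", {"a": 1, "b": 2}, full=True)

    entry = cache.get("!room:hs", dict_keys=["a", "c"])
    assert entry.full is True
    assert entry.value == {"a": 1}   # only keys actually cached come back
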
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
new file mode 100644
index 00000000..06d1eea0
--- /dev/null
+++ b/synapse/util/caches/expiringcache.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class ExpiringCache(object):
+ def __init__(self, cache_name, clock, max_len=0, expiry_ms=0,
+ reset_expiry_on_get=False):
+ """
+ Args:
+ cache_name (str): Name of this cache, used for logging.
+ clock (Clock)
+ max_len (int): Max size of dict. If the dict grows larger than this
+ then the oldest items get automatically evicted. Default is 0,
+ which indicates there is no max limit.
+ expiry_ms (int): How long before an item is evicted from the cache
+ in milliseconds. Default is 0, indicating items never get
+ evicted based on time.
+ reset_expiry_on_get (bool): If true, will reset the expiry time for
+ an item on access. Defaults to False.
+
+ """
+ self._cache_name = cache_name
+
+ self._clock = clock
+
+ self._max_len = max_len
+ self._expiry_ms = expiry_ms
+
+ self._reset_expiry_on_get = reset_expiry_on_get
+
+ self._cache = {}
+
+ def start(self):
+ if not self._expiry_ms:
+ # Don't bother starting the loop if things never expire
+ return
+
+ def f():
+ self._prune_cache()
+
+ self._clock.looping_call(f, self._expiry_ms/2)
+
+ def __setitem__(self, key, value):
+ now = self._clock.time_msec()
+ self._cache[key] = _CacheEntry(now, value)
+
+ # Evict if there are now too many items
+ if self._max_len and len(self._cache.keys()) > self._max_len:
+ sorted_entries = sorted(
+ self._cache.items(),
+ key=lambda (k, v): v.time,
+ )
+
+ for k, _ in sorted_entries[self._max_len:]:
+ self._cache.pop(k)
+
+ def __getitem__(self, key):
+ entry = self._cache[key]
+
+ if self._reset_expiry_on_get:
+ entry.time = self._clock.time_msec()
+
+ return entry.value
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def _prune_cache(self):
+ if not self._expiry_ms:
+ # zero expiry time means don't expire. This should never get called
+ # since we have this check in start too.
+ return
+ begin_length = len(self._cache)
+
+ now = self._clock.time_msec()
+
+ keys_to_delete = set()
+
+ for key, cache_entry in self._cache.items():
+ if now - cache_entry.time > self._expiry_ms:
+ keys_to_delete.add(key)
+
+ for k in keys_to_delete:
+ self._cache.pop(k)
+
+ logger.debug(
+ "[%s] _prune_cache before: %d, after len: %d",
+ self._cache_name, begin_length, len(self._cache.keys())
+ )
+
+
+class _CacheEntry(object):
+ def __init__(self, time, value):
+ self.time = time
+ self.value = value
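
A sketch of typical construction; the cache name and parameters are illustrative, and pruning only actually happens once start() has scheduled its loop on a running reactor:

    from synapse.util import Clock
    from synapse.util.caches.expiringcache import ExpiringCache

    cache = ExpiringCache(
        "remote_keys", Clock(),
        max_len=1000,
        expiry_ms=30 * 60 * 1000,   # evict entries after 30 minutes...
        reset_expiry_on_get=True,   # ...of inactivity, not since insertion
    )
    cache.start()                    # schedules pruning every expiry_ms / 2

    cache["key"] = "value"
    assert cache.get("key") == "value"
    assert cache.get("missing", "default") == "default"
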
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
new file mode 100644
index 00000000..cacd7e45
--- /dev/null
+++ b/synapse/util/caches/lrucache.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from functools import wraps
+import threading
+
+
+class LruCache(object):
+ """Least-recently-used cache."""
+ def __init__(self, max_size):
+ cache = {}
+ list_root = []
+ list_root[:] = [list_root, list_root, None, None]
+
+ PREV, NEXT, KEY, VALUE = 0, 1, 2, 3
+
+ lock = threading.Lock()
+
+ def synchronized(f):
+ @wraps(f)
+ def inner(*args, **kwargs):
+ with lock:
+ return f(*args, **kwargs)
+
+ return inner
+
+ def add_node(key, value):
+ prev_node = list_root
+ next_node = prev_node[NEXT]
+ node = [prev_node, next_node, key, value]
+ prev_node[NEXT] = node
+ next_node[PREV] = node
+ cache[key] = node
+
+ def move_node_to_front(node):
+ prev_node = node[PREV]
+ next_node = node[NEXT]
+ prev_node[NEXT] = next_node
+ next_node[PREV] = prev_node
+ prev_node = list_root
+ next_node = prev_node[NEXT]
+ node[PREV] = prev_node
+ node[NEXT] = next_node
+ prev_node[NEXT] = node
+ next_node[PREV] = node
+
+ def delete_node(node):
+ prev_node = node[PREV]
+ next_node = node[NEXT]
+ prev_node[NEXT] = next_node
+ next_node[PREV] = prev_node
+ cache.pop(node[KEY], None)
+
+ @synchronized
+ def cache_get(key, default=None):
+ node = cache.get(key, None)
+ if node is not None:
+ move_node_to_front(node)
+ return node[VALUE]
+ else:
+ return default
+
+ @synchronized
+ def cache_set(key, value):
+ node = cache.get(key, None)
+ if node is not None:
+ move_node_to_front(node)
+ node[VALUE] = value
+ else:
+ add_node(key, value)
+ if len(cache) > max_size:
+ delete_node(list_root[PREV])
+
+ @synchronized
+ def cache_set_default(key, value):
+ node = cache.get(key, None)
+ if node is not None:
+ return node[VALUE]
+ else:
+ add_node(key, value)
+ if len(cache) > max_size:
+ delete_node(list_root[PREV])
+ return value
+
+ @synchronized
+ def cache_pop(key, default=None):
+ node = cache.get(key, None)
+ if node:
+ delete_node(node)
+ return node[VALUE]
+ else:
+ return default
+
+ @synchronized
+ def cache_clear():
+ list_root[NEXT] = list_root
+ list_root[PREV] = list_root
+ cache.clear()
+
+ @synchronized
+ def cache_len():
+ return len(cache)
+
+ @synchronized
+ def cache_contains(key):
+ return key in cache
+
+ self.sentinel = object()
+ self.get = cache_get
+ self.set = cache_set
+ self.setdefault = cache_set_default
+ self.pop = cache_pop
+ self.len = cache_len
+ self.contains = cache_contains
+ self.clear = cache_clear
+
+ def __getitem__(self, key):
+ result = self.get(key, self.sentinel)
+ if result is self.sentinel:
+ raise KeyError()
+ else:
+ return result
+
+ def __setitem__(self, key, value):
+ self.set(key, value)
+
+    def __delitem__(self, key):
+ result = self.pop(key, self.sentinel)
+ if result is self.sentinel:
+ raise KeyError()
+
+ def __len__(self):
+ return self.len()
+
+ def __contains__(self, key):
+ return self.contains(key)
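
Eviction follows recency of use: the intrusive doubly-linked list keeps the most recently touched node at the head and evicts from the tail. For example:

    from synapse.util.caches.lrucache import LruCache

    cache = LruCache(max_size=2)
    cache["a"] = 1
    cache["b"] = 2
    cache.get("a")                   # "a" is now most recent, "b" least
    cache["c"] = 3                   # over max_size: evicts "b" from the tail

    assert "a" in cache and "c" in cache
    assert cache.get("b", "gone") == "gone"
    assert len(cache) == 2
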
diff --git a/synapse/util/debug.py b/synapse/util/debug.py
new file mode 100644
index 00000000..f6a5a841
--- /dev/null
+++ b/synapse/util/debug.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer, reactor
+from functools import wraps
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+
+
+def debug_deferreds():
+ """Cause all deferreds to wait for a reactor tick before running their
+ callbacks. This increases the chance of getting a stack trace out of
+ a defer.inlineCallback since the code waiting on the deferred will get
+ a chance to add an errback before the deferred runs."""
+
+ # Helper method for retrieving and restoring the current logging context
+ # around a callback.
+ def with_logging_context(fn):
+ context = LoggingContext.current_context()
+
+ def restore_context_callback(x):
+ with PreserveLoggingContext():
+ LoggingContext.thread_local.current_context = context
+ return fn(x)
+
+ return restore_context_callback
+
+ # We are going to modify the __init__ method of defer.Deferred so we
+ # need to get a copy of the old method so we can still call it.
+ old__init__ = defer.Deferred.__init__
+
+ # We need to create a deferred to bounce the callbacks through the reactor
+    # but we don't want to add a callback when we create that deferred, so
+    # we create a new type of deferred that uses the old __init__ method.
+ # This is safe as long as the old __init__ method doesn't invoke an
+ # __init__ using super.
+ class Bouncer(defer.Deferred):
+ __init__ = old__init__
+
+ # We'll add this as a callback to all Deferreds. Twisted will wait until
+ # the bouncer deferred resolves before calling the callbacks of the
+ # original deferred.
+ def bounce_callback(x):
+ bouncer = Bouncer()
+ reactor.callLater(0, with_logging_context(bouncer.callback), x)
+ return bouncer
+
+ # We'll add this as an errback to all Deferreds. Twisted will wait until
+ # the bouncer deferred resolves before calling the errbacks of the
+ # original deferred.
+ def bounce_errback(x):
+ bouncer = Bouncer()
+ reactor.callLater(0, with_logging_context(bouncer.errback), x)
+ return bouncer
+
+ @wraps(old__init__)
+ def new__init__(self, *args, **kargs):
+ old__init__(self, *args, **kargs)
+ self.addCallbacks(bounce_callback, bounce_errback)
+
+ defer.Deferred.__init__ = new__init__
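
Because this rebinds defer.Deferred.__init__ process-wide and cannot be undone, it is only suitable for switching on once at startup, before any deferreds are created:

    from synapse.util.debug import debug_deferreds

    debug_deferreds()   # every Deferred now bounces through the reactor
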
diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
new file mode 100644
index 00000000..064c4a7a
--- /dev/null
+++ b/synapse/util/distributor.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.util.logcontext import (
+ PreserveLoggingContext, preserve_context_over_deferred,
+)
+
+from synapse.util import unwrapFirstError
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class Distributor(object):
+ """A central dispatch point for loosely-connected pieces of code to
+ register, observe, and fire signals.
+
+ Signals are named simply by strings.
+
+ TODO(paul): It would be nice to give signals stronger object identities,
+    so we can attach metadata, docstrings, detect typos, etc... But this
+ model will do for today.
+ """
+
+ def __init__(self, suppress_failures=True):
+ self.suppress_failures = suppress_failures
+
+ self.signals = {}
+ self.pre_registration = {}
+
+ def declare(self, name):
+ if name in self.signals:
+ raise KeyError("%r already has a signal named %s" % (self, name))
+
+ self.signals[name] = Signal(
+ name,
+ suppress_failures=self.suppress_failures,
+ )
+
+ if name in self.pre_registration:
+ signal = self.signals[name]
+ for observer in self.pre_registration[name]:
+ signal.observe(observer)
+
+ def observe(self, name, observer):
+ if name in self.signals:
+ self.signals[name].observe(observer)
+ else:
+ # TODO: Avoid strong ordering dependency by allowing people to
+ # pre-register observations on signals that don't exist yet.
+ if name not in self.pre_registration:
+ self.pre_registration[name] = []
+ self.pre_registration[name].append(observer)
+
+ def fire(self, name, *args, **kwargs):
+ if name not in self.signals:
+ raise KeyError("%r does not have a signal named %s" % (self, name))
+
+ return self.signals[name].fire(*args, **kwargs)
+
+
+class Signal(object):
+ """A Signal is a dispatch point that stores a list of callables as
+ observers of it.
+
+ Signals can be "fired", meaning that every callable observing it is
+ invoked. Firing a signal does not change its state; it can be fired again
+ at any later point. Firing a signal passes any arguments from the fire
+ method into all of the observers.
+ """
+
+ def __init__(self, name, suppress_failures):
+ self.name = name
+ self.suppress_failures = suppress_failures
+ self.observers = []
+
+ def observe(self, observer):
+ """Adds a new callable to the observer list which will be invoked by
+ the 'fire' method.
+
+ Each observer callable may return a Deferred."""
+ self.observers.append(observer)
+
+ def fire(self, *args, **kwargs):
+ """Invokes every callable in the observer list, passing in the args and
+ kwargs. Exceptions thrown by observers are logged but ignored. It is
+ not an error to fire a signal with no observers.
+
+ Returns a Deferred that will complete when all the observers have
+ completed."""
+
+ def do(observer):
+ def eb(failure):
+ logger.warning(
+ "%s signal observer %s failed: %r",
+ self.name, observer, failure,
+ exc_info=(
+ failure.type,
+ failure.value,
+ failure.getTracebackObject()))
+ if not self.suppress_failures:
+ return failure
+ return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)
+
+ with PreserveLoggingContext():
+ deferreds = [
+ do(observer)
+ for observer in self.observers
+ ]
+
+ d = defer.gatherResults(deferreds, consumeErrors=True)
+
+ d.addErrback(unwrapFirstError)
+
+ return preserve_context_over_deferred(d)
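
A small example of the declare/observe/fire cycle (the signal name and observer are illustrative). Observers may also be registered before the signal is declared, via the pre-registration list above:

    from synapse.util.distributor import Distributor

    distributor = Distributor()
    distributor.declare("user_joined")

    seen = []
    distributor.observe(
        "user_joined",
        lambda user, room: seen.append((user, room)),
    )

    d = distributor.fire("user_joined", "@alice:hs", "!room:hs")
    # d fires once every observer has completed; with the default
    # suppress_failures=True an observer raising is logged, not raised.
    assert seen == [("@alice:hs", "!room:hs")]
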
diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py
new file mode 100644
index 00000000..9e10d37a
--- /dev/null
+++ b/synapse/util/frozenutils.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from frozendict import frozendict
+
+
+def freeze(o):
+ t = type(o)
+ if t is dict:
+ return frozendict({k: freeze(v) for k, v in o.items()})
+
+ if t is frozendict:
+ return o
+
+ if t is str or t is unicode:
+ return o
+
+ try:
+ return tuple([freeze(i) for i in o])
+ except TypeError:
+ pass
+
+ return o
+
+
+def unfreeze(o):
+ t = type(o)
+ if t is dict or t is frozendict:
+ return dict({k: unfreeze(v) for k, v in o.items()})
+
+ if t is str or t is unicode:
+ return o
+
+ try:
+ return [unfreeze(i) for i in o]
+ except TypeError:
+ pass
+
+ return o
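
freeze and unfreeze are inverses up to container type: dicts become frozendicts, lists become tuples, and strings pass through unchanged. For example:

    from synapse.util.frozenutils import freeze, unfreeze

    event = freeze({"content": {"body": "hi"}, "tags": ["a", "b"]})
    assert event["tags"] == ("a", "b")           # list -> tuple

    thawed = unfreeze(event)
    assert thawed == {"content": {"body": "hi"}, "tags": ["a", "b"]}
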
diff --git a/synapse/util/jsonobject.py b/synapse/util/jsonobject.py
new file mode 100644
index 00000000..00f86ed2
--- /dev/null
+++ b/synapse/util/jsonobject.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class JsonEncodedObject(object):
+ """ A common base class for defining protocol units that are represented
+ as JSON.
+
+ Attributes:
+ unrecognized_keys (dict): A dict containing all the key/value pairs we
+ don't recognize.
+ """
+
+ valid_keys = [] # keys we will store
+ """A list of strings that represent keys we know about
+ and can handle. If we have values for these keys they will be
+ included in the `dictionary` instance variable.
+ """
+
+ internal_keys = [] # keys to ignore while building dict
+ """A list of strings that should *not* be encoded into JSON.
+ """
+
+ required_keys = []
+ """A list of strings that we require to exist. If they are not given upon
+ construction it raises an exception.
+ """
+
+ def __init__(self, **kwargs):
+ """ Takes the dict of `kwargs` and loads all keys that are *valid*
+        (i.e., are included in the `valid_keys` list) into the `dictionary`
+ instance variable.
+
+ Any keys that aren't recognized are added to the `unrecognized_keys`
+ attribute.
+
+ Args:
+ **kwargs: Attributes associated with this protocol unit.
+ """
+ for required_key in self.required_keys:
+ if required_key not in kwargs:
+ raise RuntimeError("Key %s is required" % required_key)
+
+ self.unrecognized_keys = {} # Keys we were given not listed as valid
+ for k, v in kwargs.items():
+ if k in self.valid_keys or k in self.internal_keys:
+ self.__dict__[k] = v
+ else:
+ self.unrecognized_keys[k] = v
+
+ def get_dict(self):
+ """ Converts this protocol unit into a :py:class:`dict`, ready to be
+ encoded as JSON.
+
+ The keys it encodes are: `valid_keys` - `internal_keys`
+
+        Returns:
+ dict
+ """
+ d = {
+ k: _encode(v) for (k, v) in self.__dict__.items()
+ if k in self.valid_keys and k not in self.internal_keys
+ }
+ d.update(self.unrecognized_keys)
+ return d
+
+ def __str__(self):
+ return "(%s, %s)" % (self.__class__.__name__, repr(self.__dict__))
+
+
+def _encode(obj):
+ if type(obj) is list:
+ return [_encode(o) for o in obj]
+
+ if isinstance(obj, JsonEncodedObject):
+ return obj.get_dict()
+
+ return obj
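
A sketch of a subclass (the Pdu name and key lists are illustrative). Note that unrecognised keys survive the round trip via get_dict, while internal keys live on the object but stay out of the encoded output:

    from synapse.util.jsonobject import JsonEncodedObject

    class Pdu(JsonEncodedObject):
        valid_keys = ["event_id", "origin"]
        internal_keys = ["outlier"]
        required_keys = ["event_id"]

    pdu = Pdu(event_id="$ev:hs", origin="hs", outlier=True, extra="?")
    assert pdu.outlier is True                   # kept on the object
    assert pdu.get_dict() == {
        "event_id": "$ev:hs",
        "origin": "hs",
        "extra": "?",                            # unrecognized, still encoded
    }
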
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
new file mode 100644
index 00000000..7e6062c1
--- /dev/null
+++ b/synapse/util/logcontext.py
@@ -0,0 +1,197 @@
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+import threading
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class LoggingContext(object):
+ """Additional context for log formatting. Contexts are scoped within a
+ "with" block. Contexts inherit the state of their parent contexts.
+ Args:
+ name (str): Name for the context for debugging.
+ """
+
+ __slots__ = ["parent_context", "name", "__dict__"]
+
+ thread_local = threading.local()
+
+ class Sentinel(object):
+ """Sentinel to represent the root context"""
+
+ __slots__ = []
+
+ def __str__(self):
+ return "sentinel"
+
+ def copy_to(self, record):
+ pass
+
+ sentinel = Sentinel()
+
+ def __init__(self, name=None):
+ self.parent_context = None
+ self.name = name
+
+ def __str__(self):
+ return "%s@%x" % (self.name, id(self))
+
+ @classmethod
+ def current_context(cls):
+ """Get the current logging context from thread local storage"""
+ return getattr(cls.thread_local, "current_context", cls.sentinel)
+
+ def __enter__(self):
+ """Enters this logging context into thread local storage"""
+ if self.parent_context is not None:
+ raise Exception("Attempt to enter logging context multiple times")
+ self.parent_context = self.current_context()
+ self.thread_local.current_context = self
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Restore the logging context in thread local storage to the state it
+ was before this context was entered.
+ Returns:
+            None to avoid suppressing any exceptions that were thrown.
+ """
+ if self.thread_local.current_context is not self:
+ if self.thread_local.current_context is self.sentinel:
+ logger.debug("Expected logging context %s has been lost", self)
+ else:
+ logger.warn(
+ "Current logging context %s is not expected context %s",
+ self.thread_local.current_context,
+ self
+ )
+ self.thread_local.current_context = self.parent_context
+ self.parent_context = None
+
+ def __getattr__(self, name):
+ """Delegate member lookup to parent context"""
+ return getattr(self.parent_context, name)
+
+ def copy_to(self, record):
+ """Copy fields from this context and its parents to the record"""
+ if self.parent_context is not None:
+ self.parent_context.copy_to(record)
+ for key, value in self.__dict__.items():
+ setattr(record, key, value)
+
+
+class LoggingContextFilter(logging.Filter):
+ """Logging filter that adds values from the current logging context to each
+ record.
+ Args:
+ **defaults: Default values to avoid formatters complaining about
+ missing fields
+ """
+ def __init__(self, **defaults):
+ self.defaults = defaults
+
+ def filter(self, record):
+ """Add each fields from the logging contexts to the record.
+ Returns:
+ True to include the record in the log output.
+ """
+ context = LoggingContext.current_context()
+ for key, value in self.defaults.items():
+ setattr(record, key, value)
+ context.copy_to(record)
+ return True
+
+
+class PreserveLoggingContext(object):
+ """Captures the current logging context and restores it when the scope is
+ exited. Used to restore the context after a function using
+ @defer.inlineCallbacks is resumed by a callback from the reactor."""
+
+ __slots__ = ["current_context"]
+
+ def __enter__(self):
+ """Captures the current logging context"""
+ self.current_context = LoggingContext.current_context()
+ LoggingContext.thread_local.current_context = LoggingContext.sentinel
+
+ def __exit__(self, type, value, traceback):
+ """Restores the current logging context"""
+ LoggingContext.thread_local.current_context = self.current_context
+
+ if self.current_context is not LoggingContext.sentinel:
+ if self.current_context.parent_context is None:
+ logger.warn(
+ "Restoring dead context: %s",
+ self.current_context,
+ )
+
+
+class _PreservingContextDeferred(defer.Deferred):
+ """A deferred that ensures that all callbacks and errbacks are called with
+ the given logging context.
+ """
+ def __init__(self, context):
+ self._log_context = context
+ defer.Deferred.__init__(self)
+
+ def addCallbacks(self, callback, errback=None,
+ callbackArgs=None, callbackKeywords=None,
+ errbackArgs=None, errbackKeywords=None):
+ callback = self._wrap_callback(callback)
+ errback = self._wrap_callback(errback)
+ return defer.Deferred.addCallbacks(
+ self, callback,
+ errback=errback,
+ callbackArgs=callbackArgs,
+ callbackKeywords=callbackKeywords,
+ errbackArgs=errbackArgs,
+ errbackKeywords=errbackKeywords,
+ )
+
+ def _wrap_callback(self, f):
+ def g(res, *args, **kwargs):
+ with PreserveLoggingContext():
+ LoggingContext.thread_local.current_context = self._log_context
+ res = f(res, *args, **kwargs)
+ return res
+ return g
+
+
+def preserve_context_over_fn(fn, *args, **kwargs):
+ """Takes a function and invokes it with the given arguments, but removes
+ and restores the current logging context while doing so.
+
+ If the result is a deferred, call preserve_context_over_deferred before
+ returning it.
+ """
+ with PreserveLoggingContext():
+ res = fn(*args, **kwargs)
+
+ if isinstance(res, defer.Deferred):
+ return preserve_context_over_deferred(res)
+ else:
+ return res
+
+
+def preserve_context_over_deferred(deferred):
+ """Given a deferred wrap it such that any callbacks added later to it will
+ be invoked with the current context.
+ """
+ current_context = LoggingContext.current_context()
+ d = _PreservingContextDeferred(current_context)
+ deferred.chainDeferred(d)
+ return d
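
Typical use scopes a context around a unit of work and hangs arbitrary fields off it; LoggingContextFilter then stamps those fields onto every record logged inside the block:

    from synapse.util.logcontext import LoggingContext

    with LoggingContext("request") as context:
        context.request_id = "r17"   # arbitrary fields, copied onto records
        assert LoggingContext.current_context() is context
        # anything logged here can carry request_id via LoggingContextFilter

    # on exit the previous context (initially the sentinel) is restored
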
diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py
new file mode 100644
index 00000000..fd9ac4d4
--- /dev/null
+++ b/synapse/util/logutils.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from inspect import getcallargs
+from functools import wraps
+
+import logging
+import inspect
+import time
+
+
+_TIME_FUNC_ID = 0
+
+
+def _log_debug_as_f(f, msg, msg_args):
+ name = f.__module__
+ logger = logging.getLogger(name)
+
+ if logger.isEnabledFor(logging.DEBUG):
+ lineno = f.func_code.co_firstlineno
+ pathname = f.func_code.co_filename
+
+ record = logging.LogRecord(
+ name=name,
+ level=logging.DEBUG,
+ pathname=pathname,
+ lineno=lineno,
+ msg=msg,
+ args=msg_args,
+ exc_info=None
+ )
+
+ logger.handle(record)
+
+
+def log_function(f):
+ """ Function decorator that logs every call to that function.
+ """
+ func_name = f.__name__
+
+ @wraps(f)
+ def wrapped(*args, **kwargs):
+ name = f.__module__
+ logger = logging.getLogger(name)
+ level = logging.DEBUG
+
+ if logger.isEnabledFor(level):
+ bound_args = getcallargs(f, *args, **kwargs)
+
+ def format(value):
+ r = str(value)
+ if len(r) > 50:
+ r = r[:50] + "..."
+ return r
+
+ func_args = [
+ "%s=%s" % (k, format(v)) for k, v in bound_args.items()
+ ]
+
+ msg_args = {
+ "func_name": func_name,
+ "args": ", ".join(func_args)
+ }
+
+ _log_debug_as_f(
+ f,
+ "Invoked '%(func_name)s' with args: %(args)s",
+ msg_args
+ )
+
+ return f(*args, **kwargs)
+
+ wrapped.__name__ = func_name
+ return wrapped
+
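+# Illustrative usage sketch (not part of the original patch; `lookup_room`
+# is a hypothetical function). Each call is logged at DEBUG, with every
+# argument stringified and truncated to 50 characters:
+#
+#     @log_function
+#     def lookup_room(room_id):
+#         ...
+#
+#     lookup_room("!foo:bar")
+#     # logs: Invoked 'lookup_room' with args: room_id=!foo:bar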
+
+def time_function(f):
+ func_name = f.__name__
+
+ @wraps(f)
+ def wrapped(*args, **kwargs):
+ global _TIME_FUNC_ID
+ id = _TIME_FUNC_ID
+ _TIME_FUNC_ID += 1
+
+ start = time.clock() * 1000
+
+ try:
+ _log_debug_as_f(
+ f,
+ "[FUNC START] {%s-%d}",
+ (func_name, id),
+ )
+
+ r = f(*args, **kwargs)
+ finally:
+ end = time.clock() * 1000
+ _log_debug_as_f(
+ f,
+ "[FUNC END] {%s-%d} %f",
+ (func_name, id, end-start,),
+ )
+
+ return r
+
+ return wrapped
+
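+# Illustrative usage sketch (not part of the original patch; `compute` is a
+# hypothetical function). Each call gets a unique id from the module-level
+# counter and is bracketed by start/end records, with the elapsed
+# time.clock() value reported in milliseconds:
+#
+#     @time_function
+#     def compute():
+#         ...
+#
+#     compute()
+#     # logs: [FUNC START] {compute-0}
+#     # logs: [FUNC END] {compute-0} 0.120000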
+
+def trace_function(f):
+ func_name = f.__name__
+ linenum = f.func_code.co_firstlineno
+ pathname = f.func_code.co_filename
+
+ @wraps(f)
+ def wrapped(*args, **kwargs):
+ name = f.__module__
+ logger = logging.getLogger(name)
+ level = logging.DEBUG
+
+ s = inspect.currentframe().f_back
+
+ to_print = [
+ "\t%s:%s %s. Args: args=%s, kwargs=%s" % (
+ pathname, linenum, func_name, args, kwargs
+ )
+ ]
+ while s:
+            if s.f_globals["__name__"].startswith("synapse"):
+ filename, lineno, function, _, _ = inspect.getframeinfo(s)
+ args_string = inspect.formatargvalues(*inspect.getargvalues(s))
+
+ to_print.append(
+ "\t%s:%d %s. Args: %s" % (
+ filename, lineno, function, args_string
+ )
+ )
+
+ s = s.f_back
+
+ msg = "\nTraceback for %s:\n" % (func_name,) + "\n".join(to_print)
+
+ record = logging.LogRecord(
+ name=name,
+ level=level,
+ pathname=pathname,
+ lineno=lineno,
+ msg=msg,
+ args=None,
+ exc_info=None
+ )
+
+ logger.handle(record)
+
+ return f(*args, **kwargs)
+
+ wrapped.__name__ = func_name
+ return wrapped
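+
+
+# Illustrative usage sketch (not part of the original patch; `persist_event`
+# is a hypothetical function). Each call logs a synthetic traceback of the
+# caller's synapse frames, including their argument values:
+#
+#     @trace_function
+#     def persist_event(event):
+#         ...
+#
+#     # logs: "Traceback for persist_event:" followed by one line per frame.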
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py
new file mode 100644
index 00000000..d4457af9
--- /dev/null
+++ b/synapse/util/ratelimitutils.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import LimitExceededError
+
+from synapse.util.async import sleep
+
+import collections
+import contextlib
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class FederationRateLimiter(object):
+ def __init__(self, clock, window_size, sleep_limit, sleep_msec,
+ reject_limit, concurrent_requests):
+ """
+ Args:
+ clock (Clock)
+ window_size (int): The window size in milliseconds.
+ sleep_limit (int): The number of requests received in the last
+ `window_size` milliseconds before we artificially start
+ delaying processing of requests.
+ sleep_msec (int): The number of milliseconds to delay processing
+ of incoming requests by.
+            reject_limit (int): The maximum number of requests that can be
+                queued for processing before we start rejecting requests with
+                a 429 Too Many Requests response.
+            concurrent_requests (int): The maximum number of requests to
+                process concurrently.
+ """
+ self.clock = clock
+
+ self.window_size = window_size
+ self.sleep_limit = sleep_limit
+ self.sleep_msec = sleep_msec
+ self.reject_limit = reject_limit
+ self.concurrent_requests = concurrent_requests
+
+ self.ratelimiters = {}
+
+ def ratelimit(self, host):
+ """Used to ratelimit an incoming request from given host
+
+ Example usage:
+
+ with rate_limiter.ratelimit(origin) as wait_deferred:
+ yield wait_deferred
+ # Handle request ...
+
+ Args:
+ host (str): Origin of incoming request.
+
+ Returns:
+ _PerHostRatelimiter
+ """
+ return self.ratelimiters.setdefault(
+ host,
+ _PerHostRatelimiter(
+ clock=self.clock,
+ window_size=self.window_size,
+ sleep_limit=self.sleep_limit,
+ sleep_msec=self.sleep_msec,
+ reject_limit=self.reject_limit,
+ concurrent_requests=self.concurrent_requests,
+ )
+ ).ratelimit()
+
+
+class _PerHostRatelimiter(object):
+ def __init__(self, clock, window_size, sleep_limit, sleep_msec,
+ reject_limit, concurrent_requests):
+ self.clock = clock
+
+ self.window_size = window_size
+ self.sleep_limit = sleep_limit
+ self.sleep_msec = sleep_msec
+ self.reject_limit = reject_limit
+ self.concurrent_requests = concurrent_requests
+
+ self.sleeping_requests = set()
+ self.ready_request_queue = collections.OrderedDict()
+ self.current_processing = set()
+ self.request_times = []
+
+ def is_empty(self):
+ time_now = self.clock.time_msec()
+ self.request_times[:] = [
+ r for r in self.request_times
+ if time_now - r < self.window_size
+ ]
+
+ return not (
+ self.ready_request_queue
+ or self.sleeping_requests
+ or self.current_processing
+ or self.request_times
+ )
+
+ @contextlib.contextmanager
+ def ratelimit(self):
+        # `contextlib.contextmanager` takes a generator and turns it into a
+        # context manager. The generator should yield exactly once, with the
+        # value to be returned by the manager.
+        # Exceptions will be re-raised at the yield.
+
+ request_id = object()
+ ret = self._on_enter(request_id)
+ try:
+ yield ret
+ finally:
+ self._on_exit(request_id)
+
+ def _on_enter(self, request_id):
+ time_now = self.clock.time_msec()
+ self.request_times[:] = [
+ r for r in self.request_times
+ if time_now - r < self.window_size
+ ]
+
+ queue_size = len(self.ready_request_queue) + len(self.sleeping_requests)
+ if queue_size > self.reject_limit:
+ raise LimitExceededError(
+ retry_after_ms=int(
+ self.window_size / self.sleep_limit
+ ),
+ )
+
+ self.request_times.append(time_now)
+
+ def queue_request():
+ if len(self.current_processing) > self.concurrent_requests:
+ logger.debug("Ratelimit [%s]: Queue req", id(request_id))
+ queue_defer = defer.Deferred()
+ self.ready_request_queue[request_id] = queue_defer
+ return queue_defer
+ else:
+ return defer.succeed(None)
+
+ logger.debug(
+ "Ratelimit [%s]: len(self.request_times)=%d",
+ id(request_id), len(self.request_times),
+ )
+
+ if len(self.request_times) > self.sleep_limit:
+ logger.debug(
+ "Ratelimit [%s]: sleeping req",
+ id(request_id),
+ )
+ ret_defer = sleep(self.sleep_msec/1000.0)
+
+ self.sleeping_requests.add(request_id)
+
+ def on_wait_finished(_):
+ logger.debug(
+ "Ratelimit [%s]: Finished sleeping",
+ id(request_id),
+ )
+ self.sleeping_requests.discard(request_id)
+ queue_defer = queue_request()
+ return queue_defer
+
+ ret_defer.addBoth(on_wait_finished)
+ else:
+ ret_defer = queue_request()
+
+ def on_start(r):
+ logger.debug(
+ "Ratelimit [%s]: Processing req",
+ id(request_id),
+ )
+ self.current_processing.add(request_id)
+ return r
+
+ def on_err(r):
+ self.current_processing.discard(request_id)
+ return r
+
+ def on_both(r):
+ # Ensure that we've properly cleaned up.
+ self.sleeping_requests.discard(request_id)
+ self.ready_request_queue.pop(request_id, None)
+ return r
+
+ ret_defer.addCallbacks(on_start, on_err)
+ ret_defer.addBoth(on_both)
+ return ret_defer
+
+ def _on_exit(self, request_id):
+ logger.debug(
+ "Ratelimit [%s]: Processed req",
+ id(request_id),
+ )
+ self.current_processing.discard(request_id)
+ try:
+ request_id, deferred = self.ready_request_queue.popitem()
+ self.current_processing.add(request_id)
+ deferred.callback(None)
+ except KeyError:
+ pass
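+
+
+# Illustrative construction sketch (editor's note: not part of the original
+# patch; the parameter values are invented for the example). With a 1-second
+# window, the 11th request in the window is delayed by 500ms, the queue
+# rejects new requests once more than 50 are outstanding, and roughly 10
+# requests per host are processed concurrently:
+#
+#     rate_limiter = FederationRateLimiter(
+#         clock,
+#         window_size=1000,
+#         sleep_limit=10,
+#         sleep_msec=500,
+#         reject_limit=50,
+#         concurrent_requests=10,
+#     )
+#
+#     with rate_limiter.ratelimit(origin) as wait_deferred:
+#         yield wait_deferred
+#         # handle the request ...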
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
new file mode 100644
index 00000000..2fe68148
--- /dev/null
+++ b/synapse/util/retryutils.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import CodeMessageException
+
+import logging
+import random
+
+
+logger = logging.getLogger(__name__)
+
+
+class NotRetryingDestination(Exception):
+ def __init__(self, retry_last_ts, retry_interval, destination):
+ msg = "Not retrying server %s." % (destination,)
+ super(NotRetryingDestination, self).__init__(msg)
+
+ self.retry_last_ts = retry_last_ts
+ self.retry_interval = retry_interval
+ self.destination = destination
+
+
+@defer.inlineCallbacks
+def get_retry_limiter(destination, clock, store, **kwargs):
+ """For a given destination check if we have previously failed to
+ send a request there and are waiting before retrying the destination.
+ If we are not ready to retry the destination, this will raise a
+ NotRetryingDestination exception. Otherwise, will return a Context Manager
+ that will mark the destination as down if an exception is thrown (excluding
+ CodeMessageException with code < 500)
+
+ Example usage:
+
+ try:
+ limiter = yield get_retry_limiter(destination, clock, store)
+ with limiter:
+ response = yield do_request()
+ except NotRetryingDestination:
+ # We aren't ready to retry that destination.
+ raise
+ """
+ retry_last_ts, retry_interval = (0, 0)
+
+ retry_timings = yield store.get_destination_retry_timings(
+ destination
+ )
+
+ if retry_timings:
+ retry_last_ts, retry_interval = (
+ retry_timings["retry_last_ts"], retry_timings["retry_interval"]
+ )
+
+ now = int(clock.time_msec())
+
+ if retry_last_ts + retry_interval > now:
+ raise NotRetryingDestination(
+ retry_last_ts=retry_last_ts,
+ retry_interval=retry_interval,
+ destination=destination,
+ )
+
+ defer.returnValue(
+ RetryDestinationLimiter(
+ destination,
+ clock,
+ store,
+ retry_interval,
+ **kwargs
+ )
+ )
+
+
+class RetryDestinationLimiter(object):
+ def __init__(self, destination, clock, store, retry_interval,
+ min_retry_interval=10 * 60 * 1000,
+ max_retry_interval=24 * 60 * 60 * 1000,
+ multiplier_retry_interval=5,):
+ """Marks the destination as "down" if an exception is thrown in the
+ context, except for CodeMessageException with code < 500.
+
+ If no exception is raised, marks the destination as "up".
+
+ Args:
+ destination (str)
+ clock (Clock)
+ store (DataStore)
+ retry_interval (int): The next retry interval taken from the
+ database in milliseconds, or zero if the last request was
+ successful.
+ min_retry_interval (int): The minimum retry interval to use after
+ a failed request, in milliseconds.
+ max_retry_interval (int): The maximum retry interval to use after
+ a failed request, in milliseconds.
+ multiplier_retry_interval (int): The multiplier to use to increase
+ the retry interval after a failed request.
+ """
+ self.clock = clock
+ self.store = store
+ self.destination = destination
+
+ self.retry_interval = retry_interval
+ self.min_retry_interval = min_retry_interval
+ self.max_retry_interval = max_retry_interval
+ self.multiplier_retry_interval = multiplier_retry_interval
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+        def err(failure):
+            logger.exception(
+                "Failed to store set_destination_retry_timings: %s",
+                failure.value
+            )
+
+        valid_err_code = False
+        if exc_type is not None and issubclass(exc_type, CodeMessageException):
+            valid_err_code = 0 <= exc_val.code < 500
+
+ if exc_type is None or valid_err_code:
+ # We connected successfully.
+ if not self.retry_interval:
+ return
+
+ retry_last_ts = 0
+ self.retry_interval = 0
+ else:
+ # We couldn't connect.
+ if self.retry_interval:
+ self.retry_interval *= self.multiplier_retry_interval
+                # add jitter so that failing destinations do not all retry
+                # at the same instant
+                self.retry_interval = int(
+                    self.retry_interval * random.uniform(0.8, 1.4)
+                )
+
+ if self.retry_interval >= self.max_retry_interval:
+ self.retry_interval = self.max_retry_interval
+ else:
+ self.retry_interval = self.min_retry_interval
+
+ retry_last_ts = int(self.clock.time_msec())
+
+ self.store.set_destination_retry_timings(
+ self.destination, retry_last_ts, self.retry_interval
+ ).addErrback(err)
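+
+
+# Worked example (editor's note: illustrative, ignoring the random jitter).
+# With the defaults above, consecutive failures back off as:
+#
+#     1st failure: retry_interval = 10 min   (min_retry_interval)
+#     2nd failure: 10 min * 5    = 50 min
+#     3rd failure: 50 min * 5    = ~4.2 h
+#     4th failure: 4.2 h * 5     > 24 h, so capped at max_retry_interval
+#
+# A successful request (or a CodeMessageException with code < 500) resets
+# retry_interval to zero.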
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
new file mode 100644
index 00000000..f3a36340
--- /dev/null
+++ b/synapse/util/stringutils.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+import string
+
+_string_with_symbols = (
+ string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
+)
+
+
+def origin_from_ucid(ucid):
+ return ucid.split("@", 1)[1]
+
+
+def random_string(length):
+ return ''.join(random.choice(string.ascii_letters) for _ in xrange(length))
+
+
+def random_string_with_symbols(length):
+ return ''.join(
+ random.choice(_string_with_symbols) for _ in xrange(length)
+ )
+
+
+def is_ascii(s):
+ try:
+ s.encode("ascii")
+ except UnicodeEncodeError:
+ return False
+ except UnicodeDecodeError:
+ return False
+ else:
+ return True
diff --git a/synctl b/synctl
new file mode 120000
index 00000000..1bdceda2
--- /dev/null
+++ b/synctl
@@ -0,0 +1 @@
+./synapse/app/synctl.py \ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..9bff9ec1
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/tests/api/__init__.py b/tests/api/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/api/__init__.py
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
new file mode 100644
index 00000000..70d928de
--- /dev/null
+++ b/tests/api/test_auth.py
@@ -0,0 +1,295 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+from twisted.internet import defer
+
+from mock import Mock
+
+from synapse.api.auth import Auth
+from synapse.api.errors import AuthError
+from synapse.types import UserID
+from tests.utils import setup_test_homeserver
+
+import pymacaroons
+
+
+class AuthTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.state_handler = Mock()
+ self.store = Mock()
+
+ self.hs = yield setup_test_homeserver(handlers=None)
+ self.hs.get_datastore = Mock(return_value=self.store)
+ self.auth = Auth(self.hs)
+
+ self.test_user = "@foo:bar"
+ self.test_token = "_test_token_"
+
+ @defer.inlineCallbacks
+ def test_get_user_by_req_user_valid_token(self):
+ self.store.get_app_service_by_token = Mock(return_value=None)
+ user_info = {
+ "name": self.test_user,
+ "token_id": "ditto",
+ }
+ self.store.get_user_by_access_token = Mock(return_value=user_info)
+
+ request = Mock(args={})
+ request.args["access_token"] = [self.test_token]
+ request.requestHeaders.getRawHeaders = Mock(return_value=[""])
+ (user, _, _) = yield self.auth.get_user_by_req(request)
+ self.assertEquals(user.to_string(), self.test_user)
+
+ def test_get_user_by_req_user_bad_token(self):
+ self.store.get_app_service_by_token = Mock(return_value=None)
+ self.store.get_user_by_access_token = Mock(return_value=None)
+
+ request = Mock(args={})
+ request.args["access_token"] = [self.test_token]
+ request.requestHeaders.getRawHeaders = Mock(return_value=[""])
+ d = self.auth.get_user_by_req(request)
+ self.failureResultOf(d, AuthError)
+
+ def test_get_user_by_req_user_missing_token(self):
+ self.store.get_app_service_by_token = Mock(return_value=None)
+ user_info = {
+ "name": self.test_user,
+ "token_id": "ditto",
+ }
+ self.store.get_user_by_access_token = Mock(return_value=user_info)
+
+ request = Mock(args={})
+ request.requestHeaders.getRawHeaders = Mock(return_value=[""])
+ d = self.auth.get_user_by_req(request)
+ self.failureResultOf(d, AuthError)
+
+ @defer.inlineCallbacks
+ def test_get_user_by_req_appservice_valid_token(self):
+ app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
+ self.store.get_app_service_by_token = Mock(return_value=app_service)
+ self.store.get_user_by_access_token = Mock(return_value=None)
+
+ request = Mock(args={})
+ request.args["access_token"] = [self.test_token]
+ request.requestHeaders.getRawHeaders = Mock(return_value=[""])
+ (user, _, _) = yield self.auth.get_user_by_req(request)
+ self.assertEquals(user.to_string(), self.test_user)
+
+ def test_get_user_by_req_appservice_bad_token(self):
+ self.store.get_app_service_by_token = Mock(return_value=None)
+ self.store.get_user_by_access_token = Mock(return_value=None)
+
+ request = Mock(args={})
+ request.args["access_token"] = [self.test_token]
+ request.requestHeaders.getRawHeaders = Mock(return_value=[""])
+ d = self.auth.get_user_by_req(request)
+ self.failureResultOf(d, AuthError)
+
+ def test_get_user_by_req_appservice_missing_token(self):
+ app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
+ self.store.get_app_service_by_token = Mock(return_value=app_service)
+ self.store.get_user_by_access_token = Mock(return_value=None)
+
+ request = Mock(args={})
+ request.requestHeaders.getRawHeaders = Mock(return_value=[""])
+ d = self.auth.get_user_by_req(request)
+ self.failureResultOf(d, AuthError)
+
+ @defer.inlineCallbacks
+ def test_get_user_by_req_appservice_valid_token_valid_user_id(self):
+ masquerading_user_id = "@doppelganger:matrix.org"
+ app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
+ app_service.is_interested_in_user = Mock(return_value=True)
+ self.store.get_app_service_by_token = Mock(return_value=app_service)
+ self.store.get_user_by_access_token = Mock(return_value=None)
+
+ request = Mock(args={})
+ request.args["access_token"] = [self.test_token]
+ request.args["user_id"] = [masquerading_user_id]
+ request.requestHeaders.getRawHeaders = Mock(return_value=[""])
+ (user, _, _) = yield self.auth.get_user_by_req(request)
+ self.assertEquals(user.to_string(), masquerading_user_id)
+
+ def test_get_user_by_req_appservice_valid_token_bad_user_id(self):
+ masquerading_user_id = "@doppelganger:matrix.org"
+ app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
+ app_service.is_interested_in_user = Mock(return_value=False)
+ self.store.get_app_service_by_token = Mock(return_value=app_service)
+ self.store.get_user_by_access_token = Mock(return_value=None)
+
+ request = Mock(args={})
+ request.args["access_token"] = [self.test_token]
+ request.args["user_id"] = [masquerading_user_id]
+ request.requestHeaders.getRawHeaders = Mock(return_value=[""])
+ d = self.auth.get_user_by_req(request)
+ self.failureResultOf(d, AuthError)
+
+ @defer.inlineCallbacks
+ def test_get_user_from_macaroon(self):
+ # TODO(danielwh): Remove this mock when we remove the
+ # get_user_by_access_token fallback.
+ self.store.get_user_by_access_token = Mock(
+ return_value={"name": "@baldrick:matrix.org"}
+ )
+
+ user_id = "@baldrick:matrix.org"
+ macaroon = pymacaroons.Macaroon(
+ location=self.hs.config.server_name,
+ identifier="key",
+ key=self.hs.config.macaroon_secret_key)
+ macaroon.add_first_party_caveat("gen = 1")
+ macaroon.add_first_party_caveat("type = access")
+ macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
+ user_info = yield self.auth._get_user_from_macaroon(macaroon.serialize())
+ user = user_info["user"]
+ self.assertEqual(UserID.from_string(user_id), user)
+
+ @defer.inlineCallbacks
+ def test_get_guest_user_from_macaroon(self):
+ user_id = "@baldrick:matrix.org"
+ macaroon = pymacaroons.Macaroon(
+ location=self.hs.config.server_name,
+ identifier="key",
+ key=self.hs.config.macaroon_secret_key)
+ macaroon.add_first_party_caveat("gen = 1")
+ macaroon.add_first_party_caveat("type = access")
+ macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
+ macaroon.add_first_party_caveat("guest = true")
+ serialized = macaroon.serialize()
+
+ user_info = yield self.auth._get_user_from_macaroon(serialized)
+ user = user_info["user"]
+ is_guest = user_info["is_guest"]
+ self.assertEqual(UserID.from_string(user_id), user)
+ self.assertTrue(is_guest)
+
+ @defer.inlineCallbacks
+ def test_get_user_from_macaroon_user_db_mismatch(self):
+ self.store.get_user_by_access_token = Mock(
+ return_value={"name": "@percy:matrix.org"}
+ )
+
+ user = "@baldrick:matrix.org"
+ macaroon = pymacaroons.Macaroon(
+ location=self.hs.config.server_name,
+ identifier="key",
+ key=self.hs.config.macaroon_secret_key)
+ macaroon.add_first_party_caveat("gen = 1")
+ macaroon.add_first_party_caveat("type = access")
+ macaroon.add_first_party_caveat("user_id = %s" % (user,))
+ with self.assertRaises(AuthError) as cm:
+ yield self.auth._get_user_from_macaroon(macaroon.serialize())
+ self.assertEqual(401, cm.exception.code)
+ self.assertIn("User mismatch", cm.exception.msg)
+
+ @defer.inlineCallbacks
+ def test_get_user_from_macaroon_missing_caveat(self):
+ # TODO(danielwh): Remove this mock when we remove the
+ # get_user_by_access_token fallback.
+ self.store.get_user_by_access_token = Mock(
+ return_value={"name": "@baldrick:matrix.org"}
+ )
+
+ macaroon = pymacaroons.Macaroon(
+ location=self.hs.config.server_name,
+ identifier="key",
+ key=self.hs.config.macaroon_secret_key)
+ macaroon.add_first_party_caveat("gen = 1")
+ macaroon.add_first_party_caveat("type = access")
+
+ with self.assertRaises(AuthError) as cm:
+ yield self.auth._get_user_from_macaroon(macaroon.serialize())
+ self.assertEqual(401, cm.exception.code)
+ self.assertIn("No user caveat", cm.exception.msg)
+
+ @defer.inlineCallbacks
+ def test_get_user_from_macaroon_wrong_key(self):
+ # TODO(danielwh): Remove this mock when we remove the
+ # get_user_by_access_token fallback.
+ self.store.get_user_by_access_token = Mock(
+ return_value={"name": "@baldrick:matrix.org"}
+ )
+
+ user = "@baldrick:matrix.org"
+ macaroon = pymacaroons.Macaroon(
+ location=self.hs.config.server_name,
+ identifier="key",
+ key=self.hs.config.macaroon_secret_key + "wrong")
+ macaroon.add_first_party_caveat("gen = 1")
+ macaroon.add_first_party_caveat("type = access")
+ macaroon.add_first_party_caveat("user_id = %s" % (user,))
+
+ with self.assertRaises(AuthError) as cm:
+ yield self.auth._get_user_from_macaroon(macaroon.serialize())
+ self.assertEqual(401, cm.exception.code)
+ self.assertIn("Invalid macaroon", cm.exception.msg)
+
+ @defer.inlineCallbacks
+ def test_get_user_from_macaroon_unknown_caveat(self):
+ # TODO(danielwh): Remove this mock when we remove the
+ # get_user_by_access_token fallback.
+ self.store.get_user_by_access_token = Mock(
+ return_value={"name": "@baldrick:matrix.org"}
+ )
+
+ user = "@baldrick:matrix.org"
+ macaroon = pymacaroons.Macaroon(
+ location=self.hs.config.server_name,
+ identifier="key",
+ key=self.hs.config.macaroon_secret_key)
+ macaroon.add_first_party_caveat("gen = 1")
+ macaroon.add_first_party_caveat("type = access")
+ macaroon.add_first_party_caveat("user_id = %s" % (user,))
+ macaroon.add_first_party_caveat("cunning > fox")
+
+ with self.assertRaises(AuthError) as cm:
+ yield self.auth._get_user_from_macaroon(macaroon.serialize())
+ self.assertEqual(401, cm.exception.code)
+ self.assertIn("Invalid macaroon", cm.exception.msg)
+
+ @defer.inlineCallbacks
+ def test_get_user_from_macaroon_expired(self):
+ # TODO(danielwh): Remove this mock when we remove the
+ # get_user_by_access_token fallback.
+ self.store.get_user_by_access_token = Mock(
+ return_value={"name": "@baldrick:matrix.org"}
+ )
+
+ user = "@baldrick:matrix.org"
+ macaroon = pymacaroons.Macaroon(
+ location=self.hs.config.server_name,
+ identifier="key",
+ key=self.hs.config.macaroon_secret_key)
+ macaroon.add_first_party_caveat("gen = 1")
+ macaroon.add_first_party_caveat("type = access")
+ macaroon.add_first_party_caveat("user_id = %s" % (user,))
+ macaroon.add_first_party_caveat("time < 1") # ms
+
+ self.hs.clock.now = 5000 # seconds
+
+ yield self.auth._get_user_from_macaroon(macaroon.serialize())
+ # TODO(daniel): Turn on the check that we validate expiration, when we
+ # validate expiration (and remove the above line, which will start
+ # throwing).
+ # with self.assertRaises(AuthError) as cm:
+ # yield self.auth._get_user_from_macaroon(macaroon.serialize())
+ # self.assertEqual(401, cm.exception.code)
+ # self.assertIn("Invalid macaroon", cm.exception.msg)
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
new file mode 100644
index 00000000..9f9af2d7
--- /dev/null
+++ b/tests/api/test_filtering.py
@@ -0,0 +1,507 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import namedtuple
+from tests import unittest
+from twisted.internet import defer
+
+from mock import Mock, NonCallableMock
+from tests.utils import (
+ MockHttpResource, DeferredMockCallable, setup_test_homeserver
+)
+
+from synapse.types import UserID
+from synapse.api.filtering import FilterCollection, Filter
+
+user_localpart = "test_user"
+# MockEvent = namedtuple("MockEvent", "sender type room_id")
+
+
+def MockEvent(**kwargs):
+ ev = NonCallableMock(spec_set=kwargs.keys())
+ ev.configure_mock(**kwargs)
+ return ev
+
+
+class FilteringTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_federation_resource = MockHttpResource()
+
+ self.mock_http_client = Mock(spec=[])
+ self.mock_http_client.put_json = DeferredMockCallable()
+
+ hs = yield setup_test_homeserver(
+ handlers=None,
+ http_client=self.mock_http_client,
+ keyring=Mock(),
+ )
+
+ self.filtering = hs.get_filtering()
+
+ self.datastore = hs.get_datastore()
+
+ def test_definition_types_works_with_literals(self):
+ definition = {
+ "types": ["m.room.message", "org.matrix.foo.bar"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!foo:bar"
+ )
+
+ self.assertTrue(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_types_works_with_wildcards(self):
+ definition = {
+ "types": ["m.*", "org.matrix.foo.bar"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!foo:bar"
+ )
+ self.assertTrue(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_types_works_with_unknowns(self):
+ definition = {
+ "types": ["m.room.message", "org.matrix.foo.bar"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="now.for.something.completely.different",
+ room_id="!foo:bar"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_types_works_with_literals(self):
+ definition = {
+ "not_types": ["m.room.message", "org.matrix.foo.bar"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!foo:bar"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_types_works_with_wildcards(self):
+ definition = {
+ "not_types": ["m.room.message", "org.matrix.*"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="org.matrix.custom.event",
+ room_id="!foo:bar"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_types_works_with_unknowns(self):
+ definition = {
+ "not_types": ["m.*", "org.*"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="com.nom.nom.nom",
+ room_id="!foo:bar"
+ )
+ self.assertTrue(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_types_takes_priority_over_types(self):
+ definition = {
+ "not_types": ["m.*", "org.*"],
+ "types": ["m.room.message", "m.room.topic"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.topic",
+ room_id="!foo:bar"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_senders_works_with_literals(self):
+ definition = {
+ "senders": ["@flibble:wibble"]
+ }
+ event = MockEvent(
+ sender="@flibble:wibble",
+ type="com.nom.nom.nom",
+ room_id="!foo:bar"
+ )
+ self.assertTrue(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_senders_works_with_unknowns(self):
+ definition = {
+ "senders": ["@flibble:wibble"]
+ }
+ event = MockEvent(
+ sender="@challenger:appears",
+ type="com.nom.nom.nom",
+ room_id="!foo:bar"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_senders_works_with_literals(self):
+ definition = {
+ "not_senders": ["@flibble:wibble"]
+ }
+ event = MockEvent(
+ sender="@flibble:wibble",
+ type="com.nom.nom.nom",
+ room_id="!foo:bar"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_senders_works_with_unknowns(self):
+ definition = {
+ "not_senders": ["@flibble:wibble"]
+ }
+ event = MockEvent(
+ sender="@challenger:appears",
+ type="com.nom.nom.nom",
+ room_id="!foo:bar"
+ )
+ self.assertTrue(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_senders_takes_priority_over_senders(self):
+ definition = {
+ "not_senders": ["@misspiggy:muppets"],
+ "senders": ["@kermit:muppets", "@misspiggy:muppets"]
+ }
+ event = MockEvent(
+ sender="@misspiggy:muppets",
+ type="m.room.topic",
+ room_id="!foo:bar"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_rooms_works_with_literals(self):
+ definition = {
+ "rooms": ["!secretbase:unknown"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!secretbase:unknown"
+ )
+ self.assertTrue(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_rooms_works_with_unknowns(self):
+ definition = {
+ "rooms": ["!secretbase:unknown"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!anothersecretbase:unknown"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_rooms_works_with_literals(self):
+ definition = {
+ "not_rooms": ["!anothersecretbase:unknown"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!anothersecretbase:unknown"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_rooms_works_with_unknowns(self):
+ definition = {
+ "not_rooms": ["!secretbase:unknown"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!anothersecretbase:unknown"
+ )
+ self.assertTrue(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_not_rooms_takes_priority_over_rooms(self):
+ definition = {
+ "not_rooms": ["!secretbase:unknown"],
+ "rooms": ["!secretbase:unknown"]
+ }
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!secretbase:unknown"
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_combined_event(self):
+ definition = {
+ "not_senders": ["@misspiggy:muppets"],
+ "senders": ["@kermit:muppets"],
+ "rooms": ["!stage:unknown"],
+ "not_rooms": ["!piggyshouse:muppets"],
+ "types": ["m.room.message", "muppets.kermit.*"],
+ "not_types": ["muppets.misspiggy.*"]
+ }
+ event = MockEvent(
+ sender="@kermit:muppets", # yup
+ type="m.room.message", # yup
+ room_id="!stage:unknown" # yup
+ )
+ self.assertTrue(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_combined_event_bad_sender(self):
+ definition = {
+ "not_senders": ["@misspiggy:muppets"],
+ "senders": ["@kermit:muppets"],
+ "rooms": ["!stage:unknown"],
+ "not_rooms": ["!piggyshouse:muppets"],
+ "types": ["m.room.message", "muppets.kermit.*"],
+ "not_types": ["muppets.misspiggy.*"]
+ }
+ event = MockEvent(
+ sender="@misspiggy:muppets", # nope
+ type="m.room.message", # yup
+ room_id="!stage:unknown" # yup
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_combined_event_bad_room(self):
+ definition = {
+ "not_senders": ["@misspiggy:muppets"],
+ "senders": ["@kermit:muppets"],
+ "rooms": ["!stage:unknown"],
+ "not_rooms": ["!piggyshouse:muppets"],
+ "types": ["m.room.message", "muppets.kermit.*"],
+ "not_types": ["muppets.misspiggy.*"]
+ }
+ event = MockEvent(
+ sender="@kermit:muppets", # yup
+ type="m.room.message", # yup
+ room_id="!piggyshouse:muppets" # nope
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ def test_definition_combined_event_bad_type(self):
+ definition = {
+ "not_senders": ["@misspiggy:muppets"],
+ "senders": ["@kermit:muppets"],
+ "rooms": ["!stage:unknown"],
+ "not_rooms": ["!piggyshouse:muppets"],
+ "types": ["m.room.message", "muppets.kermit.*"],
+ "not_types": ["muppets.misspiggy.*"]
+ }
+ event = MockEvent(
+ sender="@kermit:muppets", # yup
+ type="muppets.misspiggy.kisses", # nope
+ room_id="!stage:unknown" # yup
+ )
+ self.assertFalse(
+ Filter(definition).check(event)
+ )
+
+ @defer.inlineCallbacks
+ def test_filter_presence_match(self):
+ user_filter_json = {
+ "presence": {
+ "types": ["m.*"]
+ }
+ }
+ user = UserID.from_string("@" + user_localpart + ":test")
+ filter_id = yield self.datastore.add_user_filter(
+ user_localpart=user_localpart,
+ user_filter=user_filter_json,
+ )
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.profile",
+ )
+ events = [event]
+
+ user_filter = yield self.filtering.get_user_filter(
+ user_localpart=user_localpart,
+ filter_id=filter_id,
+ )
+
+ results = user_filter.filter_presence(events=events)
+ self.assertEquals(events, results)
+
+ @defer.inlineCallbacks
+ def test_filter_presence_no_match(self):
+ user_filter_json = {
+ "presence": {
+ "types": ["m.*"]
+ }
+ }
+ user = UserID.from_string("@" + user_localpart + ":test")
+ filter_id = yield self.datastore.add_user_filter(
+ user_localpart=user_localpart,
+ user_filter=user_filter_json,
+ )
+ event = MockEvent(
+ sender="@foo:bar",
+ type="custom.avatar.3d.crazy",
+ )
+ events = [event]
+
+ user_filter = yield self.filtering.get_user_filter(
+ user_localpart=user_localpart,
+ filter_id=filter_id,
+ )
+
+ results = user_filter.filter_presence(events=events)
+ self.assertEquals([], results)
+
+ @defer.inlineCallbacks
+ def test_filter_room_state_match(self):
+ user_filter_json = {
+ "room": {
+ "state": {
+ "types": ["m.*"]
+ }
+ }
+ }
+ user = UserID.from_string("@" + user_localpart + ":test")
+ filter_id = yield self.datastore.add_user_filter(
+ user_localpart=user_localpart,
+ user_filter=user_filter_json,
+ )
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.topic",
+ room_id="!foo:bar"
+ )
+ events = [event]
+
+ user_filter = yield self.filtering.get_user_filter(
+ user_localpart=user_localpart,
+ filter_id=filter_id,
+ )
+
+ results = user_filter.filter_room_state(events=events)
+ self.assertEquals(events, results)
+
+ @defer.inlineCallbacks
+ def test_filter_room_state_no_match(self):
+ user_filter_json = {
+ "room": {
+ "state": {
+ "types": ["m.*"]
+ }
+ }
+ }
+ user = UserID.from_string("@" + user_localpart + ":test")
+ filter_id = yield self.datastore.add_user_filter(
+ user_localpart=user_localpart,
+ user_filter=user_filter_json,
+ )
+ event = MockEvent(
+ sender="@foo:bar",
+ type="org.matrix.custom.event",
+ room_id="!foo:bar"
+ )
+ events = [event]
+
+ user_filter = yield self.filtering.get_user_filter(
+ user_localpart=user_localpart,
+ filter_id=filter_id,
+ )
+
+ results = user_filter.filter_room_state(events)
+ self.assertEquals([], results)
+
+ @defer.inlineCallbacks
+ def test_add_filter(self):
+ user_filter_json = {
+ "room": {
+ "state": {
+ "types": ["m.*"]
+ }
+ }
+ }
+
+ filter_id = yield self.filtering.add_user_filter(
+ user_localpart=user_localpart,
+ user_filter=user_filter_json,
+ )
+
+ self.assertEquals(filter_id, 0)
+ self.assertEquals(user_filter_json,
+ (yield self.datastore.get_user_filter(
+ user_localpart=user_localpart,
+ filter_id=0,
+ ))
+ )
+
+ @defer.inlineCallbacks
+ def test_get_filter(self):
+ user_filter_json = {
+ "room": {
+ "state": {
+ "types": ["m.*"]
+ }
+ }
+ }
+
+ filter_id = yield self.datastore.add_user_filter(
+ user_localpart=user_localpart,
+ user_filter=user_filter_json,
+ )
+
+ filter = yield self.filtering.get_user_filter(
+ user_localpart=user_localpart,
+ filter_id=filter_id,
+ )
+
+ self.assertEquals(filter.filter_json, user_filter_json)
diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py
new file mode 100644
index 00000000..dd0bc19e
--- /dev/null
+++ b/tests/api/test_ratelimiting.py
@@ -0,0 +1,39 @@
+from synapse.api.ratelimiting import Ratelimiter
+
+from tests import unittest
+
+
+class TestRatelimiter(unittest.TestCase):
+
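+    # With msg_rate_hz=0.1 and burst_count=1 the limiter admits one message
+    # per user every 10 seconds: the send at t=0 is allowed (next slot at
+    # t=10), the send at t=5 is rejected, and the send at t=10 is allowed
+    # again (next slot at t=20).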
+ def test_allowed(self):
+ limiter = Ratelimiter()
+ allowed, time_allowed = limiter.send_message(
+ user_id="test_id", time_now_s=0, msg_rate_hz=0.1, burst_count=1,
+ )
+ self.assertTrue(allowed)
+ self.assertEquals(10., time_allowed)
+
+ allowed, time_allowed = limiter.send_message(
+ user_id="test_id", time_now_s=5, msg_rate_hz=0.1, burst_count=1,
+ )
+ self.assertFalse(allowed)
+ self.assertEquals(10., time_allowed)
+
+ allowed, time_allowed = limiter.send_message(
+ user_id="test_id", time_now_s=10, msg_rate_hz=0.1, burst_count=1
+ )
+ self.assertTrue(allowed)
+ self.assertEquals(20., time_allowed)
+
+ def test_pruning(self):
+ limiter = Ratelimiter()
+ allowed, time_allowed = limiter.send_message(
+ user_id="test_id_1", time_now_s=0, msg_rate_hz=0.1, burst_count=1,
+ )
+
+ self.assertIn("test_id_1", limiter.message_counts)
+
+ allowed, time_allowed = limiter.send_message(
+ user_id="test_id_2", time_now_s=10, msg_rate_hz=0.1, burst_count=1
+ )
+
+ self.assertNotIn("test_id_1", limiter.message_counts)
diff --git a/tests/appservice/__init__.py b/tests/appservice/__init__.py
new file mode 100644
index 00000000..1a84d94c
--- /dev/null
+++ b/tests/appservice/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py
new file mode 100644
index 00000000..8ce8dc0a
--- /dev/null
+++ b/tests/appservice/test_appservice.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.appservice import ApplicationService
+
+from mock import Mock, PropertyMock
+from tests import unittest
+
+
+def _regex(regex, exclusive=True):
+ return {
+ "regex": regex,
+ "exclusive": exclusive
+ }
+
+
+class ApplicationServiceTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.service = ApplicationService(
+ url="some_url",
+ token="some_token",
+ namespaces={
+ ApplicationService.NS_USERS: [],
+ ApplicationService.NS_ROOMS: [],
+ ApplicationService.NS_ALIASES: []
+ }
+ )
+ self.event = Mock(
+ type="m.something", room_id="!foo:bar", sender="@someone:somewhere"
+ )
+
+ def test_regex_user_id_prefix_match(self):
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*")
+ )
+ self.event.sender = "@irc_foobar:matrix.org"
+ self.assertTrue(self.service.is_interested(self.event))
+
+ def test_regex_user_id_prefix_no_match(self):
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*")
+ )
+ self.event.sender = "@someone_else:matrix.org"
+ self.assertFalse(self.service.is_interested(self.event))
+
+ def test_regex_room_member_is_checked(self):
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*")
+ )
+ self.event.sender = "@someone_else:matrix.org"
+ self.event.type = "m.room.member"
+ self.event.state_key = "@irc_foobar:matrix.org"
+ self.assertTrue(self.service.is_interested(self.event))
+
+ def test_regex_room_id_match(self):
+ self.service.namespaces[ApplicationService.NS_ROOMS].append(
+ _regex("!some_prefix.*some_suffix:matrix.org")
+ )
+ self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org"
+ self.assertTrue(self.service.is_interested(self.event))
+
+ def test_regex_room_id_no_match(self):
+ self.service.namespaces[ApplicationService.NS_ROOMS].append(
+ _regex("!some_prefix.*some_suffix:matrix.org")
+ )
+ self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org"
+ self.assertFalse(self.service.is_interested(self.event))
+
+ def test_regex_alias_match(self):
+ self.service.namespaces[ApplicationService.NS_ALIASES].append(
+ _regex("#irc_.*:matrix.org")
+ )
+ self.assertTrue(self.service.is_interested(
+ self.event,
+ aliases_for_event=["#irc_foobar:matrix.org", "#athing:matrix.org"]
+ ))
+
+ def test_non_exclusive_alias(self):
+ self.service.namespaces[ApplicationService.NS_ALIASES].append(
+ _regex("#irc_.*:matrix.org", exclusive=False)
+ )
+ self.assertFalse(self.service.is_exclusive_alias(
+ "#irc_foobar:matrix.org"
+ ))
+
+ def test_non_exclusive_room(self):
+ self.service.namespaces[ApplicationService.NS_ROOMS].append(
+ _regex("!irc_.*:matrix.org", exclusive=False)
+ )
+ self.assertFalse(self.service.is_exclusive_room(
+ "!irc_foobar:matrix.org"
+ ))
+
+ def test_non_exclusive_user(self):
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*:matrix.org", exclusive=False)
+ )
+ self.assertFalse(self.service.is_exclusive_user(
+ "@irc_foobar:matrix.org"
+ ))
+
+ def test_exclusive_alias(self):
+ self.service.namespaces[ApplicationService.NS_ALIASES].append(
+ _regex("#irc_.*:matrix.org", exclusive=True)
+ )
+ self.assertTrue(self.service.is_exclusive_alias(
+ "#irc_foobar:matrix.org"
+ ))
+
+ def test_exclusive_user(self):
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*:matrix.org", exclusive=True)
+ )
+ self.assertTrue(self.service.is_exclusive_user(
+ "@irc_foobar:matrix.org"
+ ))
+
+ def test_exclusive_room(self):
+ self.service.namespaces[ApplicationService.NS_ROOMS].append(
+ _regex("!irc_.*:matrix.org", exclusive=True)
+ )
+ self.assertTrue(self.service.is_exclusive_room(
+ "!irc_foobar:matrix.org"
+ ))
+
+ def test_regex_alias_no_match(self):
+ self.service.namespaces[ApplicationService.NS_ALIASES].append(
+ _regex("#irc_.*:matrix.org")
+ )
+ self.assertFalse(self.service.is_interested(
+ self.event,
+ aliases_for_event=["#xmpp_foobar:matrix.org", "#athing:matrix.org"]
+ ))
+
+ def test_regex_multiple_matches(self):
+ self.service.namespaces[ApplicationService.NS_ALIASES].append(
+ _regex("#irc_.*:matrix.org")
+ )
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*")
+ )
+ self.event.sender = "@irc_foobar:matrix.org"
+ self.assertTrue(self.service.is_interested(
+ self.event,
+ aliases_for_event=["#irc_barfoo:matrix.org"]
+ ))
+
+ def test_restrict_to_rooms(self):
+ self.service.namespaces[ApplicationService.NS_ROOMS].append(
+ _regex("!flibble_.*:matrix.org")
+ )
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*")
+ )
+ self.event.sender = "@irc_foobar:matrix.org"
+ self.event.room_id = "!wibblewoo:matrix.org"
+ self.assertFalse(self.service.is_interested(
+ self.event,
+ restrict_to=ApplicationService.NS_ROOMS
+ ))
+
+ def test_restrict_to_aliases(self):
+ self.service.namespaces[ApplicationService.NS_ALIASES].append(
+ _regex("#xmpp_.*:matrix.org")
+ )
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*")
+ )
+ self.event.sender = "@irc_foobar:matrix.org"
+ self.assertFalse(self.service.is_interested(
+ self.event,
+ restrict_to=ApplicationService.NS_ALIASES,
+ aliases_for_event=["#irc_barfoo:matrix.org"]
+ ))
+
+ def test_restrict_to_senders(self):
+ self.service.namespaces[ApplicationService.NS_ALIASES].append(
+ _regex("#xmpp_.*:matrix.org")
+ )
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*")
+ )
+ self.event.sender = "@xmpp_foobar:matrix.org"
+ self.assertFalse(self.service.is_interested(
+ self.event,
+ restrict_to=ApplicationService.NS_USERS,
+ aliases_for_event=["#xmpp_barfoo:matrix.org"]
+ ))
+
+ def test_interested_in_self(self):
+ # make sure invites get through
+ self.service.sender = "@appservice:name"
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*")
+ )
+ self.event.type = "m.room.member"
+ self.event.content = {
+ "membership": "invite"
+ }
+ self.event.state_key = self.service.sender
+ self.assertTrue(self.service.is_interested(self.event))
+
+ def test_member_list_match(self):
+ self.service.namespaces[ApplicationService.NS_USERS].append(
+ _regex("@irc_.*")
+ )
+ join_list = [
+ "@alice:here",
+ "@irc_fo:here", # AS user
+ "@bob:here",
+ ]
+
+ self.event.sender = "@xmpp_foobar:matrix.org"
+ self.assertTrue(self.service.is_interested(
+ event=self.event,
+ member_list=join_list
+ ))
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
new file mode 100644
index 00000000..82a59650
--- /dev/null
+++ b/tests/appservice/test_scheduler.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.appservice import ApplicationServiceState, AppServiceTransaction
+from synapse.appservice.scheduler import (
+ _ServiceQueuer, _TransactionController, _Recoverer
+)
+from twisted.internet import defer
+from ..utils import MockClock
+from mock import Mock
+from tests import unittest
+
+
+class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.clock = MockClock()
+ self.store = Mock()
+ self.as_api = Mock()
+ self.recoverer = Mock()
+ self.recoverer_fn = Mock(return_value=self.recoverer)
+ self.txnctrl = _TransactionController(
+ clock=self.clock, store=self.store, as_api=self.as_api,
+ recoverer_fn=self.recoverer_fn
+ )
+
+ def test_single_service_up_txn_sent(self):
+ # Test: The AS is up and the txn is successfully sent.
+ service = Mock()
+ events = [Mock(), Mock()]
+ txn_id = "foobar"
+ txn = Mock(id=txn_id, service=service, events=events)
+
+ # mock methods
+ self.store.get_appservice_state = Mock(
+ return_value=defer.succeed(ApplicationServiceState.UP)
+ )
+ txn.send = Mock(return_value=defer.succeed(True))
+ self.store.create_appservice_txn = Mock(
+ return_value=defer.succeed(txn)
+ )
+
+ # actual call
+ self.txnctrl.send(service, events)
+
+ self.store.create_appservice_txn.assert_called_once_with(
+ service=service, events=events # txn made and saved
+ )
+ self.assertEquals(0, len(self.txnctrl.recoverers)) # no recoverer made
+ txn.complete.assert_called_once_with(self.store) # txn completed
+
+ def test_single_service_down(self):
+ # Test: The AS is down so it shouldn't push; Recoverers will do it.
+ # It should still make a transaction though.
+ service = Mock()
+ events = [Mock(), Mock()]
+
+ txn = Mock(id="idhere", service=service, events=events)
+ self.store.get_appservice_state = Mock(
+ return_value=defer.succeed(ApplicationServiceState.DOWN)
+ )
+ self.store.create_appservice_txn = Mock(
+ return_value=defer.succeed(txn)
+ )
+
+ # actual call
+ self.txnctrl.send(service, events)
+
+ self.store.create_appservice_txn.assert_called_once_with(
+ service=service, events=events # txn made and saved
+ )
+ self.assertEquals(0, txn.send.call_count) # txn not sent though
+ self.assertEquals(0, txn.complete.call_count) # or completed
+
+ def test_single_service_up_txn_not_sent(self):
+ # Test: The AS is up and the txn is not sent. A Recoverer is made and
+ # started.
+ service = Mock()
+ events = [Mock(), Mock()]
+ txn_id = "foobar"
+ txn = Mock(id=txn_id, service=service, events=events)
+
+ # mock methods
+ self.store.get_appservice_state = Mock(
+ return_value=defer.succeed(ApplicationServiceState.UP)
+ )
+ self.store.set_appservice_state = Mock(return_value=defer.succeed(True))
+ txn.send = Mock(return_value=defer.succeed(False)) # fails to send
+ self.store.create_appservice_txn = Mock(
+ return_value=defer.succeed(txn)
+ )
+
+ # actual call
+ self.txnctrl.send(service, events)
+
+ self.store.create_appservice_txn.assert_called_once_with(
+ service=service, events=events
+ )
+ self.assertEquals(1, self.recoverer_fn.call_count) # recoverer made
+ self.assertEquals(1, self.recoverer.recover.call_count) # and invoked
+ self.assertEquals(1, len(self.txnctrl.recoverers)) # and stored
+ self.assertEquals(0, txn.complete.call_count) # txn not completed
+ self.store.set_appservice_state.assert_called_once_with(
+ service, ApplicationServiceState.DOWN # service marked as down
+ )
+
+
+class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.clock = MockClock()
+ self.as_api = Mock()
+ self.store = Mock()
+ self.service = Mock()
+ self.callback = Mock()
+ self.recoverer = _Recoverer(
+ clock=self.clock,
+ as_api=self.as_api,
+ store=self.store,
+ service=self.service,
+ callback=self.callback,
+ )
+
+ def test_recover_single_txn(self):
+ txn = Mock()
+ # return one txn to send, then no more old txns
+ txns = [txn, None]
+
+ def take_txn(*args, **kwargs):
+ return defer.succeed(txns.pop(0))
+ self.store.get_oldest_unsent_txn = Mock(side_effect=take_txn)
+
+ self.recoverer.recover()
+ # shouldn't have called anything prior to waiting for exp backoff
+ self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count)
+ txn.send = Mock(return_value=True)
+ # wait for exp backoff
+ self.clock.advance_time(2)
+ self.assertEquals(1, txn.send.call_count)
+ self.assertEquals(1, txn.complete.call_count)
+ # 2 because it needs to get None to know there are no more txns
+ self.assertEquals(2, self.store.get_oldest_unsent_txn.call_count)
+ self.callback.assert_called_once_with(self.recoverer)
+ self.assertEquals(self.recoverer.service, self.service)
+
+ def test_recover_retry_txn(self):
+ txn = Mock()
+ txns = [txn, None]
+ pop_txn = False
+
+ def take_txn(*args, **kwargs):
+ if pop_txn:
+ return defer.succeed(txns.pop(0))
+ else:
+ return defer.succeed(txn)
+ self.store.get_oldest_unsent_txn = Mock(side_effect=take_txn)
+
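+        # The recoverer polls with exponential backoff, doubling the delay
+        # after each failed send (2s, 4s, 8s, 16s below), so the mock clock
+        # is advanced by matching amounts.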
+ self.recoverer.recover()
+ self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count)
+ txn.send = Mock(return_value=False)
+ self.clock.advance_time(2)
+ self.assertEquals(1, txn.send.call_count)
+ self.assertEquals(0, txn.complete.call_count)
+ self.assertEquals(0, self.callback.call_count)
+ self.clock.advance_time(4)
+ self.assertEquals(2, txn.send.call_count)
+ self.assertEquals(0, txn.complete.call_count)
+ self.assertEquals(0, self.callback.call_count)
+ self.clock.advance_time(8)
+ self.assertEquals(3, txn.send.call_count)
+ self.assertEquals(0, txn.complete.call_count)
+ self.assertEquals(0, self.callback.call_count)
+ txn.send = Mock(return_value=True) # successfully send the txn
+ pop_txn = True # returns the txn the first time, then no more.
+ self.clock.advance_time(16)
+ self.assertEquals(1, txn.send.call_count) # new mock reset call count
+ self.assertEquals(1, txn.complete.call_count)
+ self.callback.assert_called_once_with(self.recoverer)
+
+
+class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.txn_ctrl = Mock()
+ self.queuer = _ServiceQueuer(self.txn_ctrl)
+
+ def test_send_single_event_no_queue(self):
+ # Expect the event to be sent immediately.
+ service = Mock(id=4)
+ event = Mock()
+ self.queuer.enqueue(service, event)
+ self.txn_ctrl.send.assert_called_once_with(service, [event])
+
+ def test_send_single_event_with_queue(self):
+ d = defer.Deferred()
+ self.txn_ctrl.send = Mock(return_value=d)
+ service = Mock(id=4)
+ event = Mock(event_id="first")
+ event2 = Mock(event_id="second")
+ event3 = Mock(event_id="third")
+ # Send an event and don't resolve it just yet.
+ self.queuer.enqueue(service, event)
+ # Send more events: expect send() to NOT be called multiple times.
+ self.queuer.enqueue(service, event2)
+ self.queuer.enqueue(service, event3)
+ self.txn_ctrl.send.assert_called_with(service, [event])
+ self.assertEquals(1, self.txn_ctrl.send.call_count)
+ # Resolve the send event: expect the queued events to be sent
+ d.callback(service)
+ self.txn_ctrl.send.assert_called_with(service, [event2, event3])
+ self.assertEquals(2, self.txn_ctrl.send.call_count)
+
+ def test_multiple_service_queues(self):
+ # Tests that each service has its own queue, and that they don't block
+ # on each other.
+ srv1 = Mock(id=4)
+ srv_1_defer = defer.Deferred()
+ srv_1_event = Mock(event_id="srv1a")
+ srv_1_event2 = Mock(event_id="srv1b")
+
+ srv2 = Mock(id=6)
+ srv_2_defer = defer.Deferred()
+ srv_2_event = Mock(event_id="srv2a")
+ srv_2_event2 = Mock(event_id="srv2b")
+
+ send_return_list = [srv_1_defer, srv_2_defer]
+        self.txn_ctrl.send = Mock(side_effect=lambda x, y: send_return_list.pop(0))
+
+ # send events for different ASes and make sure they are sent
+ self.queuer.enqueue(srv1, srv_1_event)
+ self.queuer.enqueue(srv1, srv_1_event2)
+ self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event])
+ self.queuer.enqueue(srv2, srv_2_event)
+ self.queuer.enqueue(srv2, srv_2_event2)
+ self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event])
+
+ # make sure callbacks for a service only send queued events for THAT
+ # service
+ srv_2_defer.callback(srv2)
+ self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2])
+ self.assertEquals(3, self.txn_ctrl.send.call_count)
diff --git a/tests/crypto/__init__.py b/tests/crypto/__init__.py
new file mode 100644
index 00000000..9bff9ec1
--- /dev/null
+++ b/tests/crypto/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py
new file mode 100644
index 00000000..79134729
--- /dev/null
+++ b/tests/crypto/test_event_signing.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+
+from synapse.events.builder import EventBuilder
+from synapse.crypto.event_signing import add_hashes_and_signatures
+
+from unpaddedbase64 import decode_base64
+
+import nacl.signing
+
+
+# Perform these tests using a fixed secret key so we get entirely
+# deterministic signature output that we can test against.
+SIGNING_KEY_SEED = decode_base64(
+ "YJDBA9Xnr2sVqXD9Vj7XVUnmFZcZrlw8Md7kMW+3XA1"
+)
+
+KEY_ALG = "ed25519"
+KEY_VER = 1
+KEY_NAME = "%s:%d" % (KEY_ALG, KEY_VER)
+
+HOSTNAME = "domain"
+
+
+class EventSigningTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.signing_key = nacl.signing.SigningKey(SIGNING_KEY_SEED)
+ self.signing_key.alg = KEY_ALG
+ self.signing_key.version = KEY_VER
+
+ def test_sign_minimal(self):
+ builder = EventBuilder(
+ {
+ 'event_id': "$0:domain",
+ 'origin': "domain",
+ 'origin_server_ts': 1000000,
+ 'signatures': {},
+ 'type': "X",
+ 'unsigned': {'age_ts': 1000000},
+ },
+ )
+
+ add_hashes_and_signatures(builder, HOSTNAME, self.signing_key)
+
+ event = builder.build()
+
+ self.assertTrue(hasattr(event, 'hashes'))
+ self.assertIn('sha256', event.hashes)
+ self.assertEquals(
+ event.hashes['sha256'],
+ "6tJjLpXtggfke8UxFhAKg82QVkJzvKOVOOSjUDK4ZSI",
+ )
+
+ self.assertTrue(hasattr(event, 'signatures'))
+ self.assertIn(HOSTNAME, event.signatures)
+ self.assertIn(KEY_NAME, event.signatures["domain"])
+ self.assertEquals(
+ event.signatures[HOSTNAME][KEY_NAME],
+ "2Wptgo4CwmLo/Y8B8qinxApKaCkBG2fjTWB7AbP5Uy+"
+ "aIbygsSdLOFzvdDjww8zUVKCmI02eP9xtyJxc/cLiBA",
+ )
+
+ def test_sign_message(self):
+ builder = EventBuilder(
+ {
+ 'content': {
+ 'body': "Here is the message content",
+ },
+ 'event_id': "$0:domain",
+ 'origin': "domain",
+ 'origin_server_ts': 1000000,
+ 'type': "m.room.message",
+ 'room_id': "!r:domain",
+ 'sender': "@u:domain",
+ 'signatures': {},
+ 'unsigned': {'age_ts': 1000000},
+ }
+ )
+
+ add_hashes_and_signatures(builder, HOSTNAME, self.signing_key)
+
+ event = builder.build()
+
+ self.assertTrue(hasattr(event, 'hashes'))
+ self.assertIn('sha256', event.hashes)
+ self.assertEquals(
+ event.hashes['sha256'],
+ "onLKD1bGljeBWQhWZ1kaP9SorVmRQNdN5aM2JYU2n/g",
+ )
+
+ self.assertTrue(hasattr(event, 'signatures'))
+ self.assertIn(HOSTNAME, event.signatures)
+ self.assertIn(KEY_NAME, event.signatures["domain"])
+ self.assertEquals(
+ event.signatures[HOSTNAME][KEY_NAME],
+ "Wm+VzmOUOz08Ds+0NTWb1d4CZrVsJSikkeRxh6aCcUw"
+ "u6pNC78FunoD7KNWzqFn241eYHYMGCA5McEiVPdhzBA"
+ )
diff --git a/tests/events/__init__.py b/tests/events/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/events/__init__.py
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
new file mode 100644
index 00000000..16179921
--- /dev/null
+++ b/tests/events/test_utils.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .. import unittest
+
+from synapse.events import FrozenEvent
+from synapse.events.utils import prune_event
+
+
+class PruneEventTestCase(unittest.TestCase):
+ """ Asserts that a new event constructed with `evdict` will look like
+ `matchdict` when it is redacted. """
+ def run_test(self, evdict, matchdict):
+ self.assertEquals(
+ prune_event(FrozenEvent(evdict)).get_dict(),
+ matchdict
+ )
+
+ def test_minimal(self):
+ self.run_test(
+ {'type': 'A'},
+ {
+ 'type': 'A',
+ 'content': {},
+ 'signatures': {},
+ 'unsigned': {},
+ }
+ )
+
+ def test_basic_keys(self):
+ self.run_test(
+ {
+ 'type': 'A',
+ 'room_id': '!1:domain',
+ 'sender': '@2:domain',
+ 'event_id': '$3:domain',
+ 'origin': 'domain',
+ },
+ {
+ 'type': 'A',
+ 'room_id': '!1:domain',
+ 'sender': '@2:domain',
+ 'event_id': '$3:domain',
+ 'origin': 'domain',
+ 'content': {},
+ 'signatures': {},
+ 'unsigned': {},
+ }
+ )
+
+ def test_unsigned_age_ts(self):
+ self.run_test(
+ {
+ 'type': 'B',
+ 'unsigned': {'age_ts': 20},
+ },
+ {
+ 'type': 'B',
+ 'content': {},
+ 'signatures': {},
+ 'unsigned': {'age_ts': 20},
+ }
+ )
+
+ self.run_test(
+ {
+ 'type': 'B',
+ 'unsigned': {'other_key': 'here'},
+ },
+ {
+ 'type': 'B',
+ 'content': {},
+ 'signatures': {},
+ 'unsigned': {},
+ }
+ )
+
+ def test_content(self):
+ self.run_test(
+ {
+ 'type': 'C',
+ 'content': {'things': 'here'},
+ },
+ {
+ 'type': 'C',
+ 'content': {},
+ 'signatures': {},
+ 'unsigned': {},
+ }
+ )
+
+ self.run_test(
+ {
+ 'type': 'm.room.create',
+ 'content': {'creator': '@2:domain', 'other_field': 'here'},
+ },
+ {
+ 'type': 'm.room.create',
+ 'content': {'creator': '@2:domain'},
+ 'signatures': {},
+ 'unsigned': {},
+ }
+ )
diff --git a/tests/federation/__init__.py b/tests/federation/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/federation/__init__.py
diff --git a/tests/federation/test_federation.py b/tests/federation/test_federation.py
new file mode 100644
index 00000000..a4ef60b9
--- /dev/null
+++ b/tests/federation/test_federation.py
@@ -0,0 +1,301 @@
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# trial imports
+from twisted.internet import defer
+from tests import unittest
+
+# python imports
+from mock import Mock, ANY
+
+from ..utils import MockHttpResource, MockClock, setup_test_homeserver
+
+from synapse.federation import initialize_http_replication
+from synapse.events import FrozenEvent
+
+
+def make_pdu(prev_pdus=None, **kwargs):
+ """Construct a FrozenEvent, providing defaults for the common PDU fields."""
+ pdu_fields = {
+ "state_key": None,
+ "prev_events": prev_pdus if prev_pdus is not None else [],
+ }
+ pdu_fields.update(kwargs)
+
+ return FrozenEvent(pdu_fields)
+
+
+class FederationTestCase(unittest.TestCase):
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource()
+ self.mock_http_client = Mock(spec=[
+ "get_json",
+ "put_json",
+ ])
+ self.mock_persistence = Mock(spec=[
+ "prep_send_transaction",
+ "delivered_txn",
+ "get_received_txn_response",
+ "set_received_txn_response",
+ "get_destination_retry_timings",
+ "get_auth_chain",
+ ])
+ self.mock_persistence.get_received_txn_response.return_value = (
+ defer.succeed(None)
+ )
+
+ retry_timings_res = {
+ "destination": "",
+ "retry_last_ts": 0,
+ "retry_interval": 0,
+ }
+ self.mock_persistence.get_destination_retry_timings.return_value = (
+ defer.succeed(retry_timings_res)
+ )
+ self.mock_persistence.get_auth_chain.return_value = []
+ self.clock = MockClock()
+ hs = yield setup_test_homeserver(
+ resource_for_federation=self.mock_resource,
+ http_client=self.mock_http_client,
+ datastore=self.mock_persistence,
+ clock=self.clock,
+ keyring=Mock(),
+ )
+ self.federation = initialize_http_replication(hs)
+ self.distributor = hs.get_distributor()
+
+ @defer.inlineCallbacks
+ def test_get_state(self):
+ mock_handler = Mock(spec=[
+ "get_state_for_pdu",
+ ])
+
+ self.federation.set_handler(mock_handler)
+
+ mock_handler.get_state_for_pdu.return_value = defer.succeed([])
+
+ # Empty context initially
+ (code, response) = yield self.mock_resource.trigger(
+ "GET",
+ "/_matrix/federation/v1/state/my-context/",
+ None
+ )
+ self.assertEquals(200, code)
+ self.assertFalse(response["pdus"])
+
+ # Now let's give the context some state
+ mock_handler.get_state_for_pdu.return_value = (
+ defer.succeed([
+ make_pdu(
+ event_id="the-pdu-id",
+ origin="red",
+ user_id="@a:red",
+ room_id="my-context",
+ type="m.topic",
+ origin_server_ts=123456789000,
+ depth=1,
+ content={"topic": "The topic"},
+ state_key="",
+ power_level=1000,
+ prev_state="last-pdu-id",
+ ),
+ ])
+ )
+
+ (code, response) = yield self.mock_resource.trigger(
+ "GET",
+ "/_matrix/federation/v1/state/my-context/",
+ None
+ )
+ self.assertEquals(200, code)
+ self.assertEquals(1, len(response["pdus"]))
+
+ @defer.inlineCallbacks
+ def test_get_pdu(self):
+ mock_handler = Mock(spec=[
+ "get_persisted_pdu",
+ ])
+
+ self.federation.set_handler(mock_handler)
+
+ mock_handler.get_persisted_pdu.return_value = (
+ defer.succeed(None)
+ )
+
+ (code, response) = yield self.mock_resource.trigger(
+ "GET",
+ "/_matrix/federation/v1/event/abc123def456/",
+ None
+ )
+ self.assertEquals(404, code)
+
+ # Now insert such a PDU
+ mock_handler.get_persisted_pdu.return_value = (
+ defer.succeed(
+ make_pdu(
+ event_id="abc123def456",
+ origin="red",
+ user_id="@a:red",
+ room_id="my-context",
+ type="m.text",
+ origin_server_ts=123456789001,
+ depth=1,
+ content={"text": "Here is the message"},
+ )
+ )
+ )
+
+ (code, response) = yield self.mock_resource.trigger(
+ "GET",
+ "/_matrix/federation/v1/event/abc123def456/",
+ None
+ )
+ self.assertEquals(200, code)
+ self.assertEquals(1, len(response["pdus"]))
+ self.assertEquals("m.text", response["pdus"][0]["type"])
+
+ @defer.inlineCallbacks
+ def test_send_pdu(self):
+ self.mock_http_client.put_json.return_value = defer.succeed(
+ (200, "OK")
+ )
+
+ pdu = make_pdu(
+ event_id="abc123def456",
+ origin="red",
+ user_id="@a:red",
+ room_id="my-context",
+ type="m.text",
+ origin_server_ts=123456789001,
+ depth=1,
+ content={"text": "Here is the message"},
+ )
+
+ yield self.federation.send_pdu(pdu, ["remote"])
+
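+ # MockClock pins the time, so the txn ID and origin_server_ts below
+ # are predictable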
+ self.mock_http_client.put_json.assert_called_with(
+ "remote",
+ path="/_matrix/federation/v1/send/1000000/",
+ data={
+ "origin_server_ts": 1000000,
+ "origin": "test",
+ "pdus": [
+ pdu.get_pdu_json(),
+ ],
+ 'pdu_failures': [],
+ },
+ json_data_callback=ANY,
+ )
+
+ @defer.inlineCallbacks
+ def test_send_edu(self):
+ self.mock_http_client.put_json.return_value = defer.succeed(
+ (200, "OK")
+ )
+
+ yield self.federation.send_edu(
+ destination="remote",
+ edu_type="m.test",
+ content={"testing": "content here"},
+ )
+
+ # MockClock ensures we can guess these timestamps
+ self.mock_http_client.put_json.assert_called_with(
+ "remote",
+ path="/_matrix/federation/v1/send/1000000/",
+ data={
+ "origin": "test",
+ "origin_server_ts": 1000000,
+ "pdus": [],
+ "edus": [
+ {
+ "edu_type": "m.test",
+ "content": {"testing": "content here"},
+ }
+ ],
+ 'pdu_failures': [],
+ },
+ json_data_callback=ANY,
+ )
+
+ @defer.inlineCallbacks
+ def test_recv_edu(self):
+ recv_observer = Mock()
+ recv_observer.return_value = defer.succeed(())
+
+ self.federation.register_edu_handler("m.test", recv_observer)
+
+ yield self.mock_resource.trigger(
+ "PUT",
+ "/_matrix/federation/v1/send/1001000/",
+ """{
+ "origin": "remote",
+ "origin_server_ts": 1001000,
+ "pdus": [],
+ "edus": [
+ {
+ "origin": "remote",
+ "destination": "test",
+ "edu_type": "m.test",
+ "content": {"testing": "reply here"}
+ }
+ ]
+ }"""
+ )
+
+ recv_observer.assert_called_with(
+ "remote", {"testing": "reply here"}
+ )
+
+ @defer.inlineCallbacks
+ def test_send_query(self):
+ self.mock_http_client.get_json.return_value = defer.succeed(
+ {"your": "response"}
+ )
+
+ response = yield self.federation.make_query(
+ destination="remote",
+ query_type="a-question",
+ args={"one": "1", "two": "2"},
+ )
+
+ self.assertEquals({"your": "response"}, response)
+
+ self.mock_http_client.get_json.assert_called_with(
+ destination="remote",
+ path="/_matrix/federation/v1/query/a-question",
+ args={"one": "1", "two": "2"},
+ retry_on_dns_fail=True,
+ )
+
+ @defer.inlineCallbacks
+ def test_recv_query(self):
+ recv_handler = Mock()
+ recv_handler.return_value = defer.succeed({"another": "response"})
+
+ self.federation.register_query_handler("a-question", recv_handler)
+
+ code, response = yield self.mock_resource.trigger(
+ "GET",
+ "/_matrix/federation/v1/query/a-question?three=3&four=4",
+ None
+ )
+
+ self.assertEquals(200, code)
+ self.assertEquals({"another": "response"}, response)
+
+ recv_handler.assert_called_with(
+ {"three": "3", "four": "4"}
+ )
diff --git a/tests/handlers/__init__.py b/tests/handlers/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/handlers/__init__.py
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
new file mode 100644
index 00000000..9e95d1e5
--- /dev/null
+++ b/tests/handlers/test_appservice.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+from .. import unittest
+
+from synapse.handlers.appservice import ApplicationServicesHandler
+
+from mock import Mock
+
+
+class AppServiceHandlerTestCase(unittest.TestCase):
+ """ Tests the ApplicationServicesHandler. """
+
+ def setUp(self):
+ self.mock_store = Mock()
+ self.mock_as_api = Mock()
+ self.mock_scheduler = Mock()
+ hs = Mock()
+ hs.get_datastore = Mock(return_value=self.mock_store)
+ self.handler = ApplicationServicesHandler(
+ hs, self.mock_as_api, self.mock_scheduler
+ )
+
+ @defer.inlineCallbacks
+ def test_notify_interested_services(self):
+ interested_service = self._mkservice(is_interested=True)
+ services = [
+ self._mkservice(is_interested=False),
+ interested_service,
+ self._mkservice(is_interested=False)
+ ]
+
+ self.mock_store.get_app_services = Mock(return_value=services)
+ self.mock_store.get_user_by_id = Mock(return_value=[])
+
+ event = Mock(
+ sender="@someone:anywhere",
+ type="m.room.message",
+ room_id="!foo:bar"
+ )
+ self.mock_as_api.push = Mock()
+ yield self.handler.notify_interested_services(event)
+ self.mock_scheduler.submit_event_for_as.assert_called_once_with(
+ interested_service, event
+ )
+
+ @defer.inlineCallbacks
+ def test_query_user_exists_unknown_user(self):
+ user_id = "@someone:anywhere"
+ services = [self._mkservice(is_interested=True)]
+ services[0].is_interested_in_user = Mock(return_value=True)
+ self.mock_store.get_app_services = Mock(return_value=services)
+ self.mock_store.get_user_by_id = Mock(return_value=None)
+
+ event = Mock(
+ sender=user_id,
+ type="m.room.message",
+ room_id="!foo:bar"
+ )
+ self.mock_as_api.push = Mock()
+ self.mock_as_api.query_user = Mock()
+ yield self.handler.notify_interested_services(event)
+ self.mock_as_api.query_user.assert_called_once_with(
+ services[0], user_id
+ )
+
+ @defer.inlineCallbacks
+ def test_query_user_exists_known_user(self):
+ user_id = "@someone:anywhere"
+ services = [self._mkservice(is_interested=True)]
+ services[0].is_interested_in_user = Mock(return_value=True)
+ self.mock_store.get_app_services = Mock(return_value=services)
+ self.mock_store.get_user_by_id = Mock(return_value={
+ "name": user_id
+ })
+
+ event = Mock(
+ sender=user_id,
+ type="m.room.message",
+ room_id="!foo:bar"
+ )
+ self.mock_as_api.push = Mock()
+ self.mock_as_api.query_user = Mock()
+ yield self.handler.notify_interested_services(event)
+ self.assertFalse(
+ self.mock_as_api.query_user.called,
+ "query_user called when it shouldn't have been."
+ )
+
+ @defer.inlineCallbacks
+ def test_query_room_alias_exists(self):
+ room_alias_str = "#foo:bar"
+ room_alias = Mock()
+ room_alias.to_string = Mock(return_value=room_alias_str)
+
+ room_id = "!alpha:bet"
+ servers = ["aperture"]
+ interested_service = self._mkservice(is_interested=True)
+ services = [
+ self._mkservice(is_interested=False),
+ interested_service,
+ self._mkservice(is_interested=False)
+ ]
+
+ self.mock_store.get_app_services = Mock(return_value=services)
+ self.mock_store.get_association_from_room_alias = Mock(
+ return_value=Mock(room_id=room_id, servers=servers)
+ )
+
+ result = yield self.handler.query_room_alias_exists(room_alias)
+
+ self.mock_as_api.query_alias.assert_called_once_with(
+ interested_service,
+ room_alias_str
+ )
+ self.assertEquals(result.room_id, room_id)
+ self.assertEquals(result.servers, servers)
+
+ def _mkservice(self, is_interested):
+ service = Mock()
+ service.is_interested = Mock(return_value=is_interested)
+ service.token = "mock_service_token"
+ service.url = "mock_service_url"
+ return service
diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
new file mode 100644
index 00000000..978e4d0d
--- /dev/null
+++ b/tests/handlers/test_auth.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pymacaroons
+
+from mock import Mock, NonCallableMock
+from synapse.handlers.auth import AuthHandler
+from tests import unittest
+from tests.utils import setup_test_homeserver
+from twisted.internet import defer
+
+
+class AuthHandlers(object):
+ def __init__(self, hs):
+ self.auth_handler = AuthHandler(hs)
+
+
+class AuthTestCase(unittest.TestCase):
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.hs = yield setup_test_homeserver(handlers=None)
+ self.hs.handlers = AuthHandlers(self.hs)
+
+ def test_token_is_a_macaroon(self):
+ self.hs.config.macaroon_secret_key = "this key is a huge secret"
+
+ token = self.hs.handlers.auth_handler.generate_access_token("some_user")
+ # Check that we can parse the thing with pymacaroons
+ macaroon = pymacaroons.Macaroon.deserialize(token)
+ # The most basic of sanity checks
+ if "some_user" not in macaroon.inspect():
+ self.fail("some_user was not in %s" % macaroon.inspect())
+
+ def test_macaroon_caveats(self):
+ self.hs.config.macaroon_secret_key = "this key is a massive secret"
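+ # pin the clock so the expiry caveat below is deterministic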
+ self.hs.clock.now = 5000
+
+ token = self.hs.handlers.auth_handler.generate_access_token("a_user")
+ macaroon = pymacaroons.Macaroon.deserialize(token)
+
+ def verify_gen(caveat):
+ return caveat == "gen = 1"
+
+ def verify_user(caveat):
+ return caveat == "user_id = a_user"
+
+ def verify_type(caveat):
+ return caveat == "type = access"
+
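+ # expiry = now (5000s = 5,000,000 ms) + 1 hour (3,600,000 ms)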
+ def verify_expiry(caveat):
+ return caveat == "time < 8600000"
+
+ v = pymacaroons.Verifier()
+ v.satisfy_general(verify_gen)
+ v.satisfy_general(verify_user)
+ v.satisfy_general(verify_type)
+ v.satisfy_general(verify_expiry)
+ v.verify(macaroon, self.hs.config.macaroon_secret_key)
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
new file mode 100644
index 00000000..27306ba4
--- /dev/null
+++ b/tests/handlers/test_directory.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from mock import Mock
+
+from synapse.handlers.directory import DirectoryHandler
+from synapse.types import RoomAlias
+
+from tests.utils import setup_test_homeserver
+
+
+class DirectoryHandlers(object):
+ def __init__(self, hs):
+ self.directory_handler = DirectoryHandler(hs)
+
+
+class DirectoryTestCase(unittest.TestCase):
+ """ Tests the directory service. """
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_federation = Mock(spec=[
+ "make_query",
+ ])
+
+ self.query_handlers = {}
+
+ def register_query_handler(query_type, handler):
+ self.query_handlers[query_type] = handler
+ self.mock_federation.register_query_handler = register_query_handler
+
+ hs = yield setup_test_homeserver(
+ http_client=None,
+ resource_for_federation=Mock(),
+ replication_layer=self.mock_federation,
+ )
+ hs.handlers = DirectoryHandlers(hs)
+
+ self.handler = hs.get_handlers().directory_handler
+
+ self.store = hs.get_datastore()
+
+ self.my_room = RoomAlias.from_string("#my-room:test")
+ self.your_room = RoomAlias.from_string("#your-room:test")
+ self.remote_room = RoomAlias.from_string("#another:remote")
+
+ @defer.inlineCallbacks
+ def test_get_local_association(self):
+ yield self.store.create_room_alias_association(
+ self.my_room, "!8765qwer:test", ["test"]
+ )
+
+ result = yield self.handler.get_association(self.my_room)
+
+ self.assertEquals({
+ "room_id": "!8765qwer:test",
+ "servers": ["test"],
+ }, result)
+
+ @defer.inlineCallbacks
+ def test_get_remote_association(self):
+ self.mock_federation.make_query.return_value = defer.succeed(
+ {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}
+ )
+
+ result = yield self.handler.get_association(self.remote_room)
+
+ self.assertEquals({
+ "room_id": "!8765qwer:test",
+ "servers": ["test", "remote"],
+ }, result)
+ self.mock_federation.make_query.assert_called_with(
+ destination="remote",
+ query_type="directory",
+ args={
+ "room_alias": "#another:remote",
+ },
+ retry_on_dns_fail=False,
+ )
+
+ @defer.inlineCallbacks
+ def test_incoming_fed_query(self):
+ yield self.store.create_room_alias_association(
+ self.your_room, "!8765asdf:test", ["test"]
+ )
+
+ response = yield self.query_handlers["directory"](
+ {"room_alias": "#your-room:test"}
+ )
+
+ self.assertEquals({
+ "room_id": "!8765asdf:test",
+ "servers": ["test"],
+ }, response)
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
new file mode 100644
index 00000000..d392c230
--- /dev/null
+++ b/tests/handlers/test_federation.py
@@ -0,0 +1,130 @@
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+from tests import unittest
+
+from synapse.api.constants import EventTypes
+from synapse.events import FrozenEvent
+from synapse.handlers.federation import FederationHandler
+
+from mock import NonCallableMock, ANY, Mock
+
+from ..utils import setup_test_homeserver
+
+
+class FederationTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+
+ self.state_handler = NonCallableMock(spec_set=[
+ "compute_event_context",
+ ])
+
+ self.auth = NonCallableMock(spec_set=[
+ "check",
+ "check_host_in_room",
+ ])
+
+ self.hostname = "test"
+ hs = yield setup_test_homeserver(
+ self.hostname,
+ datastore=NonCallableMock(spec_set=[
+ "persist_event",
+ "store_room",
+ "get_room",
+ "get_destination_retry_timings",
+ "set_destination_retry_timings",
+ "have_events",
+ ]),
+ resource_for_federation=NonCallableMock(),
+ http_client=NonCallableMock(spec_set=[]),
+ notifier=NonCallableMock(spec_set=["on_new_room_event"]),
+ handlers=NonCallableMock(spec_set=[
+ "room_member_handler",
+ "federation_handler",
+ ]),
+ auth=self.auth,
+ state_handler=self.state_handler,
+ keyring=Mock(),
+ )
+
+ self.datastore = hs.get_datastore()
+ self.handlers = hs.get_handlers()
+ self.notifier = hs.get_notifier()
+ self.hs = hs
+
+ self.handlers.federation_handler = FederationHandler(self.hs)
+
+ @defer.inlineCallbacks
+ def test_msg(self):
+ pdu = FrozenEvent({
+ "type": EventTypes.Message,
+ "room_id": "foo",
+ "content": {"msgtype": u"fooo"},
+ "origin_server_ts": 0,
+ "event_id": "$a:b",
+ "user_id":"@a:b",
+ "origin": "b",
+ "auth_events": [],
+ "hashes": {"sha256":"AcLrgtUIqqwaGoHhrEvYG1YLDIsVPYJdSRGhkp3jJp8"},
+ })
+
+ self.datastore.persist_event.return_value = defer.succeed((1, 1))
+ self.datastore.get_room.return_value = defer.succeed(True)
+ self.auth.check_host_in_room.return_value = defer.succeed(True)
+
+ retry_timings_res = {
+ "destination": "",
+ "retry_last_ts": 0,
+ "retry_interval": 0,
+ }
+ self.datastore.get_destination_retry_timings.return_value = (
+ defer.succeed(retry_timings_res)
+ )
+
+ def have_events(event_ids):
+ return defer.succeed({})
+ self.datastore.have_events.side_effect = have_events
+
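+ # stub out state resolution: every event gets an empty context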
+ def annotate(ev, old_state=None, outlier=False):
+ context = Mock()
+ context.current_state = {}
+ context.auth_events = {}
+ return defer.succeed(context)
+ self.state_handler.compute_event_context.side_effect = annotate
+
+ yield self.handlers.federation_handler.on_receive_pdu(
+ "fo", pdu, False
+ )
+
+ self.datastore.persist_event.assert_called_once_with(
+ ANY,
+ is_new_state=True,
+ backfilled=False,
+ current_state=None,
+ context=ANY,
+ )
+
+ self.state_handler.compute_event_context.assert_called_once_with(
+ ANY, old_state=None, outlier=False
+ )
+
+ self.auth.check.assert_called_once_with(ANY, auth_events={})
+
+ self.notifier.on_new_room_event.assert_called_once_with(
+ ANY, 1, 1, extra_users=[]
+ )
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
new file mode 100644
index 00000000..10d4482c
--- /dev/null
+++ b/tests/handlers/test_presence.py
@@ -0,0 +1,1328 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer, reactor
+
+from mock import Mock, call, ANY, NonCallableMock
+import json
+
+from tests.utils import (
+ MockHttpResource, MockClock, DeferredMockCallable, setup_test_homeserver
+)
+
+from synapse.api.constants import PresenceState
+from synapse.api.errors import SynapseError
+from synapse.handlers.presence import PresenceHandler, UserPresenceCache
+from synapse.streams.config import SourcePaginationConfig
+from synapse.storage.transactions import DestinationsTable
+from synapse.types import UserID
+
+OFFLINE = PresenceState.OFFLINE
+UNAVAILABLE = PresenceState.UNAVAILABLE
+ONLINE = PresenceState.ONLINE
+
+
+def _expect_edu(destination, edu_type, content, origin="test"):
+ return {
+ "origin": origin,
+ "origin_server_ts": 1000000,
+ "pdus": [],
+ "edus": [
+ {
+ "edu_type": edu_type,
+ "content": content,
+ }
+ ],
+ "pdu_failures": [],
+ }
+
+
+def _make_edu_json(origin, edu_type, content):
+ return json.dumps(_expect_edu("test", edu_type, content, origin=origin))
+
+
+class JustPresenceHandlers(object):
+ def __init__(self, hs):
+ self.presence_handler = PresenceHandler(hs)
+
+
+class PresenceTestCase(unittest.TestCase):
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.clock = MockClock()
+
+ self.mock_federation_resource = MockHttpResource()
+
+ self.mock_http_client = Mock(spec=[])
+ self.mock_http_client.put_json = DeferredMockCallable()
+
+ hs_kwargs = {}
+ if hasattr(self, "make_datastore_mock"):
+ hs_kwargs["datastore"] = self.make_datastore_mock()
+
+ hs = yield setup_test_homeserver(
+ clock=self.clock,
+ handlers=None,
+ resource_for_federation=self.mock_federation_resource,
+ http_client=self.mock_http_client,
+ keyring=Mock(),
+ **hs_kwargs
+ )
+ hs.handlers = JustPresenceHandlers(hs)
+
+ self.datastore = hs.get_datastore()
+
+ self.setUp_roommemberhandler_mocks(hs.handlers)
+
+ self.handler = hs.get_handlers().presence_handler
+ self.event_source = hs.get_event_sources().sources["presence"]
+
+ self.distributor = hs.get_distributor()
+ self.distributor.declare("user_joined_room")
+
+ yield self.setUp_users(hs)
+
+ def setUp_roommemberhandler_mocks(self, handlers):
+ self.room_id = "a-room"
+ self.room_members = []
+
+ room_member_handler = handlers.room_member_handler = Mock(spec=[
+ "get_joined_rooms_for_user",
+ "get_room_members",
+ "fetch_room_distributions_into",
+ ])
+ self.room_member_handler = room_member_handler
+
+ def get_rooms_for_user(user):
+ if user in self.room_members:
+ return defer.succeed([self.room_id])
+ else:
+ return defer.succeed([])
+ room_member_handler.get_joined_rooms_for_user = get_rooms_for_user
+
+ def get_room_members(room_id):
+ if room_id == self.room_id:
+ return defer.succeed(self.room_members)
+ else:
+ return defer.succeed([])
+ room_member_handler.get_room_members = get_room_members
+
+ @defer.inlineCallbacks
+ def fetch_room_distributions_into(room_id, localusers=None,
+ remotedomains=None, ignore_user=None):
+
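+ # split the room's members into local users and remote domains,
+ # skipping ignore_user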
+ members = yield get_room_members(room_id)
+ for member in members:
+ if ignore_user is not None and member == ignore_user:
+ continue
+
+ if member.is_mine:
+ if localusers is not None:
+ localusers.add(member)
+ else:
+ if remotedomains is not None:
+ remotedomains.add(member.domain)
+ room_member_handler.fetch_room_distributions_into = (
+ fetch_room_distributions_into)
+
+ self.setUp_datastore_room_mocks(self.datastore)
+
+ def setUp_datastore_room_mocks(self, datastore):
+ def get_room_hosts(room_id):
+ if room_id == self.room_id:
+ hosts = set([u.domain for u in self.room_members])
+ return defer.succeed(hosts)
+ else:
+ return defer.succeed([])
+ datastore.get_joined_hosts_for_room = get_room_hosts
+
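+ # here "intersect" reduces to: every user in userlist is a member of
+ # the single test room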
+ def user_rooms_intersect(userlist):
+ room_member_ids = map(lambda u: u.to_string(), self.room_members)
+
+ shared = all(map(lambda i: i in room_member_ids, userlist))
+ return defer.succeed(shared)
+ datastore.user_rooms_intersect = user_rooms_intersect
+
+ @defer.inlineCallbacks
+ def setUp_users(self, hs):
+ # Some local users to test with
+ self.u_apple = UserID.from_string("@apple:test")
+ self.u_banana = UserID.from_string("@banana:test")
+ self.u_clementine = UserID.from_string("@clementine:test")
+
+ for u in self.u_apple, self.u_banana, self.u_clementine:
+ yield self.datastore.create_presence(u.localpart)
+
+ yield self.datastore.set_presence_state(
+ self.u_apple.localpart, {"state": ONLINE, "status_msg": "Online"}
+ )
+
+ # ID of a local user that does not exist
+ self.u_durian = UserID.from_string("@durian:test")
+
+ # A remote user
+ self.u_cabbage = UserID.from_string("@cabbage:elsewhere")
+
+
+class MockedDatastorePresenceTestCase(PresenceTestCase):
+ def make_datastore_mock(self):
+ datastore = Mock(spec=[
+ # Bits that Federation needs
+ "prep_send_transaction",
+ "delivered_txn",
+ "get_received_txn_response",
+ "set_received_txn_response",
+ "get_destination_retry_timings",
+ ])
+
+ self.setUp_datastore_federation_mocks(datastore)
+ self.setUp_datastore_presence_mocks(datastore)
+
+ return datastore
+
+ def setUp_datastore_federation_mocks(self, datastore):
+ retry_timings_res = {
+ "destination": "",
+ "retry_last_ts": 0,
+ "retry_interval": 0,
+ }
+ datastore.get_destination_retry_timings.return_value = (
+ defer.succeed(retry_timings_res)
+ )
+
+ def get_received_txn_response(*args):
+ return defer.succeed(None)
+ datastore.get_received_txn_response = get_received_txn_response
+
+ def setUp_datastore_presence_mocks(self, datastore):
+ self.current_user_state = {
+ "apple": OFFLINE,
+ "banana": OFFLINE,
+ "clementine": OFFLINE,
+ "fig": OFFLINE,
+ }
+
+ def get_presence_state(user_localpart):
+ return defer.succeed(
+ {"state": self.current_user_state[user_localpart],
+ "status_msg": None,
+ "mtime": 123456000}
+ )
+ datastore.get_presence_state = get_presence_state
+
+ def set_presence_state(user_localpart, new_state):
+ was = self.current_user_state[user_localpart]
+ self.current_user_state[user_localpart] = new_state["state"]
+ return defer.succeed({"state": was})
+ datastore.set_presence_state = set_presence_state
+
+ def get_presence_list(user_localpart, accepted):
+ if user_localpart not in self.PRESENCE_LIST:
+ return defer.succeed([])
+ return defer.succeed([
+ {"observed_user_id": u, "accepted": accepted} for u in
+ self.PRESENCE_LIST[user_localpart]])
+ datastore.get_presence_list = get_presence_list
+
+ def is_presence_visible(observed_localpart, observer_userid):
+ return True
+ datastore.is_presence_visible = is_presence_visible
+
+ @defer.inlineCallbacks
+ def setUp_users(self, hs):
+ # Some local users to test with
+ self.u_apple = UserID.from_string("@apple:test")
+ self.u_banana = UserID.from_string("@banana:test")
+ self.u_clementine = UserID.from_string("@clementine:test")
+ self.u_durian = UserID.from_string("@durian:test")
+ self.u_elderberry = UserID.from_string("@elderberry:test")
+ self.u_fig = UserID.from_string("@fig:test")
+
+ # Remote user
+ self.u_onion = UserID.from_string("@onion:farm")
+ self.u_potato = UserID.from_string("@potato:remote")
+
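+ # nothing asynchronous to set up; the bare yield below keeps this an
+ # inlineCallbacks-compatible generator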
+ yield
+
+
+class PresenceStateTestCase(PresenceTestCase):
+ """ Tests presence management. """
+ @defer.inlineCallbacks
+ def setUp(self):
+ yield super(PresenceStateTestCase, self).setUp()
+
+ self.mock_start = Mock()
+ self.mock_stop = Mock()
+
+ self.handler.start_polling_presence = self.mock_start
+ self.handler.stop_polling_presence = self.mock_stop
+
+ @defer.inlineCallbacks
+ def test_get_my_state(self):
+ state = yield self.handler.get_state(
+ target_user=self.u_apple, auth_user=self.u_apple
+ )
+
+ self.assertEquals(
+ {"presence": ONLINE, "status_msg": "Online"},
+ state
+ )
+
+ @defer.inlineCallbacks
+ def test_get_allowed_state(self):
+ yield self.datastore.allow_presence_visible(
+ observed_localpart=self.u_apple.localpart,
+ observer_userid=self.u_banana.to_string(),
+ )
+
+ state = yield self.handler.get_state(
+ target_user=self.u_apple, auth_user=self.u_banana
+ )
+
+ self.assertEquals(
+ {"presence": ONLINE, "status_msg": "Online"},
+ state
+ )
+
+ @defer.inlineCallbacks
+ def test_get_same_room_state(self):
+ self.room_members = [self.u_apple, self.u_clementine]
+
+ state = yield self.handler.get_state(
+ target_user=self.u_apple, auth_user=self.u_clementine
+ )
+
+ self.assertEquals(
+ {"presence": ONLINE, "status_msg": "Online"},
+ state
+ )
+
+ @defer.inlineCallbacks
+ def test_get_disallowed_state(self):
+ self.room_members = []
+
+ yield self.assertFailure(
+ self.handler.get_state(
+ target_user=self.u_apple, auth_user=self.u_clementine
+ ),
+ SynapseError
+ )
+
+ @defer.inlineCallbacks
+ def test_set_my_state(self):
+ yield self.handler.set_state(
+ target_user=self.u_apple, auth_user=self.u_apple,
+ state={"presence": UNAVAILABLE, "status_msg": "Away"})
+
+ self.assertEquals(
+ {"state": UNAVAILABLE,
+ "status_msg": "Away",
+ "mtime": 1000000},
+ (yield self.datastore.get_presence_state(self.u_apple.localpart))
+ )
+
+ self.mock_start.assert_called_with(self.u_apple,
+ state={
+ "presence": UNAVAILABLE,
+ "status_msg": "Away",
+ "last_active": 1000000, # MockClock
+ })
+
+ yield self.handler.set_state(
+ target_user=self.u_apple, auth_user=self.u_apple,
+ state={"presence": OFFLINE})
+
+ self.mock_stop.assert_called_with(self.u_apple)
+
+
+class PresenceInvitesTestCase(PresenceTestCase):
+ """ Tests presence management. """
+ @defer.inlineCallbacks
+ def setUp(self):
+ yield super(PresenceInvitesTestCase, self).setUp()
+
+ self.mock_start = Mock()
+ self.mock_stop = Mock()
+
+ self.handler.start_polling_presence = self.mock_start
+ self.handler.stop_polling_presence = self.mock_stop
+
+ @defer.inlineCallbacks
+ def test_invite_local(self):
+ # TODO(paul): This test will likely break if/when real auth permissions
+ # are added; for now the HS will always accept any invite
+
+ yield self.handler.send_invite(
+ observer_user=self.u_apple, observed_user=self.u_banana)
+
+ self.assertEquals(
+ [{"observed_user_id": "@banana:test", "accepted": 1}],
+ (yield self.datastore.get_presence_list(self.u_apple.localpart))
+ )
+ self.assertTrue(
+ (yield self.datastore.is_presence_visible(
+ observed_localpart=self.u_banana.localpart,
+ observer_userid=self.u_apple.to_string(),
+ ))
+ )
+
+ self.mock_start.assert_called_with(
+ self.u_apple, target_user=self.u_banana)
+
+ @defer.inlineCallbacks
+ def test_invite_local_nonexistant(self):
+ yield self.handler.send_invite(
+ observer_user=self.u_apple, observed_user=self.u_durian)
+
+ self.assertEquals(
+ [],
+ (yield self.datastore.get_presence_list(self.u_apple.localpart))
+ )
+
+ @defer.inlineCallbacks
+ def test_invite_remote(self):
+ # Use a different destination, otherwise retry logic might fail the
+ # request
+ u_rocket = UserID.from_string("@rocket:there")
+
+ put_json = self.mock_http_client.put_json
+ put_json.expect_call_and_return(
+ call("there",
+ path="/_matrix/federation/v1/send/1000000/",
+ data=_expect_edu("there", "m.presence_invite",
+ content={
+ "observer_user": "@apple:test",
+ "observed_user": "@rocket:there",
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ yield self.handler.send_invite(
+ observer_user=self.u_apple, observed_user=u_rocket)
+
+ self.assertEquals(
+ [{"observed_user_id": "@rocket:there", "accepted": 0}],
+ (yield self.datastore.get_presence_list(self.u_apple.localpart))
+ )
+
+ yield put_json.await_calls()
+
+ @defer.inlineCallbacks
+ def test_accept_remote(self):
+ # TODO(paul): This test will likely break if/when real auth permissions
+ # are added; for now the HS will always accept any invite
+
+ # Use a different destination, otherwise retry logic might fail the
+ # request
+ u_rocket = UserID.from_string("@rocket:moon")
+
+ put_json = self.mock_http_client.put_json
+ put_json.expect_call_and_return(
+ call("moon",
+ path="/_matrix/federation/v1/send/1000000/",
+ data=_expect_edu("moon", "m.presence_accept",
+ content={
+ "observer_user": "@rocket:moon",
+ "observed_user": "@apple:test",
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000000/",
+ _make_edu_json("elsewhere", "m.presence_invite",
+ content={
+ "observer_user": "@rocket:moon",
+ "observed_user": "@apple:test",
+ }
+ )
+ )
+
+ self.assertTrue(
+ (yield self.datastore.is_presence_visible(
+ observed_localpart=self.u_apple.localpart,
+ observer_userid=u_rocket.to_string(),
+ ))
+ )
+
+ yield put_json.await_calls()
+
+ @defer.inlineCallbacks
+ def test_invited_remote_nonexistant(self):
+ # Use a different destination, otherwise retry logic might fail the
+ # request
+ u_rocket = UserID.from_string("@rocket:sun")
+
+ put_json = self.mock_http_client.put_json
+ put_json.expect_call_and_return(
+ call("sun",
+ path="/_matrix/federation/v1/send/1000000/",
+ data=_expect_edu("sun", "m.presence_deny",
+ content={
+ "observer_user": "@rocket:sun",
+ "observed_user": "@durian:test",
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000000/",
+ _make_edu_json("sun", "m.presence_invite",
+ content={
+ "observer_user": "@rocket:sun",
+ "observed_user": "@durian:test",
+ }
+ )
+ )
+
+ yield put_json.await_calls()
+
+ @defer.inlineCallbacks
+ def test_accepted_remote(self):
+ yield self.datastore.add_presence_list_pending(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_cabbage.to_string(),
+ )
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000000/",
+ _make_edu_json("elsewhere", "m.presence_accept",
+ content={
+ "observer_user": "@apple:test",
+ "observed_user": "@cabbage:elsewhere",
+ }
+ )
+ )
+
+ self.assertEquals(
+ [{"observed_user_id": "@cabbage:elsewhere", "accepted": 1}],
+ (yield self.datastore.get_presence_list(self.u_apple.localpart))
+ )
+
+ self.mock_start.assert_called_with(
+ self.u_apple, target_user=self.u_cabbage)
+
+ @defer.inlineCallbacks
+ def test_denied_remote(self):
+ yield self.datastore.add_presence_list_pending(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid="@eggplant:elsewhere",
+ )
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000000/",
+ _make_edu_json("elsewhere", "m.presence_deny",
+ content={
+ "observer_user": "@apple:test",
+ "observed_user": "@eggplant:elsewhere",
+ }
+ )
+ )
+
+ self.assertEquals(
+ [],
+ (yield self.datastore.get_presence_list(self.u_apple.localpart))
+ )
+
+ @defer.inlineCallbacks
+ def test_drop_local(self):
+ yield self.datastore.add_presence_list_pending(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_banana.to_string(),
+ )
+ yield self.datastore.set_presence_list_accepted(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_banana.to_string(),
+ )
+
+ yield self.handler.drop(
+ observer_user=self.u_apple,
+ observed_user=self.u_banana,
+ )
+
+ self.assertEquals(
+ [],
+ (yield self.datastore.get_presence_list(self.u_apple.localpart))
+ )
+
+ self.mock_stop.assert_called_with(
+ self.u_apple, target_user=self.u_banana)
+
+ @defer.inlineCallbacks
+ def test_drop_remote(self):
+ yield self.datastore.add_presence_list_pending(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_cabbage.to_string(),
+ )
+ yield self.datastore.set_presence_list_accepted(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_cabbage.to_string(),
+ )
+
+ yield self.handler.drop(
+ observer_user=self.u_apple,
+ observed_user=self.u_cabbage,
+ )
+
+ self.assertEquals(
+ [],
+ (yield self.datastore.get_presence_list(self.u_apple.localpart))
+ )
+
+ @defer.inlineCallbacks
+ def test_get_presence_list(self):
+ yield self.datastore.add_presence_list_pending(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_banana.to_string(),
+ )
+ yield self.datastore.set_presence_list_accepted(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_banana.to_string(),
+ )
+
+ presence = yield self.handler.get_presence_list(
+ observer_user=self.u_apple)
+
+ self.assertEquals([
+ {"observed_user": self.u_banana,
+ "presence": OFFLINE,
+ "accepted": 1},
+ ], presence)
+
+
+class PresencePushTestCase(MockedDatastorePresenceTestCase):
+ """ Tests steady-state presence status updates.
+
+ They assert that presence state update messages are pushed around the place
+ when users change state, presuming that the watches are all established.
+
+ These tests are MASSIVELY fragile currently as they poke internals of the
+ presence handler; namely the _local_pushmap and _remote_recvmap.
+ BE WARNED...
+ """
+ PRESENCE_LIST = {
+ 'apple': [ "@banana:test", "@clementine:test" ],
+ 'banana': [ "@apple:test" ],
+ }
+
+ @defer.inlineCallbacks
+ def test_push_local(self):
+ self.room_members = [self.u_apple, self.u_elderberry]
+
+ self.datastore.set_presence_state.return_value = defer.succeed(
+ {"state": ONLINE}
+ )
+
+ # TODO(paul): Gut-wrenching
+ self.handler._user_cachemap[self.u_apple] = UserPresenceCache()
+ self.handler._user_cachemap[self.u_apple].update(
+ {"presence": OFFLINE}, serial=0
+ )
+ apple_set = self.handler._local_pushmap.setdefault("apple", set())
+ apple_set.add(self.u_banana)
+ apple_set.add(self.u_clementine)
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+
+ yield self.handler.set_state(self.u_apple, self.u_apple,
+ {"presence": ONLINE}
+ )
+
+ # Apple sees self-reflection even without room_id
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_apple,
+ from_key=0,
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ self.assertEquals(events,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@apple:test",
+ "presence": ONLINE,
+ "last_active_ago": 0,
+ }},
+ ],
+ msg="Presence event should be visible to self-reflection"
+ )
+
+ # Apple sees self-reflection
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_apple,
+ from_key=0,
+ room_ids=[self.room_id],
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ self.assertEquals(events,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@apple:test",
+ "presence": ONLINE,
+ "last_active_ago": 0,
+ }},
+ ],
+ msg="Presence event should be visible to self-reflection"
+ )
+
+ config = SourcePaginationConfig(from_key=1, to_key=0)
+ (chunk, _) = yield self.event_source.get_pagination_rows(
+ self.u_apple, config, None
+ )
+ self.assertEquals(chunk,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@apple:test",
+ "presence": ONLINE,
+ "last_active_ago": 0,
+ }},
+ ]
+ )
+
+ # Banana sees it because of presence subscription
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_banana,
+ from_key=0,
+ room_ids=[self.room_id],
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ self.assertEquals(events,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@apple:test",
+ "presence": ONLINE,
+ "last_active_ago": 0,
+ }},
+ ],
+ msg="Presence event should be visible to explicit subscribers"
+ )
+
+ # Elderberry sees it because of same room
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_elderberry,
+ from_key=0,
+ room_ids=[self.room_id],
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ self.assertEquals(events,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@apple:test",
+ "presence": ONLINE,
+ "last_active_ago": 0,
+ }},
+ ],
+ msg="Presence event should be visible to other room members"
+ )
+
+ # Durian is not in the room, should not see this event
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_durian,
+ from_key=0,
+ room_ids=[],
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ self.assertEquals(events, [],
+ msg="Presence event should not be visible to others"
+ )
+
+ presence = yield self.handler.get_presence_list(
+ observer_user=self.u_apple, accepted=True)
+
+ self.assertEquals(
+ [
+ {"observed_user": self.u_banana,
+ "presence": OFFLINE,
+ "accepted": True},
+ {"observed_user": self.u_clementine,
+ "presence": OFFLINE,
+ "accepted": True},
+ ],
+ presence
+ )
+
+ # TODO(paul): Gut-wrenching
+ banana_set = self.handler._local_pushmap.setdefault("banana", set())
+ banana_set.add(self.u_apple)
+
+ yield self.handler.set_state(self.u_banana, self.u_banana,
+ {"presence": ONLINE}
+ )
+
+ self.clock.advance_time(2)
+
+ presence = yield self.handler.get_presence_list(
+ observer_user=self.u_apple, accepted=True)
+
+ self.assertEquals([
+ {"observed_user": self.u_banana,
+ "presence": ONLINE,
+ "last_active_ago": 2000,
+ "accepted": True},
+ {"observed_user": self.u_clementine,
+ "presence": OFFLINE,
+ "accepted": True},
+ ], presence)
+
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_apple,
+ from_key=1,
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 2)
+ self.assertEquals(events,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@banana:test",
+ "presence": ONLINE,
+ "last_active_ago": 2000
+ }},
+ ]
+ )
+
+ @defer.inlineCallbacks
+ def test_push_remote(self):
+ put_json = self.mock_http_client.put_json
+ put_json.expect_call_and_return(
+ call("farm",
+ path=ANY, # Can't guarantee which txn ID will be which
+ data=_expect_edu("farm", "m.presence",
+ content={
+ "push": [
+ {"user_id": "@apple:test",
+ "presence": u"online",
+ "last_active_ago": 0},
+ ],
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+ put_json.expect_call_and_return(
+ call("remote",
+ path=ANY, # Can't guarantee which txn ID will be which
+ data=_expect_edu("remote", "m.presence",
+ content={
+ "push": [
+ {"user_id": "@apple:test",
+ "presence": u"online",
+ "last_active_ago": 0},
+ ],
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ self.room_members = [self.u_apple, self.u_onion]
+
+ self.datastore.set_presence_state.return_value = defer.succeed(
+ {"state": ONLINE}
+ )
+
+ # TODO(paul): Gut-wrenching
+ self.handler._user_cachemap[self.u_apple] = UserPresenceCache()
+ self.handler._user_cachemap[self.u_apple].update(
+ {"presence": OFFLINE}, serial=0
+ )
+ apple_set = self.handler._remote_sendmap.setdefault("apple", set())
+ apple_set.add(self.u_potato.domain)
+
+ yield self.handler.set_state(self.u_apple, self.u_apple,
+ {"presence": ONLINE}
+ )
+
+ yield put_json.await_calls()
+
+ @defer.inlineCallbacks
+ def test_recv_remote(self):
+ self.room_members = [self.u_apple, self.u_banana, self.u_potato]
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000000/",
+ _make_edu_json("elsewhere", "m.presence",
+ content={
+ "push": [
+ {"user_id": "@potato:remote",
+ "presence": "online",
+ "last_active_ago": 1000},
+ ],
+ }
+ )
+ )
+
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_apple,
+ from_key=0,
+ room_ids=[self.room_id],
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ self.assertEquals(events,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@potato:remote",
+ "presence": ONLINE,
+ "last_active_ago": 1000,
+ }}
+ ]
+ )
+
+ self.clock.advance_time(2)
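+ # 1000ms had already elapsed remotely, plus 2000ms locally = 3000ms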
+
+ state = yield self.handler.get_state(self.u_potato, self.u_apple)
+
+ self.assertEquals(
+ {"presence": ONLINE, "last_active_ago": 3000},
+ state
+ )
+
+ @defer.inlineCallbacks
+ def test_recv_remote_offline(self):
+ """ Various tests relating to SYN-261 """
+
+ self.room_members = [self.u_apple, self.u_banana, self.u_potato]
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000000/",
+ _make_edu_json("elsewhere", "m.presence",
+ content={
+ "push": [
+ {"user_id": "@potato:remote",
+ "presence": "offline"},
+ ],
+ }
+ )
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_apple,
+ from_key=0,
+ room_ids=[self.room_id,]
+ )
+ self.assertEquals(events,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@potato:remote",
+ "presence": OFFLINE,
+ }}
+ ]
+ )
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000001/",
+ _make_edu_json("elsewhere", "m.presence",
+ content={
+ "push": [
+ {"user_id": "@potato:remote",
+ "presence": "online"},
+ ],
+ }
+ )
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 2)
+
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_apple,
+ from_key=0,
+ room_ids=[self.room_id,]
+ )
+ self.assertEquals(events,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@potato:remote",
+ "presence": ONLINE,
+ }}
+ ]
+ )
+
+ @defer.inlineCallbacks
+ def test_join_room_local(self):
+ self.room_members = [self.u_apple, self.u_banana]
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+
+ # TODO(paul): Gut-wrenching
+ self.handler._user_cachemap[self.u_clementine] = UserPresenceCache()
+ self.handler._user_cachemap[self.u_clementine].update(
+ {
+ "presence": PresenceState.ONLINE,
+ "last_active": self.clock.time_msec(),
+ }, self.u_clementine
+ )
+
+ yield self.distributor.fire("user_joined_room", self.u_clementine,
+ self.room_id
+ )
+
+ self.room_members.append(self.u_clementine)
+
+ (events, _) = yield self.event_source.get_new_events(
+ user=self.u_apple,
+ from_key=0,
+ )
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ self.assertEquals(events,
+ [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@clementine:test",
+ "presence": ONLINE,
+ "last_active_ago": 0,
+ }}
+ ]
+ )
+
+ @defer.inlineCallbacks
+ def test_join_room_remote(self):
+ ## Sending local user state to a newly-joined remote user
+ put_json = self.mock_http_client.put_json
+ put_json.expect_call_and_return(
+ call("remote",
+ path=ANY, # Can't guarantee which txn ID will be which
+ data=_expect_edu("remote", "m.presence",
+ content={
+ "push": [
+ {"user_id": "@apple:test",
+ "presence": "online"},
+ ],
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+ put_json.expect_call_and_return(
+ call("remote",
+ path=ANY, # Can't guarantee which txn ID will be which
+ data=_expect_edu("remote", "m.presence",
+ content={
+ "push": [
+ {"user_id": "@banana:test",
+ "presence": "offline"},
+ ],
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ # TODO(paul): Gut-wrenching
+ self.handler._user_cachemap[self.u_apple] = UserPresenceCache()
+ self.handler._user_cachemap[self.u_apple].update(
+ {"presence": PresenceState.ONLINE}, self.u_apple)
+ self.room_members = [self.u_apple, self.u_banana]
+
+ yield self.distributor.fire("user_joined_room", self.u_potato,
+ self.room_id
+ )
+
+ yield put_json.await_calls()
+
+ ## Sending newly-joined local user state to remote users
+
+ put_json.expect_call_and_return(
+ call("remote",
+ path="/_matrix/federation/v1/send/1000002/",
+ data=_expect_edu("remote", "m.presence",
+ content={
+ "push": [
+ {"user_id": "@clementine:test",
+ "presence": "online"},
+ ],
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ self.handler._user_cachemap[self.u_clementine] = UserPresenceCache()
+ self.handler._user_cachemap[self.u_clementine].update(
+ {"presence": ONLINE}, self.u_clementine)
+ self.room_members.append(self.u_potato)
+
+ yield self.distributor.fire("user_joined_room", self.u_clementine,
+ self.room_id
+ )
+
+ put_json.await_calls()
+
+
+class PresencePollingTestCase(MockedDatastorePresenceTestCase):
+ """ Tests presence status polling. """
+
+ # For this test, we have three local users; apple is watching and is
+ # watched by the other two, but the others don't watch each other.
+ # Additionally clementine is watching a remote user.
+ PRESENCE_LIST = {
+ 'apple': [ "@banana:test", "@clementine:test" ],
+ 'banana': [ "@apple:test" ],
+ 'clementine': [ "@apple:test", "@potato:remote" ],
+ 'fig': [ "@potato:remote" ],
+ }
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ yield super(PresencePollingTestCase, self).setUp()
+
+ self.mock_update_client = Mock()
+
+ def update(*args,**kwargs):
+ return defer.succeed(None)
+ self.mock_update_client.side_effect = update
+
+ self.handler.push_update_to_clients = self.mock_update_client
+
+ @defer.inlineCallbacks
+ def test_push_local(self):
+ # apple goes online
+ yield self.handler.set_state(
+ target_user=self.u_apple, auth_user=self.u_apple,
+ state={"presence": ONLINE}
+ )
+
+ # apple should see both banana and clementine currently offline
+ self.mock_update_client.assert_has_calls([
+ call(users_to_push=[self.u_apple]),
+ call(users_to_push=[self.u_apple]),
+ ], any_order=True)
+
+ # Gut-wrenching tests
+ self.assertTrue("banana" in self.handler._local_pushmap)
+ self.assertTrue(self.u_apple in self.handler._local_pushmap["banana"])
+ self.assertTrue("clementine" in self.handler._local_pushmap)
+ self.assertTrue(self.u_apple in self.handler._local_pushmap["clementine"])
+
+ self.mock_update_client.reset_mock()
+
+ # banana goes online
+ yield self.handler.set_state(
+ target_user=self.u_banana, auth_user=self.u_banana,
+ state={"presence": ONLINE}
+ )
+
+ # apple and banana should now both see each other online
+ self.mock_update_client.assert_has_calls([
+ call(users_to_push=set([self.u_apple]), room_ids=[]),
+ call(users_to_push=[self.u_banana]),
+ ], any_order=True)
+
+ self.assertTrue("apple" in self.handler._local_pushmap)
+ self.assertTrue(self.u_banana in self.handler._local_pushmap["apple"])
+
+ self.mock_update_client.reset_mock()
+
+ # apple goes offline
+ yield self.handler.set_state(
+ target_user=self.u_apple, auth_user=self.u_apple,
+ state={"presence": OFFLINE}
+ )
+
+ # banana should now be told apple is offline
+ self.mock_update_client.assert_has_calls([
+ call(users_to_push=set([self.u_banana, self.u_apple]), room_ids=[]),
+ ], any_order=True)
+
+ self.assertFalse("banana" in self.handler._local_pushmap)
+ self.assertFalse("clementine" in self.handler._local_pushmap)
+
+ @defer.inlineCallbacks
+ def test_remote_poll_send(self):
+ put_json = self.mock_http_client.put_json
+ put_json.expect_call_and_return(
+ call("remote",
+ path=ANY,
+ data=_expect_edu("remote", "m.presence",
+ content={
+ "poll": [ "@potato:remote" ],
+ },
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ put_json.expect_call_and_return(
+ call("remote",
+ path=ANY,
+ data=_expect_edu("remote", "m.presence",
+ content={
+ "push": [ {
+ "user_id": "@clementine:test",
+ "presence": OFFLINE,
+ }],
+ },
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ # clementine goes online
+ yield self.handler.set_state(
+ target_user=self.u_clementine, auth_user=self.u_clementine,
+ state={"presence": ONLINE}
+ )
+
+ yield put_json.await_calls()
+
+ # Gut-wrenching tests
+ self.assertTrue(self.u_potato in self.handler._remote_recvmap,
+ msg="expected potato to be in _remote_recvmap"
+ )
+ self.assertTrue(self.u_clementine in
+ self.handler._remote_recvmap[self.u_potato])
+
+ put_json.expect_call_and_return(
+ call("remote",
+ path=ANY,
+ data=_expect_edu("remote", "m.presence",
+ content={
+ "push": [ {
+ "user_id": "@fig:test",
+ "presence": OFFLINE,
+ }],
+ },
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ # fig goes online; shouldn't send a second poll
+ yield self.handler.set_state(
+ target_user=self.u_fig, auth_user=self.u_fig,
+ state={"presence": ONLINE}
+ )
+
+ yield put_json.await_calls()
+
+ # fig goes offline
+ yield self.handler.set_state(
+ target_user=self.u_fig, auth_user=self.u_fig,
+ state={"presence": OFFLINE}
+ )
+
+ reactor.iterate(delay=0)
+
+ put_json.assert_had_no_calls()
+
+ put_json.expect_call_and_return(
+ call("remote",
+ path=ANY,
+ data=_expect_edu("remote", "m.presence",
+ content={
+ "unpoll": [ "@potato:remote" ],
+ },
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ # clementine goes offline
+ yield self.handler.set_state(
+ target_user=self.u_clementine, auth_user=self.u_clementine,
+ state={"presence": OFFLINE}
+ )
+
+ yield put_json.await_calls()
+
+ self.assertFalse(self.u_potato in self.handler._remote_recvmap,
+ msg="expected potato not to be in _remote_recvmap"
+ )
+
+ @defer.inlineCallbacks
+ def test_remote_poll_receive(self):
+ put_json = self.mock_http_client.put_json
+ put_json.expect_call_and_return(
+ call("remote",
+ path="/_matrix/federation/v1/send/1000000/",
+ data=_expect_edu("remote", "m.presence",
+ content={
+ "push": [
+ {"user_id": "@banana:test",
+ "presence": "offline",
+ "status_msg": None},
+ ],
+ },
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000000/",
+ _make_edu_json("remote", "m.presence",
+ content={
+ "poll": [ "@banana:test" ],
+ },
+ )
+ )
+
+ yield put_json.await_calls()
+
+ # Gut-wrenching tests
+ self.assertTrue(self.u_banana in self.handler._remote_sendmap)
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000001/",
+ _make_edu_json("remote", "m.presence",
+ content={
+ "unpoll": [ "@banana:test" ],
+ }
+ )
+ )
+
+ # Gut-wrenching tests
+ self.assertFalse(self.u_banana in self.handler._remote_sendmap)
diff --git a/tests/handlers/test_presencelike.py b/tests/handlers/test_presencelike.py
new file mode 100644
index 00000000..19107cae
--- /dev/null
+++ b/tests/handlers/test_presencelike.py
@@ -0,0 +1,311 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This file contains tests of the "presence-like" data that is shared between
+presence and profiles; namely, the displayname and avatar_url."""
+
+from tests import unittest
+from twisted.internet import defer
+
+from mock import Mock, call, ANY, NonCallableMock
+
+from ..utils import MockClock, setup_test_homeserver
+
+from synapse.api.constants import PresenceState
+from synapse.handlers.presence import PresenceHandler
+from synapse.handlers.profile import ProfileHandler
+from synapse.types import UserID
+
+
+OFFLINE = PresenceState.OFFLINE
+UNAVAILABLE = PresenceState.UNAVAILABLE
+ONLINE = PresenceState.ONLINE
+
+
+class MockReplication(object):
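+ """A minimal stand-in for the federation layer: tests register EDU
+ handlers on it, and received_edu dispatches straight to them. Query
+ handlers are accepted but ignored."""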
+ def __init__(self):
+ self.edu_handlers = {}
+
+ def register_edu_handler(self, edu_type, handler):
+ self.edu_handlers[edu_type] = handler
+
+ def register_query_handler(self, query_type, handler):
+ pass
+
+ def received_edu(self, origin, edu_type, content):
+ self.edu_handlers[edu_type](origin, content)
+
+
+class PresenceAndProfileHandlers(object):
+ def __init__(self, hs):
+ self.presence_handler = PresenceHandler(hs)
+ self.profile_handler = ProfileHandler(hs)
+
+
+class PresenceProfilelikeDataTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver(
+ clock=MockClock(),
+ datastore=Mock(spec=[
+ "set_presence_state",
+ "is_presence_visible",
+ "set_profile_displayname",
+ "get_rooms_for_user",
+ ]),
+ handlers=None,
+ resource_for_federation=Mock(),
+ http_client=None,
+ replication_layer=MockReplication(),
+ ratelimiter=NonCallableMock(spec_set=[
+ "send_message",
+ ]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+ hs.handlers = PresenceAndProfileHandlers(hs)
+
+ self.datastore = hs.get_datastore()
+
+ self.replication = hs.get_replication_layer()
+ self.replication.send_edu = Mock()
+
+ def send_edu(*args, **kwargs):
+ # print "send_edu: %s, %s" % (args, kwargs)
+ return defer.succeed((200, "OK"))
+ self.replication.send_edu.side_effect = send_edu
+
+ def get_profile_displayname(user_localpart):
+ return defer.succeed("Frank")
+ self.datastore.get_profile_displayname = get_profile_displayname
+
+ def is_presence_visible(*args, **kwargs):
+ return defer.succeed(False)
+ self.datastore.is_presence_visible = is_presence_visible
+
+ def get_profile_avatar_url(user_localpart):
+ return defer.succeed("http://foo")
+ self.datastore.get_profile_avatar_url = get_profile_avatar_url
+
+ self.presence_list = [
+ {"observed_user_id": "@banana:test", "accepted": True},
+ {"observed_user_id": "@clementine:test", "accepted": True},
+ ]
+ def get_presence_list(user_localpart, accepted=None):
+ return defer.succeed(self.presence_list)
+ self.datastore.get_presence_list = get_presence_list
+
+ def user_rooms_intersect(userlist):
+ return defer.succeed(False)
+ self.datastore.user_rooms_intersect = user_rooms_intersect
+
+ self.handlers = hs.get_handlers()
+
+ self.mock_update_client = Mock()
+ def update(*args, **kwargs):
+ # print "mock_update_client: %s, %s" %(args, kwargs)
+ return defer.succeed(None)
+ self.mock_update_client.side_effect = update
+
+ self.handlers.presence_handler.push_update_to_clients = (
+ self.mock_update_client)
+
+ hs.handlers.room_member_handler = Mock(spec=[
+ "get_joined_rooms_for_user",
+ ])
+ hs.handlers.room_member_handler.get_joined_rooms_for_user = (
+ lambda u: defer.succeed([]))
+
+ # Some local users to test with
+ self.u_apple = UserID.from_string("@apple:test")
+ self.u_banana = UserID.from_string("@banana:test")
+ self.u_clementine = UserID.from_string("@clementine:test")
+
+ # Remote user
+ self.u_potato = UserID.from_string("@potato:remote")
+
+ self.mock_get_joined = (
+ self.datastore.get_rooms_for_user
+ )
+
+ @defer.inlineCallbacks
+ def test_set_my_state(self):
+ self.presence_list = [
+ {"observed_user_id": "@banana:test", "accepted": True},
+ {"observed_user_id": "@clementine:test", "accepted": True},
+ ]
+
+ mocked_set = self.datastore.set_presence_state
+ mocked_set.return_value = defer.succeed({"state": OFFLINE})
+
+ yield self.handlers.presence_handler.set_state(
+ target_user=self.u_apple, auth_user=self.u_apple,
+ state={"presence": UNAVAILABLE, "status_msg": "Away"})
+
+ mocked_set.assert_called_with("apple",
+ {"state": UNAVAILABLE, "status_msg": "Away"}
+ )
+
+ @defer.inlineCallbacks
+ def test_push_local(self):
+ def get_joined(*args):
+ return defer.succeed([])
+
+ self.mock_get_joined.side_effect = get_joined
+
+ self.presence_list = [
+ {"observed_user_id": "@banana:test", "accepted": True},
+ {"observed_user_id": "@clementine:test", "accepted": True},
+ ]
+
+ self.datastore.set_presence_state.return_value = defer.succeed(
+ {"state": ONLINE}
+ )
+
+ # TODO(paul): Gut-wrenching
+ from synapse.handlers.presence import UserPresenceCache
+ self.handlers.presence_handler._user_cachemap[self.u_apple] = (
+ UserPresenceCache()
+ )
+ self.handlers.presence_handler._user_cachemap[self.u_apple].update(
+ {"presence": OFFLINE}, serial=0
+ )
+ apple_set = self.handlers.presence_handler._local_pushmap.setdefault(
+ "apple", set())
+ apple_set.add(self.u_banana)
+ apple_set.add(self.u_clementine)
+
+ yield self.handlers.presence_handler.set_state(self.u_apple,
+ self.u_apple, {"presence": ONLINE}
+ )
+ yield self.handlers.presence_handler.set_state(self.u_banana,
+ self.u_banana, {"presence": ONLINE}
+ )
+
+ presence = yield self.handlers.presence_handler.get_presence_list(
+ observer_user=self.u_apple, accepted=True)
+
+ self.assertEquals([
+ {"observed_user": self.u_banana,
+ "presence": ONLINE,
+ "last_active_ago": 0,
+ "displayname": "Frank",
+ "avatar_url": "http://foo",
+ "accepted": True},
+ {"observed_user": self.u_clementine,
+ "presence": OFFLINE,
+ "accepted": True}
+ ], presence)
+
+ self.mock_update_client.assert_has_calls([
+ call(
+ users_to_push={self.u_apple, self.u_banana, self.u_clementine},
+ room_ids=[]
+ ),
+ ], any_order=True)
+
+ self.mock_update_client.reset_mock()
+
+ self.datastore.set_profile_displayname.return_value = defer.succeed(
+ None)
+
+ yield self.handlers.profile_handler.set_displayname(self.u_apple,
+ self.u_apple, "I am an Apple")
+
+ self.mock_update_client.assert_has_calls([
+ call(
+ users_to_push={self.u_apple, self.u_banana, self.u_clementine},
+ room_ids=[],
+ ),
+ ], any_order=True)
+
+ @defer.inlineCallbacks
+ def test_push_remote(self):
+ self.presence_list = [
+ {"observed_user_id": "@potato:remote", "accepted": True},
+ ]
+
+ self.datastore.set_presence_state.return_value = defer.succeed(
+ {"state": ONLINE}
+ )
+
+ # TODO(paul): Gut-wrenching
+ from synapse.handlers.presence import UserPresenceCache
+ self.handlers.presence_handler._user_cachemap[self.u_apple] = (
+ UserPresenceCache()
+ )
+ self.handlers.presence_handler._user_cachemap[self.u_apple].update(
+ {"presence": OFFLINE}, serial=0
+ )
+ apple_set = self.handlers.presence_handler._remote_sendmap.setdefault(
+ "apple", set())
+ apple_set.add(self.u_potato.domain)
+
+ yield self.handlers.presence_handler.set_state(self.u_apple,
+ self.u_apple, {"presence": ONLINE}
+ )
+
+ self.replication.send_edu.assert_called_with(
+ destination="remote",
+ edu_type="m.presence",
+ content={
+ "push": [
+ {"user_id": "@apple:test",
+ "presence": "online",
+ "last_active_ago": 0,
+ "displayname": "Frank",
+ "avatar_url": "http://foo"},
+ ],
+ },
+ )
+
+ @defer.inlineCallbacks
+ def test_recv_remote(self):
+ self.presence_list = [
+ {"observed_user_id": "@banana:test"},
+ {"observed_user_id": "@clementine:test"},
+ ]
+
+ # TODO(paul): Gut-wrenching
+ potato_set = self.handlers.presence_handler._remote_recvmap.setdefault(
+ self.u_potato, set()
+ )
+ potato_set.add(self.u_apple)
+
+ yield self.replication.received_edu(
+ "remote", "m.presence", {
+ "push": [
+ {"user_id": "@potato:remote",
+ "presence": "online",
+ "displayname": "Frank",
+ "avatar_url": "http://foo"},
+ ],
+ }
+ )
+
+ self.mock_update_client.assert_called_with(
+ users_to_push=set([self.u_apple]),
+ room_ids=[],
+ )
+
+ state = yield self.handlers.presence_handler.get_state(self.u_potato,
+ self.u_apple)
+
+ self.assertEquals(
+ {"presence": ONLINE,
+ "displayname": "Frank",
+ "avatar_url": "http://foo"},
+ state)
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
new file mode 100644
index 00000000..31f03d73
--- /dev/null
+++ b/tests/handlers/test_profile.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from mock import Mock, NonCallableMock
+
+from synapse.api.errors import AuthError
+from synapse.handlers.profile import ProfileHandler
+from synapse.types import UserID
+
+from tests.utils import setup_test_homeserver
+
+
+class ProfileHandlers(object):
+ def __init__(self, hs):
+ self.profile_handler = ProfileHandler(hs)
+
+
+class ProfileTestCase(unittest.TestCase):
+ """ Tests profile management. """
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_federation = Mock(spec=[
+ "make_query",
+ ])
+
+ self.query_handlers = {}
+ def register_query_handler(query_type, handler):
+ self.query_handlers[query_type] = handler
+ self.mock_federation.register_query_handler = register_query_handler
+
+ hs = yield setup_test_homeserver(
+ http_client=None,
+ handlers=None,
+ resource_for_federation=Mock(),
+ replication_layer=self.mock_federation,
+ ratelimiter=NonCallableMock(spec_set=[
+ "send_message",
+ ])
+ )
+
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.handlers = ProfileHandlers(hs)
+
+ self.store = hs.get_datastore()
+
+ self.frank = UserID.from_string("@1234ABCD:test")
+ self.bob = UserID.from_string("@4567:test")
+ self.alice = UserID.from_string("@alice:remote")
+
+ yield self.store.create_profile(self.frank.localpart)
+
+ self.handler = hs.get_handlers().profile_handler
+
+ # TODO(paul): Icky signal declaring... boo
+ hs.get_distributor().declare("changed_presencelike_data")
+
+ @defer.inlineCallbacks
+ def test_get_my_name(self):
+ yield self.store.set_profile_displayname(
+ self.frank.localpart, "Frank"
+ )
+
+ displayname = yield self.handler.get_displayname(self.frank)
+
+ self.assertEquals("Frank", displayname)
+
+ @defer.inlineCallbacks
+ def test_set_my_name(self):
+ yield self.handler.set_displayname(self.frank, self.frank, "Frank Jr.")
+
+ self.assertEquals(
+ (yield self.store.get_profile_displayname(self.frank.localpart)),
+ "Frank Jr."
+ )
+
+ @defer.inlineCallbacks
+ def test_set_my_name_noauth(self):
+ d = self.handler.set_displayname(self.frank, self.bob, "Frank Jr.")
+
+ yield self.assertFailure(d, AuthError)
+
+ @defer.inlineCallbacks
+ def test_get_other_name(self):
+ self.mock_federation.make_query.return_value = defer.succeed(
+ {"displayname": "Alice"}
+ )
+
+ displayname = yield self.handler.get_displayname(self.alice)
+
+ self.assertEquals(displayname, "Alice")
+ self.mock_federation.make_query.assert_called_with(
+ destination="remote",
+ query_type="profile",
+ args={"user_id": "@alice:remote", "field": "displayname"}
+ )
+
+ @defer.inlineCallbacks
+ def test_incoming_fed_query(self):
+ yield self.store.create_profile("caroline")
+ yield self.store.set_profile_displayname("caroline", "Caroline")
+
+ response = yield self.query_handlers["profile"](
+ {"user_id": "@caroline:test", "field": "displayname"}
+ )
+
+ self.assertEquals({"displayname": "Caroline"}, response)
+
+ @defer.inlineCallbacks
+ def test_get_my_avatar(self):
+ yield self.store.set_profile_avatar_url(
+ self.frank.localpart, "http://my.server/me.png"
+ )
+
+ avatar_url = yield self.handler.get_avatar_url(self.frank)
+
+ self.assertEquals("http://my.server/me.png", avatar_url)
+
+ @defer.inlineCallbacks
+ def test_set_my_avatar(self):
+ yield self.handler.set_avatar_url(self.frank, self.frank,
+ "http://my.server/pic.gif")
+
+ self.assertEquals(
+ (yield self.store.get_profile_avatar_url(self.frank.localpart)),
+ "http://my.server/pic.gif"
+ )
diff --git a/tests/handlers/test_room.py b/tests/handlers/test_room.py
new file mode 100644
index 00000000..2a7553f9
--- /dev/null
+++ b/tests/handlers/test_room.py
@@ -0,0 +1,404 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+from .. import unittest
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.handlers.room import RoomMemberHandler, RoomCreationHandler
+from synapse.handlers.profile import ProfileHandler
+from synapse.types import UserID
+from ..utils import setup_test_homeserver
+
+from mock import Mock, NonCallableMock
+
+
+class RoomMemberHandlerTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.hostname = "red"
+ hs = yield setup_test_homeserver(
+ self.hostname,
+ ratelimiter=NonCallableMock(spec_set=[
+ "send_message",
+ ]),
+ datastore=NonCallableMock(spec_set=[
+ "persist_event",
+ "get_room_member",
+ "get_room",
+ "store_room",
+ "get_latest_events_in_room",
+ "add_event_hashes",
+ ]),
+ resource_for_federation=NonCallableMock(),
+ http_client=NonCallableMock(spec_set=[]),
+ notifier=NonCallableMock(spec_set=["on_new_room_event"]),
+ handlers=NonCallableMock(spec_set=[
+ "room_member_handler",
+ "profile_handler",
+ "federation_handler",
+ ]),
+ auth=NonCallableMock(spec_set=[
+ "check",
+ "add_auth_events",
+ "check_host_in_room",
+ ]),
+ state_handler=NonCallableMock(spec_set=[
+ "compute_event_context",
+ "get_current_state",
+ ]),
+ )
+
+ self.federation = NonCallableMock(spec_set=[
+ "handle_new_event",
+ "send_invite",
+ "get_state_for_room",
+ ])
+
+ self.datastore = hs.get_datastore()
+ self.handlers = hs.get_handlers()
+ self.notifier = hs.get_notifier()
+ self.state_handler = hs.get_state_handler()
+ self.distributor = hs.get_distributor()
+ self.auth = hs.get_auth()
+ self.hs = hs
+
+ self.handlers.federation_handler = self.federation
+
+ self.distributor.declare("collect_presencelike_data")
+
+ self.handlers.room_member_handler = RoomMemberHandler(self.hs)
+ self.handlers.profile_handler = ProfileHandler(self.hs)
+ self.room_member_handler = self.handlers.room_member_handler
+
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ self.datastore.persist_event.return_value = (1, 1)
+ self.datastore.add_event_hashes.return_value = []
+
+ @defer.inlineCallbacks
+ def test_invite(self):
+ room_id = "!foo:red"
+ user_id = "@bob:red"
+ target_user_id = "@red:blue"
+ content = {"membership": Membership.INVITE}
+
+ builder = self.hs.get_event_builder_factory().new({
+ "type": EventTypes.Member,
+ "sender": user_id,
+ "state_key": target_user_id,
+ "room_id": room_id,
+ "content": content,
+ })
+
+ self.datastore.get_latest_events_in_room.return_value = (
+ defer.succeed([])
+ )
+
+ def annotate(_):
+ ctx = Mock()
+ ctx.current_state = {
+ (EventTypes.Member, "@alice:green"): self._create_member(
+ user_id="@alice:green",
+ room_id=room_id,
+ ),
+ (EventTypes.Member, "@bob:red"): self._create_member(
+ user_id="@bob:red",
+ room_id=room_id,
+ ),
+ }
+ ctx.prev_state_events = []
+
+ return defer.succeed(ctx)
+
+ self.state_handler.compute_event_context.side_effect = annotate
+
+ def add_auth(_, ctx):
+ ctx.auth_events = ctx.current_state[
+ (EventTypes.Member, "@bob:red")
+ ]
+
+ return defer.succeed(True)
+ self.auth.add_auth_events.side_effect = add_auth
+
+ def send_invite(domain, event):
+ return defer.succeed(event)
+
+ self.federation.send_invite.side_effect = send_invite
+
+ room_handler = self.room_member_handler
+ event, context = yield room_handler._create_new_client_event(
+ builder
+ )
+
+ yield room_handler.change_membership(event, context)
+
+ self.state_handler.compute_event_context.assert_called_once_with(
+ builder
+ )
+
+ self.auth.add_auth_events.assert_called_once_with(
+ builder, context
+ )
+
+ self.federation.send_invite.assert_called_once_with(
+ "blue", event,
+ )
+
+ self.datastore.persist_event.assert_called_once_with(
+ event, context=context,
+ )
+ self.notifier.on_new_room_event.assert_called_once_with(
+ event, 1, 1, extra_users=[UserID.from_string(target_user_id)]
+ )
+ self.assertFalse(self.datastore.get_room.called)
+ self.assertFalse(self.datastore.store_room.called)
+ self.assertFalse(self.federation.get_state_for_room.called)
+
+ @defer.inlineCallbacks
+ def test_simple_join(self):
+ room_id = "!foo:red"
+ user_id = "@bob:red"
+ user = UserID.from_string(user_id)
+
+ join_signal_observer = Mock()
+ self.distributor.observe("user_joined_room", join_signal_observer)
+
+ builder = self.hs.get_event_builder_factory().new({
+ "type": EventTypes.Member,
+ "sender": user_id,
+ "state_key": user_id,
+ "room_id": room_id,
+ "content": {"membership": Membership.JOIN},
+ })
+
+ self.datastore.get_latest_events_in_room.return_value = (
+ defer.succeed([])
+ )
+
+ def annotate(_):
+ ctx = Mock()
+ ctx.current_state = {
+ (EventTypes.Member, "@bob:red"): self._create_member(
+ user_id="@bob:red",
+ room_id=room_id,
+ membership=Membership.INVITE
+ ),
+ }
+ ctx.prev_state_events = []
+
+ return defer.succeed(ctx)
+
+ self.state_handler.compute_event_context.side_effect = annotate
+
+ def add_auth(_, ctx):
+ ctx.auth_events = ctx.current_state[
+ (EventTypes.Member, "@bob:red")
+ ]
+
+ return defer.succeed(True)
+ self.auth.add_auth_events.side_effect = add_auth
+
+ room_handler = self.room_member_handler
+ event, context = yield room_handler._create_new_client_event(
+ builder
+ )
+
+ # Actual invocation
+ yield room_handler.change_membership(event, context)
+
+ self.federation.handle_new_event.assert_called_once_with(
+ event, destinations=set()
+ )
+
+ self.datastore.persist_event.assert_called_once_with(
+ event, context=context
+ )
+ self.notifier.on_new_room_event.assert_called_once_with(
+ event, 1, 1, extra_users=[user]
+ )
+
+ join_signal_observer.assert_called_with(
+ user=user, room_id=room_id
+ )
+
+ def _create_member(self, user_id, room_id, membership=Membership.JOIN):
+ builder = self.hs.get_event_builder_factory().new({
+ "type": EventTypes.Member,
+ "sender": user_id,
+ "state_key": user_id,
+ "room_id": room_id,
+ "content": {"membership": membership},
+ })
+
+ return builder.build()
+
+ @defer.inlineCallbacks
+ def test_simple_leave(self):
+ room_id = "!foo:red"
+ user_id = "@bob:red"
+ user = UserID.from_string(user_id)
+
+ builder = self.hs.get_event_builder_factory().new({
+ "type": EventTypes.Member,
+ "sender": user_id,
+ "state_key": user_id,
+ "room_id": room_id,
+ "content": {"membership": Membership.LEAVE},
+ })
+
+ self.datastore.get_latest_events_in_room.return_value = (
+ defer.succeed([])
+ )
+
+ def annotate(_):
+ ctx = Mock()
+ ctx.current_state = {
+ (EventTypes.Member, "@bob:red"): self._create_member(
+ user_id="@bob:red",
+ room_id=room_id,
+ membership=Membership.JOIN
+ ),
+ }
+ ctx.prev_state_events = []
+
+ return defer.succeed(ctx)
+
+ self.state_handler.compute_event_context.side_effect = annotate
+
+ def add_auth(_, ctx):
+ ctx.auth_events = ctx.current_state[
+ (EventTypes.Member, "@bob:red")
+ ]
+
+ return defer.succeed(True)
+ self.auth.add_auth_events.side_effect = add_auth
+
+ room_handler = self.room_member_handler
+ event, context = yield room_handler._create_new_client_event(
+ builder
+ )
+
+ leave_signal_observer = Mock()
+ self.distributor.observe("user_left_room", leave_signal_observer)
+
+ # Actual invocation
+ yield room_handler.change_membership(event, context)
+
+ self.federation.handle_new_event.assert_called_once_with(
+ event, destinations=set(['red'])
+ )
+
+ self.datastore.persist_event.assert_called_once_with(
+ event, context=context
+ )
+ self.notifier.on_new_room_event.assert_called_once_with(
+ event, 1, 1, extra_users=[user]
+ )
+
+ leave_signal_observer.assert_called_with(
+ user=user, room_id=room_id
+ )
+
+
+class RoomCreationTest(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.hostname = "red"
+
+ hs = yield setup_test_homeserver(
+ self.hostname,
+ datastore=NonCallableMock(spec_set=[
+ "store_room",
+ "snapshot_room",
+ "persist_event",
+ "get_joined_hosts_for_room",
+ ]),
+ http_client=NonCallableMock(spec_set=[]),
+ notifier=NonCallableMock(spec_set=["on_new_room_event"]),
+ handlers=NonCallableMock(spec_set=[
+ "room_creation_handler",
+ "message_handler",
+ ]),
+ auth=NonCallableMock(spec_set=["check", "add_auth_events"]),
+ ratelimiter=NonCallableMock(spec_set=[
+ "send_message",
+ ]),
+ )
+
+ self.federation = NonCallableMock(spec_set=[
+ "handle_new_event",
+ ])
+
+ self.handlers = hs.get_handlers()
+
+ self.handlers.room_creation_handler = RoomCreationHandler(hs)
+ self.room_creation_handler = self.handlers.room_creation_handler
+
+ self.message_handler = self.handlers.message_handler
+
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ @defer.inlineCallbacks
+ def test_room_creation(self):
+ user_id = "@foo:red"
+ room_id = "!bobs_room:red"
+ config = {"visibility": "private"}
+
+ yield self.room_creation_handler.create_room(
+ user_id=user_id,
+ room_id=room_id,
+ config=config,
+ )
+
+ self.assertTrue(self.message_handler.create_and_send_event.called)
+
+ event_dicts = [
+ e[0][0]
+ for e in self.message_handler.create_and_send_event.call_args_list
+ ]
+
+ self.assertTrue(len(event_dicts) > 3)
+
+ self.assertDictContainsSubset(
+ {
+ "type": EventTypes.Create,
+ "sender": user_id,
+ "room_id": room_id,
+ },
+ event_dicts[0]
+ )
+
+ self.assertEqual(user_id, event_dicts[0]["content"]["creator"])
+
+ self.assertDictContainsSubset(
+ {
+ "type": EventTypes.Member,
+ "sender": user_id,
+ "room_id": room_id,
+ "state_key": user_id,
+ },
+ event_dicts[1]
+ )
+
+ self.assertEqual(
+ Membership.JOIN,
+ event_dicts[1]["content"]["membership"]
+ )
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
new file mode 100644
index 00000000..2d7ba435
--- /dev/null
+++ b/tests/handlers/test_typing.py
@@ -0,0 +1,414 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from mock import Mock, call, ANY
+import json
+
+from ..utils import (
+ MockHttpResource, MockClock, DeferredMockCallable, setup_test_homeserver
+)
+
+from synapse.api.errors import AuthError
+from synapse.handlers.typing import TypingNotificationHandler
+
+from synapse.storage.transactions import DestinationsTable
+from synapse.types import UserID
+
+
+def _expect_edu(destination, edu_type, content, origin="test"):
+ return {
+ "origin": origin,
+ "origin_server_ts": 1000000,
+ "pdus": [],
+ "edus": [
+ {
+ "edu_type": edu_type,
+ "content": content,
+ }
+ ],
+ "pdu_failures": [],
+ }
+
+
+def _make_edu_json(origin, edu_type, content):
+ return json.dumps(_expect_edu("test", edu_type, content, origin=origin))
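+
+
+# For reference, a sketch of what these helpers build:
+#   _expect_edu("farm", "m.typing", content={"typing": True})
+# returns
+#   {"origin": "test", "origin_server_ts": 1000000, "pdus": [],
+#    "edus": [{"edu_type": "m.typing", "content": {"typing": True}}],
+#    "pdu_failures": []}
+# and _make_edu_json serialises the same structure with the given origin.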
+
+
+class JustTypingNotificationHandlers(object):
+ def __init__(self, hs):
+ self.typing_notification_handler = TypingNotificationHandler(hs)
+
+
+class TypingNotificationsTestCase(unittest.TestCase):
+ """Tests typing notifications to rooms."""
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.clock = MockClock()
+
+ self.mock_http_client = Mock(spec=[])
+ self.mock_http_client.put_json = DeferredMockCallable()
+
+ self.mock_federation_resource = MockHttpResource()
+
+ mock_notifier = Mock(spec=["on_new_event"])
+ self.on_new_event = mock_notifier.on_new_event
+
+ self.auth = Mock(spec=[])
+
+ hs = yield setup_test_homeserver(
+ auth=self.auth,
+ clock=self.clock,
+ datastore=Mock(spec=[
+ # Bits that Federation needs
+ "prep_send_transaction",
+ "delivered_txn",
+ "get_received_txn_response",
+ "set_received_txn_response",
+ "get_destination_retry_timings",
+ ]),
+ handlers=None,
+ notifier=mock_notifier,
+ resource_for_client=Mock(),
+ resource_for_federation=self.mock_federation_resource,
+ http_client=self.mock_http_client,
+ keyring=Mock(),
+ )
+ hs.handlers = JustTypingNotificationHandlers(hs)
+
+ self.handler = hs.get_handlers().typing_notification_handler
+
+ self.event_source = hs.get_event_sources().sources["typing"]
+
+ self.datastore = hs.get_datastore()
+ retry_timings_res = {
+ "destination": "",
+ "retry_last_ts": 0,
+ "retry_interval": 0,
+ }
+ self.datastore.get_destination_retry_timings.return_value = (
+ defer.succeed(retry_timings_res)
+ )
+
+ def get_received_txn_response(*args):
+ return defer.succeed(None)
+ self.datastore.get_received_txn_response = get_received_txn_response
+
+ self.room_id = "a-room"
+
+ # Mock the RoomMemberHandler
+ hs.handlers.room_member_handler = Mock(spec=[])
+ self.room_member_handler = hs.handlers.room_member_handler
+
+ self.room_members = []
+
+ def get_rooms_for_user(user):
+ if user in self.room_members:
+ return defer.succeed([self.room_id])
+ else:
+ return defer.succeed([])
+ self.room_member_handler.get_rooms_for_user = get_rooms_for_user
+
+ def get_room_members(room_id):
+ if room_id == self.room_id:
+ return defer.succeed(self.room_members)
+ else:
+ return defer.succeed([])
+ self.room_member_handler.get_room_members = get_room_members
+
+ def get_joined_rooms_for_user(user):
+ if user in self.room_members:
+ return defer.succeed([self.room_id])
+ else:
+ return defer.succeed([])
+ self.room_member_handler.get_joined_rooms_for_user = get_joined_rooms_for_user
+
+ @defer.inlineCallbacks
+ def fetch_room_distributions_into(room_id, localusers=None,
+ remotedomains=None, ignore_user=None):
+
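+ # Mimic the real RoomMemberHandler helper: partition the room's
+ # members into local users and remote domains, skipping ignore_user.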
+ members = yield get_room_members(room_id)
+ for member in members:
+ if ignore_user is not None and member == ignore_user:
+ continue
+
+ if hs.is_mine(member):
+ if localusers is not None:
+ localusers.add(member)
+ else:
+ if remotedomains is not None:
+ remotedomains.add(member.domain)
+ self.room_member_handler.fetch_room_distributions_into = (
+ fetch_room_distributions_into)
+
+ def check_joined_room(room_id, user_id):
+ if user_id not in [u.to_string() for u in self.room_members]:
+ raise AuthError(401, "User is not in the room")
+
+ self.auth.check_joined_room = check_joined_room
+
+ # Some local users to test with
+ self.u_apple = UserID.from_string("@apple:test")
+ self.u_banana = UserID.from_string("@banana:test")
+
+ # Remote user
+ self.u_onion = UserID.from_string("@onion:farm")
+
+ @defer.inlineCallbacks
+ def test_started_typing_local(self):
+ self.room_members = [self.u_apple, self.u_banana]
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+
+ yield self.handler.started_typing(
+ target_user=self.u_apple,
+ auth_user=self.u_apple,
+ room_id=self.room_id,
+ timeout=20000,
+ )
+
+ self.on_new_event.assert_has_calls([
+ call('typing_key', 1, rooms=[self.room_id]),
+ ])
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ events = yield self.event_source.get_new_events(
+ room_ids=[self.room_id],
+ from_key=0,
+ )
+ self.assertEquals(
+ events[0],
+ [
+ {"type": "m.typing",
+ "room_id": self.room_id,
+ "content": {
+ "user_ids": [self.u_apple.to_string()],
+ }},
+ ]
+ )
+
+ @defer.inlineCallbacks
+ def test_started_typing_remote_send(self):
+ self.room_members = [self.u_apple, self.u_onion]
+
+ put_json = self.mock_http_client.put_json
+ put_json.expect_call_and_return(
+ call("farm",
+ path="/_matrix/federation/v1/send/1000000/",
+ data=_expect_edu("farm", "m.typing",
+ content={
+ "room_id": self.room_id,
+ "user_id": self.u_apple.to_string(),
+ "typing": True,
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ yield self.handler.started_typing(
+ target_user=self.u_apple,
+ auth_user=self.u_apple,
+ room_id=self.room_id,
+ timeout=20000,
+ )
+
+ yield put_json.await_calls()
+
+ @defer.inlineCallbacks
+ def test_started_typing_remote_recv(self):
+ self.room_members = [self.u_apple, self.u_onion]
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+
+ yield self.mock_federation_resource.trigger("PUT",
+ "/_matrix/federation/v1/send/1000000/",
+ _make_edu_json("farm", "m.typing",
+ content={
+ "room_id": self.room_id,
+ "user_id": self.u_onion.to_string(),
+ "typing": True,
+ }
+ )
+ )
+
+ self.on_new_event.assert_has_calls([
+ call('typing_key', 1, rooms=[self.room_id]),
+ ])
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ events = yield self.event_source.get_new_events(
+ room_ids=[self.room_id],
+ from_key=0
+ )
+ self.assertEquals(
+ events[0],
+ [
+ {"type": "m.typing",
+ "room_id": self.room_id,
+ "content": {
+ "user_ids": [self.u_onion.to_string()],
+ }},
+ ]
+ )
+
+ @defer.inlineCallbacks
+ def test_stopped_typing(self):
+ self.room_members = [self.u_apple, self.u_banana, self.u_onion]
+
+ put_json = self.mock_http_client.put_json
+ put_json.expect_call_and_return(
+ call("farm",
+ path="/_matrix/federation/v1/send/1000000/",
+ data=_expect_edu("farm", "m.typing",
+ content={
+ "room_id": self.room_id,
+ "user_id": self.u_apple.to_string(),
+ "typing": False,
+ }
+ ),
+ json_data_callback=ANY,
+ ),
+ defer.succeed((200, "OK"))
+ )
+
+ # Gut-wrenching
+ from synapse.handlers.typing import RoomMember
+ member = RoomMember(self.room_id, self.u_apple)
+ self.handler._member_typing_until[member] = 1002000
+ self.handler._member_typing_timer[member] = (
+ self.clock.call_later(1002, lambda: 0)
+ )
+ self.handler._room_typing[self.room_id] = set((self.u_apple,))
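+ # The handler's internal typing state is seeded directly so that
+ # stopped_typing below has something to clear.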
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+
+ yield self.handler.stopped_typing(
+ target_user=self.u_apple,
+ auth_user=self.u_apple,
+ room_id=self.room_id,
+ )
+
+ self.on_new_event.assert_has_calls([
+ call('typing_key', 1, rooms=[self.room_id]),
+ ])
+
+ yield put_json.await_calls()
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ events = yield self.event_source.get_new_events(
+ room_ids=[self.room_id],
+ from_key=0,
+ )
+ self.assertEquals(
+ events[0],
+ [
+ {"type": "m.typing",
+ "room_id": self.room_id,
+ "content": {
+ "user_ids": [],
+ }},
+ ]
+ )
+
+ @defer.inlineCallbacks
+ def test_typing_timeout(self):
+ self.room_members = [self.u_apple, self.u_banana]
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+
+ yield self.handler.started_typing(
+ target_user=self.u_apple,
+ auth_user=self.u_apple,
+ room_id=self.room_id,
+ timeout=10000,
+ )
+
+ self.on_new_event.assert_has_calls([
+ call('typing_key', 1, rooms=[self.room_id]),
+ ])
+ self.on_new_event.reset_mock()
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ events = yield self.event_source.get_new_events(
+ room_ids=[self.room_id],
+ from_key=0,
+ )
+ self.assertEquals(
+ events[0],
+ [
+ {"type": "m.typing",
+ "room_id": self.room_id,
+ "content": {
+ "user_ids": [self.u_apple.to_string()],
+ }},
+ ]
+ )
+
+ self.clock.advance_time(11)
+
+ self.on_new_event.assert_has_calls([
+ call('typing_key', 2, rooms=[self.room_id]),
+ ])
+
+ self.assertEquals(self.event_source.get_current_key(), 2)
+ events = yield self.event_source.get_new_events(
+ room_ids=[self.room_id],
+ from_key=1,
+ )
+ self.assertEquals(
+ events[0],
+ [
+ {"type": "m.typing",
+ "room_id": self.room_id,
+ "content": {
+ "user_ids": [],
+ }},
+ ]
+ )
+
+ # SYN-230 - see if we can still set after timeout
+
+ yield self.handler.started_typing(
+ target_user=self.u_apple,
+ auth_user=self.u_apple,
+ room_id=self.room_id,
+ timeout=10000,
+ )
+
+ self.on_new_event.assert_has_calls([
+ call('typing_key', 3, rooms=[self.room_id]),
+ ])
+ self.on_new_event.reset_mock()
+
+ self.assertEquals(self.event_source.get_current_key(), 3)
+ events = yield self.event_source.get_new_events(
+ room_ids=[self.room_id],
+ from_key=0,
+ )
+ self.assertEquals(
+ events[0],
+ [
+ {"type": "m.typing",
+ "room_id": self.room_id,
+ "content": {
+ "user_ids": [self.u_apple.to_string()],
+ }},
+ ]
+ )
diff --git a/tests/metrics/__init__.py b/tests/metrics/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/metrics/__init__.py
diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py
new file mode 100644
index 00000000..60090142
--- /dev/null
+++ b/tests/metrics/test_metric.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+
+from synapse.metrics.metric import (
+ CounterMetric, CallbackMetric, DistributionMetric, CacheMetric
+)
+
+
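+# The expected strings below follow the Prometheus-style text format
+# these metrics render to, e.g. 'name{label="value"} 1'.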
+class CounterMetricTestCase(unittest.TestCase):
+
+ def test_scalar(self):
+ counter = CounterMetric("scalar")
+
+ self.assertEquals(counter.render(), [
+ 'scalar 0',
+ ])
+
+ counter.inc()
+
+ self.assertEquals(counter.render(), [
+ 'scalar 1',
+ ])
+
+ counter.inc_by(2)
+
+ self.assertEquals(counter.render(), [
+ 'scalar 3'
+ ])
+
+ def test_vector(self):
+ counter = CounterMetric("vector", labels=["method"])
+
+ # Empty counter doesn't yet know what values it has
+ self.assertEquals(counter.render(), [])
+
+ counter.inc("GET")
+
+ self.assertEquals(counter.render(), [
+ 'vector{method="GET"} 1',
+ ])
+
+ counter.inc("GET")
+ counter.inc("PUT")
+
+ self.assertEquals(counter.render(), [
+ 'vector{method="GET"} 2',
+ 'vector{method="PUT"} 1',
+ ])
+
+
+class CallbackMetricTestCase(unittest.TestCase):
+
+ def test_scalar(self):
+ d = dict()
+
+ metric = CallbackMetric("size", lambda: len(d))
+
+ self.assertEquals(metric.render(), [
+ 'size 0',
+ ])
+
+ d["key"] = "value"
+
+ self.assertEquals(metric.render(), [
+ 'size 1',
+ ])
+
+ def test_vector(self):
+ vals = dict()
+
+ metric = CallbackMetric("values", lambda: vals, labels=["type"])
+
+ self.assertEquals(metric.render(), [])
+
+ # Keys have to be tuples, even if they're 1-element
+ vals[("foo",)] = 1
+ vals[("bar",)] = 2
+
+ self.assertEquals(metric.render(), [
+ 'values{type="bar"} 2',
+ 'values{type="foo"} 1',
+ ])
+
+
+class DistributionMetricTestCase(unittest.TestCase):
+
+ def test_scalar(self):
+ metric = DistributionMetric("thing")
+
+ self.assertEquals(metric.render(), [
+ 'thing:count 0',
+ 'thing:total 0',
+ ])
+
+ metric.inc_by(500)
+
+ self.assertEquals(metric.render(), [
+ 'thing:count 1',
+ 'thing:total 500',
+ ])
+
+ def test_vector(self):
+ metric = DistributionMetric("queries", labels=["verb"])
+
+ self.assertEquals(metric.render(), [])
+
+ metric.inc_by(300, "SELECT")
+ metric.inc_by(200, "SELECT")
+ metric.inc_by(800, "INSERT")
+
+ self.assertEquals(metric.render(), [
+ 'queries:count{verb="INSERT"} 1',
+ 'queries:count{verb="SELECT"} 2',
+ 'queries:total{verb="INSERT"} 800',
+ 'queries:total{verb="SELECT"} 500',
+ ])
+
+
+class CacheMetricTestCase(unittest.TestCase):
+
+ def test_cache(self):
+ d = dict()
+
+ metric = CacheMetric("cache", lambda: len(d))
+
+ self.assertEquals(metric.render(), [
+ 'cache:hits 0',
+ 'cache:total 0',
+ 'cache:size 0',
+ ])
+
+ metric.inc_misses()
+ d["key"] = "value"
+
+ self.assertEquals(metric.render(), [
+ 'cache:hits 0',
+ 'cache:total 1',
+ 'cache:size 1',
+ ])
+
+ metric.inc_hits()
+
+ self.assertEquals(metric.render(), [
+ 'cache:hits 1',
+ 'cache:total 2',
+ 'cache:size 1',
+ ])
diff --git a/tests/rest/__init__.py b/tests/rest/__init__.py
new file mode 100644
index 00000000..1a84d94c
--- /dev/null
+++ b/tests/rest/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/rest/client/__init__.py b/tests/rest/client/__init__.py
new file mode 100644
index 00000000..1a84d94c
--- /dev/null
+++ b/tests/rest/client/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/rest/client/v1/__init__.py b/tests/rest/client/v1/__init__.py
new file mode 100644
index 00000000..9bff9ec1
--- /dev/null
+++ b/tests/rest/client/v1/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py
new file mode 100644
index 00000000..ac3b0b58
--- /dev/null
+++ b/tests/rest/client/v1/test_events.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Tests REST events for /events paths."""
+from tests import unittest
+
+# twisted imports
+from twisted.internet import defer
+
+import synapse.rest.client.v1.events
+import synapse.rest.client.v1.register
+import synapse.rest.client.v1.room
+
+
+from ....utils import MockHttpResource, setup_test_homeserver
+from .utils import RestTestCase
+
+from mock import Mock, NonCallableMock
+
+
+PATH_PREFIX = "/_matrix/client/api/v1"
+
+
+class EventStreamPaginationApiTestCase(unittest.TestCase):
+ """ Tests event streaming query parameters and start/end keys used in the
+ Pagination stream API. """
+ user_id = "sid1"
+
+ def setUp(self):
+ # configure stream and inject items
+ pass
+
+ def tearDown(self):
+ pass
+
+ def TODO_test_long_poll(self):
+ # stream from 'end' key, send (self+other) message, expect message.
+
+ # stream from 'END', send (self+other) message, expect message.
+
+ # stream from 'end' key, send (self+other) topic, expect topic.
+
+ # stream from 'END', send (self+other) topic, expect topic.
+
+ # stream from 'end' key, send (self+other) invite, expect invite.
+
+ # stream from 'END', send (self+other) invite, expect invite.
+
+ pass
+
+ def TODO_test_stream_forward(self):
+ # stream from START, expect injected items
+
+ # stream from 'start' key, expect same content
+
+ # stream from 'end' key, expect nothing
+
+ # stream from 'END', expect nothing
+
+ # The following is needed for cases where content is removed e.g. you
+ # left a room, so the token you're streaming from is > the one that
+ # would be returned naturally from START>END.
+ # stream from very new token (higher than end key), expect same token
+ # returned as end key
+ pass
+
+ def TODO_test_limits(self):
+ # stream from a key, expect limit_num items
+
+ # stream from START, expect limit_num items
+
+ pass
+
+ def TODO_test_range(self):
+ # stream from key to key, expect X items
+
+ # stream from key to END, expect X items
+
+ # stream from START to key, expect X items
+
+ # stream from START to END, expect all items
+ pass
+
+ def TODO_test_direction(self):
+ # stream from END to START and fwds, expect newest first
+
+ # stream from END to START and bwds, expect oldest first
+
+ # stream from START to END and fwds, expect oldest first
+
+ # stream from START to END and bwds, expect newest first
+
+ pass
+
+
+class EventStreamPermissionsTestCase(RestTestCase):
+ """ Tests event streaming (GET /events). """
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+
+ hs = yield setup_test_homeserver(
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=[
+ "send_message",
+ ]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+ hs.config.enable_registration_captcha = False
+ hs.config.disable_registration = False
+
+ hs.get_handlers().federation_handler = Mock()
+
+ synapse.rest.client.v1.register.register_servlets(hs, self.mock_resource)
+ synapse.rest.client.v1.events.register_servlets(hs, self.mock_resource)
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ # register an account
+ self.user_id = "sid1"
+ response = yield self.register(self.user_id)
+ self.token = response["access_token"]
+ self.user_id = response["user_id"]
+
+ # register a 2nd account
+ self.other_user = "other1"
+ response = yield self.register(self.other_user)
+ self.other_token = response["access_token"]
+ self.other_user = response["user_id"]
+
+ def tearDown(self):
+ pass
+
+ @defer.inlineCallbacks
+ def test_stream_basic_permissions(self):
+ # invalid token, expect 403
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/events?access_token=%s" % ("invalid" + self.token, )
+ )
+ self.assertEquals(403, code, msg=str(response))
+
+ # valid token, expect content
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/events?access_token=%s&timeout=0" % (self.token,)
+ )
+ self.assertEquals(200, code, msg=str(response))
+ self.assertTrue("chunk" in response)
+ self.assertTrue("start" in response)
+ self.assertTrue("end" in response)
+
+ @defer.inlineCallbacks
+ def test_stream_room_permissions(self):
+ room_id = yield self.create_room_as(
+ self.other_user,
+ tok=self.other_token
+ )
+ yield self.send(room_id, tok=self.other_token)
+
+ # invited to room (expect no content for room)
+ yield self.invite(
+ room_id,
+ src=self.other_user,
+ targ=self.user_id,
+ tok=self.other_token
+ )
+
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/events?access_token=%s&timeout=0" % (self.token,)
+ )
+ self.assertEquals(200, code, msg=str(response))
+
+ # We may get a presence event for ourselves down the stream; ignore it
+ self.assertEquals(
+ 0,
+ len([
+ c for c in response["chunk"]
+ if not (
+ c.get("type") == "m.presence"
+ and c["content"].get("user_id") == self.user_id
+ )
+ ])
+ )
+
+ # joined room (expect all content for room)
+ yield self.join(room=room_id, user=self.user_id, tok=self.token)
+
+ # left room (expect no content for room)
+
+ def TODO_test_stream_items(self):
+ # new user, no content
+
+ # join room, expect 1 item (join)
+
+ # send message, expect 2 items (join,send)
+
+ # set topic, expect 3 items (join,send,topic)
+
+ # someone else join room, expect 4 (join,send,topic,join)
+
+ # someone else send message, expect 5 (join,send,topic,join,send)
+
+ # someone else set topic, expect 6 (join,send,topic,join,send,topic)
+ pass
diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py
new file mode 100644
index 00000000..8581796f
--- /dev/null
+++ b/tests/rest/client/v1/test_presence.py
@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests REST events for /presence paths."""
+
+from tests import unittest
+from twisted.internet import defer
+
+from mock import Mock
+
+from ....utils import MockHttpResource, setup_test_homeserver
+
+from synapse.api.constants import PresenceState
+from synapse.handlers.presence import PresenceHandler
+from synapse.rest.client.v1 import presence
+from synapse.rest.client.v1 import events
+from synapse.types import UserID
+from synapse.util.async import run_on_reactor
+
+from collections import namedtuple
+
+
+OFFLINE = PresenceState.OFFLINE
+UNAVAILABLE = PresenceState.UNAVAILABLE
+ONLINE = PresenceState.ONLINE
+
+
+myid = "@apple:test"
+PATH_PREFIX = "/_matrix/client/api/v1"
+
+
+class NullSource(object):
+ """This event source never yields any events and its token remains at
+ zero. It may be useful for unit-testing."""
+ def __init__(self, hs):
+ pass
+
+ def get_new_events(
+ self,
+ user,
+ from_key,
+ room_ids=None,
+ limit=None,
+ is_guest=None
+ ):
+ return defer.succeed(([], from_key))
+
+ def get_current_key(self, direction='f'):
+ return defer.succeed(0)
+
+ def get_pagination_rows(self, user, pagination_config, key):
+ return defer.succeed(([], pagination_config.from_key))
+
+
+class JustPresenceHandlers(object):
+ def __init__(self, hs):
+ self.presence_handler = PresenceHandler(hs)
+
+
+class PresenceStateTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+ hs = yield setup_test_homeserver(
+ datastore=Mock(spec=[
+ "get_presence_state",
+ "set_presence_state",
+ "insert_client_ip",
+ ]),
+ http_client=None,
+ resource_for_client=self.mock_resource,
+ resource_for_federation=self.mock_resource,
+ )
+ hs.handlers = JustPresenceHandlers(hs)
+
+ self.datastore = hs.get_datastore()
+ self.datastore.get_app_service_by_token = Mock(return_value=None)
+
+ def get_presence_list(*a, **kw):
+ return defer.succeed([])
+ self.datastore.get_presence_list = get_presence_list
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(myid),
+ "token_id": 1,
+ "is_guest": False,
+ }
+
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ room_member_handler = hs.handlers.room_member_handler = Mock(
+ spec=[
+ "get_joined_rooms_for_user",
+ ]
+ )
+
+ def get_rooms_for_user(user):
+ return defer.succeed([])
+ room_member_handler.get_joined_rooms_for_user = get_rooms_for_user
+
+ presence.register_servlets(hs, self.mock_resource)
+
+ self.u_apple = UserID.from_string(myid)
+
+ @defer.inlineCallbacks
+ def test_get_my_status(self):
+ mocked_get = self.datastore.get_presence_state
+ mocked_get.return_value = defer.succeed(
+ {"state": ONLINE, "status_msg": "Available"}
+ )
+
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/presence/%s/status" % (myid), None)
+
+ self.assertEquals(200, code)
+ self.assertEquals(
+ {"presence": ONLINE, "status_msg": "Available"},
+ response
+ )
+ mocked_get.assert_called_with("apple")
+
+ @defer.inlineCallbacks
+ def test_set_my_status(self):
+ mocked_set = self.datastore.set_presence_state
+ mocked_set.return_value = defer.succeed({"state": OFFLINE})
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ "/presence/%s/status" % (myid),
+ '{"presence": "unavailable", "status_msg": "Away"}')
+
+ self.assertEquals(200, code)
+ mocked_set.assert_called_with("apple",
+ {"state": UNAVAILABLE, "status_msg": "Away"}
+ )
+
+
+class PresenceListTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+
+ hs = yield setup_test_homeserver(
+ datastore=Mock(spec=[
+ "has_presence_state",
+ "get_presence_state",
+ "allow_presence_visible",
+ "is_presence_visible",
+ "add_presence_list_pending",
+ "set_presence_list_accepted",
+ "del_presence_list",
+ "get_presence_list",
+ "insert_client_ip",
+ ]),
+ http_client=None,
+ resource_for_client=self.mock_resource,
+ resource_for_federation=self.mock_resource,
+ )
+ hs.handlers = JustPresenceHandlers(hs)
+
+ self.datastore = hs.get_datastore()
+ self.datastore.get_app_service_by_token = Mock(return_value=None)
+
+ def has_presence_state(user_localpart):
+ return defer.succeed(
+ user_localpart in ("apple", "banana",)
+ )
+ self.datastore.has_presence_state = has_presence_state
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(myid),
+ "token_id": 1,
+ "is_guest": False,
+ }
+
+ hs.handlers.room_member_handler = Mock(
+ spec=[
+ "get_joined_rooms_for_user",
+ ]
+ )
+
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ presence.register_servlets(hs, self.mock_resource)
+
+ self.u_apple = UserID.from_string("@apple:test")
+ self.u_banana = UserID.from_string("@banana:test")
+
+ @defer.inlineCallbacks
+ def test_get_my_list(self):
+ self.datastore.get_presence_list.return_value = defer.succeed(
+ [{"observed_user_id": "@banana:test", "accepted": True}],
+ )
+
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/presence/list/%s" % (myid), None)
+
+ self.assertEquals(200, code)
+ self.assertEquals([
+ {"user_id": "@banana:test", "presence": OFFLINE, "accepted": True},
+ ], response)
+
+ self.datastore.get_presence_list.assert_called_with(
+ "apple", accepted=True
+ )
+
+ @defer.inlineCallbacks
+ def test_invite(self):
+ self.datastore.add_presence_list_pending.return_value = (
+ defer.succeed(())
+ )
+ self.datastore.is_presence_visible.return_value = defer.succeed(
+ True
+ )
+
+ (code, response) = yield self.mock_resource.trigger("POST",
+ "/presence/list/%s" % (myid),
+ """{"invite": ["@banana:test"]}"""
+ )
+
+ self.assertEquals(200, code)
+
+ self.datastore.add_presence_list_pending.assert_called_with(
+ "apple", "@banana:test"
+ )
+ self.datastore.set_presence_list_accepted.assert_called_with(
+ "apple", "@banana:test"
+ )
+
+ @defer.inlineCallbacks
+ def test_drop(self):
+ self.datastore.del_presence_list.return_value = (
+ defer.succeed(())
+ )
+
+ (code, response) = yield self.mock_resource.trigger("POST",
+ "/presence/list/%s" % (myid),
+ """{"drop": ["@banana:test"]}"""
+ )
+
+ self.assertEquals(200, code)
+
+ self.datastore.del_presence_list.assert_called_with(
+ "apple", "@banana:test"
+ )
+
+
+class PresenceEventStreamTestCase(unittest.TestCase):
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+
+ # HIDEOUS HACKERY
+ # TODO(paul): This should be injected in via the HomeServer DI system
+ from synapse.streams.events import (
+ PresenceEventSource, EventSources
+ )
+
+ old_SOURCE_TYPES = EventSources.SOURCE_TYPES
+ def tearDown():
+ EventSources.SOURCE_TYPES = old_SOURCE_TYPES
+ self.tearDown = tearDown
+
+ EventSources.SOURCE_TYPES = {
+ k: NullSource for k in old_SOURCE_TYPES.keys()
+ }
+ EventSources.SOURCE_TYPES["presence"] = PresenceEventSource
+
+ hs = yield setup_test_homeserver(
+ http_client=None,
+ resource_for_client=self.mock_resource,
+ resource_for_federation=self.mock_resource,
+ datastore=Mock(spec=[
+ "set_presence_state",
+ "get_presence_list",
+ "get_rooms_for_user",
+ ]),
+ clock=Mock(spec=[
+ "call_later",
+ "cancel_call_later",
+ "time_msec",
+ "looping_call",
+ ]),
+ )
+
+ hs.get_clock().time_msec.return_value = 1000000
+
+ def _get_user_by_req(req=None, allow_guest=False):
+ return (UserID.from_string(myid), "", False)
+
+ hs.get_v1auth().get_user_by_req = _get_user_by_req
+
+ presence.register_servlets(hs, self.mock_resource)
+ events.register_servlets(hs, self.mock_resource)
+
+ hs.handlers.room_member_handler = Mock(spec=[])
+
+ self.room_members = []
+
+ def get_rooms_for_user(user):
+ if user in self.room_members:
+ return ["a-room"]
+ else:
+ return []
+ hs.handlers.room_member_handler.get_joined_rooms_for_user = get_rooms_for_user
+ hs.handlers.room_member_handler.get_room_members = (
+ lambda r: self.room_members if r == "a-room" else []
+ )
+ hs.handlers.room_member_handler._filter_events_for_client = (
+ lambda user_id, events, **kwargs: events
+ )
+
+ self.mock_datastore = hs.get_datastore()
+ self.mock_datastore.get_app_service_by_token = Mock(return_value=None)
+ self.mock_datastore.get_app_service_by_user_id = Mock(
+ return_value=defer.succeed(None)
+ )
+ self.mock_datastore.get_rooms_for_user = (
+ lambda u: [
+ namedtuple("Room", "room_id")(r)
+ for r in get_rooms_for_user(UserID.from_string(u))
+ ]
+ )
+
+ def get_profile_displayname(user_id):
+ return defer.succeed("Frank")
+ self.mock_datastore.get_profile_displayname = get_profile_displayname
+
+ def get_profile_avatar_url(user_id):
+ return defer.succeed(None)
+ self.mock_datastore.get_profile_avatar_url = get_profile_avatar_url
+
+ def user_rooms_intersect(user_list):
+ room_member_ids = map(lambda u: u.to_string(), self.room_members)
+
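+ # "intersect" here just means every queried user appears in our one
+ # mocked room's membership list.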
+ shared = all(map(lambda i: i in room_member_ids, user_list))
+ return defer.succeed(shared)
+ self.mock_datastore.user_rooms_intersect = user_rooms_intersect
+
+ def get_joined_hosts_for_room(room_id):
+ return []
+ self.mock_datastore.get_joined_hosts_for_room = get_joined_hosts_for_room
+
+ self.presence = hs.get_handlers().presence_handler
+
+ self.u_apple = UserID.from_string("@apple:test")
+ self.u_banana = UserID.from_string("@banana:test")
+
+ @defer.inlineCallbacks
+ def test_shortpoll(self):
+ self.room_members = [self.u_apple, self.u_banana]
+
+ self.mock_datastore.set_presence_state.return_value = defer.succeed(
+ {"state": ONLINE}
+ )
+ self.mock_datastore.get_presence_list.return_value = defer.succeed(
+ []
+ )
+
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/events?timeout=0", None)
+
+ self.assertEquals(200, code)
+
+ # We've forced there to be only one data stream so the tokens will
+ # all be ours
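+ # (each position in the token tracks one event source; the NullSources
+ # stay at key 0, so only the presence counter moves)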
+
+ # I'll already get my own presence state change
+ self.assertEquals({"start": "0_1_0_0_0", "end": "0_1_0_0_0", "chunk": []},
+ response
+ )
+
+ self.mock_datastore.set_presence_state.return_value = defer.succeed(
+ {"state": ONLINE}
+ )
+ self.mock_datastore.get_presence_list.return_value = defer.succeed([])
+
+ yield self.presence.set_state(self.u_banana, self.u_banana,
+ state={"presence": ONLINE}
+ )
+
+ yield run_on_reactor()
+
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/events?from=s0_1_0&timeout=0", None)
+
+ self.assertEquals(200, code)
+ self.assertEquals({"start": "s0_1_0_0_0", "end": "s0_2_0_0_0", "chunk": [
+ {"type": "m.presence",
+ "content": {
+ "user_id": "@banana:test",
+ "presence": ONLINE,
+ "displayname": "Frank",
+ "last_active_ago": 0,
+ }},
+ ]}, response)
diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py
new file mode 100644
index 00000000..adcc1d19
--- /dev/null
+++ b/tests/rest/client/v1/test_profile.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests REST events for /profile paths."""
+
+from tests import unittest
+from twisted.internet import defer
+
+from mock import Mock, NonCallableMock
+
+from ....utils import MockHttpResource, setup_test_homeserver
+
+from synapse.api.errors import SynapseError, AuthError
+from synapse.types import UserID
+
+from synapse.rest.client.v1 import profile
+
+myid = "@1234ABCD:test"
+PATH_PREFIX = "/_matrix/client/api/v1"
+
+
+class ProfileTestCase(unittest.TestCase):
+ """ Tests profile management. """
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+ self.mock_handler = Mock(spec=[
+ "get_displayname",
+ "set_displayname",
+ "get_avatar_url",
+ "set_avatar_url",
+ ])
+
+ hs = yield setup_test_homeserver(
+ "test",
+ http_client=None,
+ resource_for_client=self.mock_resource,
+ federation=Mock(),
+ replication_layer=Mock(),
+ )
+
+ def _get_user_by_req(request=None, allow_guest=False):
+ return (UserID.from_string(myid), "", False)
+
+ hs.get_v1auth().get_user_by_req = _get_user_by_req
+
+ hs.get_handlers().profile_handler = self.mock_handler
+
+ profile.register_servlets(hs, self.mock_resource)
+
+ @defer.inlineCallbacks
+ def test_get_my_name(self):
+ mocked_get = self.mock_handler.get_displayname
+ mocked_get.return_value = defer.succeed("Frank")
+
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/profile/%s/displayname" % (myid), None)
+
+ self.assertEquals(200, code)
+ self.assertEquals({"displayname": "Frank"}, response)
+ self.assertEquals(mocked_get.call_args[0][0].localpart, "1234ABCD")
+
+ @defer.inlineCallbacks
+ def test_set_my_name(self):
+ mocked_set = self.mock_handler.set_displayname
+ mocked_set.return_value = defer.succeed(())
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ "/profile/%s/displayname" % (myid),
+ '{"displayname": "Frank Jr."}')
+
+ self.assertEquals(200, code)
+ self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD")
+ self.assertEquals(mocked_set.call_args[0][1].localpart, "1234ABCD")
+ self.assertEquals(mocked_set.call_args[0][2], "Frank Jr.")
+
+ @defer.inlineCallbacks
+ def test_set_my_name_noauth(self):
+ mocked_set = self.mock_handler.set_displayname
+ mocked_set.side_effect = AuthError(400, "message")
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ "/profile/%s/displayname" % ("@4567:test"), '"Frank Jr."')
+
+ self.assertTrue(400 <= code < 499,
+ msg="code %d is in the 4xx range" % (code))
+
+ @defer.inlineCallbacks
+ def test_get_other_name(self):
+ mocked_get = self.mock_handler.get_displayname
+ mocked_get.return_value = defer.succeed("Bob")
+
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/profile/%s/displayname" % ("@opaque:elsewhere"), None)
+
+ self.assertEquals(200, code)
+ self.assertEquals({"displayname": "Bob"}, response)
+
+ @defer.inlineCallbacks
+ def test_set_other_name(self):
+ mocked_set = self.mock_handler.set_displayname
+ mocked_set.side_effect = SynapseError(400, "message")
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ "/profile/%s/displayname" % ("@opaque:elsewhere"), None)
+
+ self.assertTrue(400 <= code <= 499,
+ msg="code %d is in the 4xx range" % (code))
+
+ @defer.inlineCallbacks
+ def test_get_my_avatar(self):
+ mocked_get = self.mock_handler.get_avatar_url
+ mocked_get.return_value = defer.succeed("http://my.server/me.png")
+
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/profile/%s/avatar_url" % (myid), None)
+
+ self.assertEquals(200, code)
+ self.assertEquals({"avatar_url": "http://my.server/me.png"}, response)
+ self.assertEquals(mocked_get.call_args[0][0].localpart, "1234ABCD")
+
+ @defer.inlineCallbacks
+ def test_set_my_avatar(self):
+ mocked_set = self.mock_handler.set_avatar_url
+ mocked_set.return_value = defer.succeed(())
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ "/profile/%s/avatar_url" % (myid),
+ '{"avatar_url": "http://my.server/pic.gif"}')
+
+ self.assertEquals(200, code)
+ self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD")
+ self.assertEquals(mocked_set.call_args[0][1].localpart, "1234ABCD")
+ self.assertEquals(mocked_set.call_args[0][2],
+ "http://my.server/pic.gif")
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
new file mode 100644
index 00000000..77493780
--- /dev/null
+++ b/tests/rest/client/v1/test_rooms.py
@@ -0,0 +1,1052 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests REST events for /rooms paths."""
+
+# twisted imports
+from twisted.internet import defer
+
+import synapse.rest.client.v1.room
+from synapse.api.constants import Membership
+
+from synapse.types import UserID
+
+import json
+import urllib
+
+from ....utils import MockHttpResource, setup_test_homeserver
+from .utils import RestTestCase
+
+from mock import Mock, NonCallableMock
+
+PATH_PREFIX = "/_matrix/client/api/v1"
+
+
+class RoomPermissionsTestCase(RestTestCase):
+ """ Tests room permissions. """
+ user_id = "@sid1:red"
+ rmcreator_id = "@notme:red"
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+
+ hs = yield setup_test_homeserver(
+ "red",
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=["send_message"]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.get_handlers().federation_handler = Mock()
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.auth_user_id),
+ "token_id": 1,
+ "is_guest": False,
+ }
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ def _insert_client_ip(*args, **kwargs):
+ return defer.succeed(None)
+ hs.get_datastore().insert_client_ip = _insert_client_ip
+
+ self.auth_user_id = self.rmcreator_id
+
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ self.auth = hs.get_v1auth()
+
+ # create some rooms under the name rmcreator_id
+ self.uncreated_rmid = "!aa:test"
+
+ self.created_rmid = yield self.create_room_as(self.rmcreator_id,
+ is_public=False)
+
+ self.created_public_rmid = yield self.create_room_as(self.rmcreator_id,
+ is_public=True)
+
+ # send a message in one of the rooms
+ self.created_rmid_msg_path = ("/rooms/%s/send/m.room.message/a1" %
+ (self.created_rmid))
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT",
+ self.created_rmid_msg_path,
+ '{"msgtype":"m.text","body":"test msg"}')
+ self.assertEquals(200, code, msg=str(response))
+
+ # set topic for public room
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT",
+ "/rooms/%s/state/m.room.topic" % self.created_public_rmid,
+ '{"topic":"Public Room Topic"}')
+ self.assertEquals(200, code, msg=str(response))
+
+ # auth as user_id now
+ self.auth_user_id = self.user_id
+
+ def tearDown(self):
+ pass
+
+# @defer.inlineCallbacks
+# def test_get_message(self):
+# # get message in uncreated room, expect 403
+# (code, response) = yield self.mock_resource.trigger_get(
+# "/rooms/noroom/messages/someid/m1")
+# self.assertEquals(403, code, msg=str(response))
+#
+# # get message in created room not joined (no state), expect 403
+# (code, response) = yield self.mock_resource.trigger_get(
+# self.created_rmid_msg_path)
+# self.assertEquals(403, code, msg=str(response))
+#
+# # get message in created room and invited, expect 403
+# yield self.invite(room=self.created_rmid, src=self.rmcreator_id,
+# targ=self.user_id)
+# (code, response) = yield self.mock_resource.trigger_get(
+# self.created_rmid_msg_path)
+# self.assertEquals(403, code, msg=str(response))
+#
+# # get message in created room and joined, expect 200
+# yield self.join(room=self.created_rmid, user=self.user_id)
+# (code, response) = yield self.mock_resource.trigger_get(
+# self.created_rmid_msg_path)
+# self.assertEquals(200, code, msg=str(response))
+#
+# # get message in created room and left, expect 403
+# yield self.leave(room=self.created_rmid, user=self.user_id)
+# (code, response) = yield self.mock_resource.trigger_get(
+# self.created_rmid_msg_path)
+# self.assertEquals(403, code, msg=str(response))
+
+ @defer.inlineCallbacks
+ def test_send_message(self):
+ msg_content = '{"msgtype":"m.text","body":"hello"}'
+ send_msg_path = (
+ "/rooms/%s/send/m.room.message/mid1" % (self.created_rmid,)
+ )
+
+ # send message in uncreated room, expect 403
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT",
+ "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,),
+ msg_content
+ )
+ self.assertEquals(403, code, msg=str(response))
+
+ # send message in created room not joined (no state), expect 403
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT",
+ send_msg_path,
+ msg_content
+ )
+ self.assertEquals(403, code, msg=str(response))
+
+ # send message in created room and invited, expect 403
+ yield self.invite(
+ room=self.created_rmid,
+ src=self.rmcreator_id,
+ targ=self.user_id
+ )
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT",
+ send_msg_path,
+ msg_content
+ )
+ self.assertEquals(403, code, msg=str(response))
+
+ # send message in created room and joined, expect 200
+ yield self.join(room=self.created_rmid, user=self.user_id)
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT",
+ send_msg_path,
+ msg_content
+ )
+ self.assertEquals(200, code, msg=str(response))
+
+ # send message in created room and left, expect 403
+ yield self.leave(room=self.created_rmid, user=self.user_id)
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT",
+ send_msg_path,
+ msg_content
+ )
+ self.assertEquals(403, code, msg=str(response))
+
+ @defer.inlineCallbacks
+ def test_topic_perms(self):
+ topic_content = '{"topic":"My Topic Name"}'
+ topic_path = "/rooms/%s/state/m.room.topic" % self.created_rmid
+
+ # set/get topic in uncreated room, expect 403
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid,
+ topic_content)
+ self.assertEquals(403, code, msg=str(response))
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/rooms/%s/state/m.room.topic" % self.uncreated_rmid)
+ self.assertEquals(403, code, msg=str(response))
+
+ # set/get topic in created PRIVATE room not joined, expect 403
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT", topic_path, topic_content)
+ self.assertEquals(403, code, msg=str(response))
+ (code, response) = yield self.mock_resource.trigger_get(topic_path)
+ self.assertEquals(403, code, msg=str(response))
+
+ # set topic in created PRIVATE room and invited, expect 403
+ yield self.invite(room=self.created_rmid, src=self.rmcreator_id,
+ targ=self.user_id)
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT", topic_path, topic_content)
+ self.assertEquals(403, code, msg=str(response))
+
+ # get topic in created PRIVATE room and invited, expect 403
+ (code, response) = yield self.mock_resource.trigger_get(topic_path)
+ self.assertEquals(403, code, msg=str(response))
+
+ # set/get topic in created PRIVATE room and joined, expect 200
+ yield self.join(room=self.created_rmid, user=self.user_id)
+
+ # Only room ops can set topic by default
+ self.auth_user_id = self.rmcreator_id
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT", topic_path, topic_content)
+ self.assertEquals(200, code, msg=str(response))
+ self.auth_user_id = self.user_id
+
+ (code, response) = yield self.mock_resource.trigger_get(topic_path)
+ self.assertEquals(200, code, msg=str(response))
+ self.assert_dict(json.loads(topic_content), response)
+
+ # set/get topic in created PRIVATE room and left, expect 403
+ yield self.leave(room=self.created_rmid, user=self.user_id)
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT", topic_path, topic_content)
+ self.assertEquals(403, code, msg=str(response))
+ (code, response) = yield self.mock_resource.trigger_get(topic_path)
+ self.assertEquals(200, code, msg=str(response))
+
+ # get topic in PUBLIC room, not joined, expect 403
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/rooms/%s/state/m.room.topic" % self.created_public_rmid)
+ self.assertEquals(403, code, msg=str(response))
+
+ # set topic in PUBLIC room, not joined, expect 403
+ (code, response) = yield self.mock_resource.trigger(
+ "PUT",
+ "/rooms/%s/state/m.room.topic" % self.created_public_rmid,
+ topic_content)
+ self.assertEquals(403, code, msg=str(response))
+
+ @defer.inlineCallbacks
+ def _test_get_membership(self, room=None, members=None, expect_code=None):
+ path = "/rooms/%s/state/m.room.member/%s"
+ for member in (members or []):
+ (code, response) = yield self.mock_resource.trigger_get(
+ path % (room, member))
+ self.assertEquals(expect_code, code)
+
+ @defer.inlineCallbacks
+ def test_membership_basic_room_perms(self):
+ # === room does not exist ===
+ room = self.uncreated_rmid
+ # get membership of self, get membership of other, uncreated room
+ # expect all 403s
+ yield self._test_get_membership(
+ members=[self.user_id, self.rmcreator_id],
+ room=room, expect_code=403)
+
+ # trying to invite people to this room should 403
+ yield self.invite(room=room, src=self.user_id, targ=self.rmcreator_id,
+ expect_code=403)
+
+ # set [invite/join/left] of self, set [invite/join/left] of other,
+ # expect all 404s because room doesn't exist on any server
+ for usr in [self.user_id, self.rmcreator_id]:
+ yield self.join(room=room, user=usr, expect_code=404)
+ yield self.leave(room=room, user=usr, expect_code=404)
+
+ @defer.inlineCallbacks
+ def test_membership_private_room_perms(self):
+ room = self.created_rmid
+ # get membership of self, get membership of other, private room + invite
+ # expect all 403s
+ yield self.invite(room=room, src=self.rmcreator_id,
+ targ=self.user_id)
+ yield self._test_get_membership(
+ members=[self.user_id, self.rmcreator_id],
+ room=room, expect_code=403)
+
+ # get membership of self, get membership of other, private room + joined
+ # expect all 200s
+ yield self.join(room=room, user=self.user_id)
+ yield self._test_get_membership(
+ members=[self.user_id, self.rmcreator_id],
+ room=room, expect_code=200)
+
+ # get membership of self, get membership of other, private room + left
+ # expect all 200s
+ yield self.leave(room=room, user=self.user_id)
+ yield self._test_get_membership(
+ members=[self.user_id, self.rmcreator_id],
+ room=room, expect_code=200)
+
+ @defer.inlineCallbacks
+ def test_membership_public_room_perms(self):
+ room = self.created_public_rmid
+ # get membership of self, get membership of other, public room + invite
+ # expect 403
+ yield self.invite(room=room, src=self.rmcreator_id,
+ targ=self.user_id)
+ yield self._test_get_membership(
+ members=[self.user_id, self.rmcreator_id],
+ room=room, expect_code=403)
+
+ # get membership of self, get membership of other, public room + joined
+ # expect all 200s
+ yield self.join(room=room, user=self.user_id)
+ yield self._test_get_membership(
+ members=[self.user_id, self.rmcreator_id],
+ room=room, expect_code=200)
+
+ # get membership of self, get membership of other, public room + left
+ # expect 200.
+ yield self.leave(room=room, user=self.user_id)
+ yield self._test_get_membership(
+ members=[self.user_id, self.rmcreator_id],
+ room=room, expect_code=200)
+
+ @defer.inlineCallbacks
+ def test_invited_permissions(self):
+ room = self.created_rmid
+ yield self.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
+
+ # set [invite/join/left] of other user, expect 403s
+ yield self.invite(room=room, src=self.user_id, targ=self.rmcreator_id,
+ expect_code=403)
+ yield self.change_membership(room=room, src=self.user_id,
+ targ=self.rmcreator_id,
+ membership=Membership.JOIN,
+ expect_code=403)
+ yield self.change_membership(room=room, src=self.user_id,
+ targ=self.rmcreator_id,
+ membership=Membership.LEAVE,
+ expect_code=403)
+
+ @defer.inlineCallbacks
+ def test_joined_permissions(self):
+ room = self.created_rmid
+ yield self.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
+ yield self.join(room=room, user=self.user_id)
+
+ # set invited of self, expect 403
+ yield self.invite(room=room, src=self.user_id, targ=self.user_id,
+ expect_code=403)
+
+ # set joined of self, expect 200 (NOOP)
+ yield self.join(room=room, user=self.user_id)
+
+ other = "@burgundy:red"
+ # set invited of other, expect 200
+ yield self.invite(room=room, src=self.user_id, targ=other,
+ expect_code=200)
+
+ # set joined of other, expect 403
+ yield self.change_membership(room=room, src=self.user_id,
+ targ=other,
+ membership=Membership.JOIN,
+ expect_code=403)
+
+ # set left of other, expect 403
+ yield self.change_membership(room=room, src=self.user_id,
+ targ=other,
+ membership=Membership.LEAVE,
+ expect_code=403)
+
+ # set left of self, expect 200
+ yield self.leave(room=room, user=self.user_id)
+
+ @defer.inlineCallbacks
+ def test_leave_permissions(self):
+ room = self.created_rmid
+ yield self.invite(room=room, src=self.rmcreator_id, targ=self.user_id)
+ yield self.join(room=room, user=self.user_id)
+ yield self.leave(room=room, user=self.user_id)
+
+ # set [invite/join/left] of self, set [invite/join/left] of other,
+ # expect all 403s
+ for usr in [self.user_id, self.rmcreator_id]:
+ yield self.change_membership(
+ room=room,
+ src=self.user_id,
+ targ=usr,
+ membership=Membership.INVITE,
+ expect_code=403
+ )
+
+ yield self.change_membership(
+ room=room,
+ src=self.user_id,
+ targ=usr,
+ membership=Membership.JOIN,
+ expect_code=403
+ )
+
+ # It is always valid to LEAVE if you've already left (currently), so
+ # only check that we can't set the LEAVE of the *other* user.
+ yield self.change_membership(
+ room=room,
+ src=self.user_id,
+ targ=self.rmcreator_id,
+ membership=Membership.LEAVE,
+ expect_code=403
+ )
+
+
+class RoomsMemberListTestCase(RestTestCase):
+ """ Tests /rooms/$room_id/members/list REST events."""
+ user_id = "@sid1:red"
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+
+ hs = yield setup_test_homeserver(
+ "red",
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=["send_message"]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.get_handlers().federation_handler = Mock()
+
+ self.auth_user_id = self.user_id
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.auth_user_id),
+ "token_id": 1,
+ "is_guest": False,
+ }
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ def _insert_client_ip(*args, **kwargs):
+ return defer.succeed(None)
+ hs.get_datastore().insert_client_ip = _insert_client_ip
+
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ def tearDown(self):
+ pass
+
+ @defer.inlineCallbacks
+ def test_get_member_list(self):
+ room_id = yield self.create_room_as(self.user_id)
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/rooms/%s/members" % room_id)
+ self.assertEquals(200, code, msg=str(response))
+
+ @defer.inlineCallbacks
+ def test_get_member_list_no_room(self):
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/rooms/roomdoesnotexist/members")
+ self.assertEquals(403, code, msg=str(response))
+
+ @defer.inlineCallbacks
+ def test_get_member_list_no_permission(self):
+ room_id = yield self.create_room_as("@some_other_guy:red")
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/rooms/%s/members" % room_id)
+ self.assertEquals(403, code, msg=str(response))
+
+ @defer.inlineCallbacks
+ def test_get_member_list_mixed_memberships(self):
+ room_creator = "@some_other_guy:red"
+ room_id = yield self.create_room_as(room_creator)
+ room_path = "/rooms/%s/members" % room_id
+ yield self.invite(room=room_id, src=room_creator,
+ targ=self.user_id)
+ # can't see list if you're just invited.
+ (code, response) = yield self.mock_resource.trigger_get(room_path)
+ self.assertEquals(403, code, msg=str(response))
+
+ yield self.join(room=room_id, user=self.user_id)
+ # can see list now joined
+ (code, response) = yield self.mock_resource.trigger_get(room_path)
+ self.assertEquals(200, code, msg=str(response))
+
+ yield self.leave(room=room_id, user=self.user_id)
+ # can see old list once left
+ (code, response) = yield self.mock_resource.trigger_get(room_path)
+ self.assertEquals(200, code, msg=str(response))
+
+
+class RoomsCreateTestCase(RestTestCase):
+ """ Tests /rooms and /rooms/$room_id REST events. """
+ user_id = "@sid1:red"
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+ self.auth_user_id = self.user_id
+
+ hs = yield setup_test_homeserver(
+ "red",
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=["send_message"]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.get_handlers().federation_handler = Mock()
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.auth_user_id),
+ "token_id": 1,
+ "is_guest": False,
+ }
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ def _insert_client_ip(*args, **kwargs):
+ return defer.succeed(None)
+ hs.get_datastore().insert_client_ip = _insert_client_ip
+
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ def tearDown(self):
+ pass
+
+ @defer.inlineCallbacks
+ def test_post_room_no_keys(self):
+ # POST with no config keys, expect new room id
+ (code, response) = yield self.mock_resource.trigger("POST",
+ "/createRoom",
+ "{}")
+ self.assertEquals(200, code, response)
+ self.assertTrue("room_id" in response)
+
+ @defer.inlineCallbacks
+ def test_post_room_visibility_key(self):
+ # POST with visibility config key, expect new room id
+ (code, response) = yield self.mock_resource.trigger(
+ "POST",
+ "/createRoom",
+ '{"visibility":"private"}')
+ self.assertEquals(200, code)
+ self.assertTrue("room_id" in response)
+
+ @defer.inlineCallbacks
+ def test_post_room_custom_key(self):
+ # POST with custom config keys, expect new room id
+ (code, response) = yield self.mock_resource.trigger(
+ "POST",
+ "/createRoom",
+ '{"custom":"stuff"}')
+ self.assertEquals(200, code)
+ self.assertTrue("room_id" in response)
+
+ @defer.inlineCallbacks
+ def test_post_room_known_and_unknown_keys(self):
+ # POST with custom + known config keys, expect new room id
+ (code, response) = yield self.mock_resource.trigger(
+ "POST",
+ "/createRoom",
+ '{"visibility":"private","custom":"things"}')
+ self.assertEquals(200, code)
+ self.assertTrue("room_id" in response)
+
+ @defer.inlineCallbacks
+ def test_post_room_invalid_content(self):
+ # POST with invalid content / paths, expect 400
+ (code, response) = yield self.mock_resource.trigger(
+ "POST",
+ "/createRoom",
+ '{"visibili')
+ self.assertEquals(400, code)
+
+ (code, response) = yield self.mock_resource.trigger(
+ "POST",
+ "/createRoom",
+ '["hello"]')
+ self.assertEquals(400, code)
+
+
+class RoomTopicTestCase(RestTestCase):
+ """ Tests /rooms/$room_id/topic REST events. """
+ user_id = "@sid1:red"
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+ self.auth_user_id = self.user_id
+
+ hs = yield setup_test_homeserver(
+ "red",
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=["send_message"]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.get_handlers().federation_handler = Mock()
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.auth_user_id),
+ "token_id": 1,
+ "is_guest": False,
+ }
+
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ def _insert_client_ip(*args, **kwargs):
+ return defer.succeed(None)
+ hs.get_datastore().insert_client_ip = _insert_client_ip
+
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ # create the room
+ self.room_id = yield self.create_room_as(self.user_id)
+ self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,)
+
+ def tearDown(self):
+ pass
+
+ @defer.inlineCallbacks
+ def test_invalid_puts(self):
+ # missing keys or invalid json
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ self.path, '{}')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ self.path, '{"_name":"bob"}')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ self.path, '{"nao')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ self.path, '[{"_name":"bob"},{"_name":"jill"}]')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ self.path, 'text only')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ self.path, '')
+ self.assertEquals(400, code, msg=str(response))
+
+ # valid key, wrong type
+ content = '{"topic":["Topic name"]}'
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ self.path, content)
+ self.assertEquals(400, code, msg=str(response))
+
+ @defer.inlineCallbacks
+ def test_rooms_topic(self):
+ # nothing should be there
+ (code, response) = yield self.mock_resource.trigger_get(self.path)
+ self.assertEquals(404, code, msg=str(response))
+
+ # valid put
+ content = '{"topic":"Topic name"}'
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ self.path, content)
+ self.assertEquals(200, code, msg=str(response))
+
+ # valid get
+ (code, response) = yield self.mock_resource.trigger_get(self.path)
+ self.assertEquals(200, code, msg=str(response))
+ self.assert_dict(json.loads(content), response)
+
+ @defer.inlineCallbacks
+ def test_rooms_topic_with_extra_keys(self):
+ # valid put with extra keys
+ content = '{"topic":"Seasons","subtopic":"Summer"}'
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ self.path, content)
+ self.assertEquals(200, code, msg=str(response))
+
+ # valid get
+ (code, response) = yield self.mock_resource.trigger_get(self.path)
+ self.assertEquals(200, code, msg=str(response))
+ self.assert_dict(json.loads(content), response)
+
+
+class RoomMemberStateTestCase(RestTestCase):
+ """ Tests /rooms/$room_id/members/$user_id/state REST events. """
+ user_id = "@sid1:red"
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+ self.auth_user_id = self.user_id
+
+ hs = yield setup_test_homeserver(
+ "red",
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=["send_message"]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.get_handlers().federation_handler = Mock()
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.auth_user_id),
+ "token_id": 1,
+ "is_guest": False,
+ }
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ def _insert_client_ip(*args, **kwargs):
+ return defer.succeed(None)
+ hs.get_datastore().insert_client_ip = _insert_client_ip
+
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ self.room_id = yield self.create_room_as(self.user_id)
+
+ def tearDown(self):
+ pass
+
+ @defer.inlineCallbacks
+ def test_invalid_puts(self):
+ path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id)
+ # missing keys or invalid json
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '{}')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '{"_name":"bob"}')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '{"nao')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '[{"_name":"bob"},{"_name":"jill"}]')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, 'text only')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '')
+ self.assertEquals(400, code, msg=str(response))
+
+ # valid keys, wrong types
+ content = ('{"membership":["%s","%s","%s"]}' %
+ (Membership.INVITE, Membership.JOIN, Membership.LEAVE))
+ (code, response) = yield self.mock_resource.trigger("PUT", path, content)
+ self.assertEquals(400, code, msg=str(response))
+
+ @defer.inlineCallbacks
+ def test_rooms_members_self(self):
+ path = "/rooms/%s/state/m.room.member/%s" % (
+ urllib.quote(self.room_id), self.user_id
+ )
+
+ # valid join message (NOOP since we made the room)
+ content = '{"membership":"%s"}' % Membership.JOIN
+ (code, response) = yield self.mock_resource.trigger("PUT", path, content)
+ self.assertEquals(200, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("GET", path, None)
+ self.assertEquals(200, code, msg=str(response))
+
+ expected_response = {
+ "membership": Membership.JOIN,
+ }
+ self.assertEquals(expected_response, response)
+
+ @defer.inlineCallbacks
+ def test_rooms_members_other(self):
+ self.other_id = "@zzsid1:red"
+ path = "/rooms/%s/state/m.room.member/%s" % (
+ urllib.quote(self.room_id), self.other_id
+ )
+
+ # valid invite message
+ content = '{"membership":"%s"}' % Membership.INVITE
+ (code, response) = yield self.mock_resource.trigger("PUT", path, content)
+ self.assertEquals(200, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("GET", path, None)
+ self.assertEquals(200, code, msg=str(response))
+ self.assertEquals(json.loads(content), response)
+
+ @defer.inlineCallbacks
+ def test_rooms_members_other_custom_keys(self):
+ self.other_id = "@zzsid1:red"
+ path = "/rooms/%s/state/m.room.member/%s" % (
+ urllib.quote(self.room_id), self.other_id
+ )
+
+ # valid invite message with custom key
+ content = ('{"membership":"%s","invite_text":"%s"}' %
+ (Membership.INVITE, "Join us!"))
+ (code, response) = yield self.mock_resource.trigger("PUT", path, content)
+ self.assertEquals(200, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("GET", path, None)
+ self.assertEquals(200, code, msg=str(response))
+ self.assertEquals(json.loads(content), response)
+
+
+class RoomMessagesTestCase(RestTestCase):
+ """ Tests /rooms/$room_id/messages/$user_id/$msg_id REST events. """
+ user_id = "@sid1:red"
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+ self.auth_user_id = self.user_id
+
+ hs = yield setup_test_homeserver(
+ "red",
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=["send_message"]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.get_handlers().federation_handler = Mock()
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.auth_user_id),
+ "token_id": 1,
+ "is_guest": False,
+ }
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ def _insert_client_ip(*args, **kwargs):
+ return defer.succeed(None)
+ hs.get_datastore().insert_client_ip = _insert_client_ip
+
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ self.room_id = yield self.create_room_as(self.user_id)
+
+ def tearDown(self):
+ pass
+
+ @defer.inlineCallbacks
+ def test_invalid_puts(self):
+ path = "/rooms/%s/send/m.room.message/mid1" % (
+ urllib.quote(self.room_id))
+ # missing keys or invalid json
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '{}')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '{"_name":"bob"}')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '{"nao')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '[{"_name":"bob"},{"_name":"jill"}]')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, 'text only')
+ self.assertEquals(400, code, msg=str(response))
+
+ (code, response) = yield self.mock_resource.trigger("PUT",
+ path, '')
+ self.assertEquals(400, code, msg=str(response))
+
+ @defer.inlineCallbacks
+ def test_rooms_messages_sent(self):
+ path = "/rooms/%s/send/m.room.message/mid1" % (
+ urllib.quote(self.room_id))
+
+ content = '{"body":"test","msgtype":{"type":"a"}}'
+ (code, response) = yield self.mock_resource.trigger("PUT", path, content)
+ self.assertEquals(400, code, msg=str(response))
+
+ # custom message types
+ content = '{"body":"test","msgtype":"test.custom.text"}'
+ (code, response) = yield self.mock_resource.trigger("PUT", path, content)
+ self.assertEquals(200, code, msg=str(response))
+
+# (code, response) = yield self.mock_resource.trigger("GET", path, None)
+# self.assertEquals(200, code, msg=str(response))
+# self.assert_dict(json.loads(content), response)
+
+ # m.text message type
+ path = "/rooms/%s/send/m.room.message/mid2" % (
+ urllib.quote(self.room_id))
+ content = '{"body":"test2","msgtype":"m.text"}'
+ (code, response) = yield self.mock_resource.trigger("PUT", path, content)
+ self.assertEquals(200, code, msg=str(response))
+
+
+class RoomInitialSyncTestCase(RestTestCase):
+ """ Tests /rooms/$room_id/initialSync. """
+ user_id = "@sid1:red"
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+ self.auth_user_id = self.user_id
+
+ hs = yield setup_test_homeserver(
+ "red",
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=[
+ "send_message",
+ ]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.get_handlers().federation_handler = Mock()
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.auth_user_id),
+ "token_id": 1,
+ "is_guest": False,
+ }
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ def _insert_client_ip(*args, **kwargs):
+ return defer.succeed(None)
+ hs.get_datastore().insert_client_ip = _insert_client_ip
+
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ # Since I'm getting my own presence I need to exist as far as presence
+ # is concerned.
+ hs.get_handlers().presence_handler.registered_user(
+ UserID.from_string(self.user_id)
+ )
+
+ # create the room
+ self.room_id = yield self.create_room_as(self.user_id)
+
+ @defer.inlineCallbacks
+ def test_initial_sync(self):
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/rooms/%s/initialSync" % self.room_id)
+ self.assertEquals(200, code)
+
+ self.assertEquals(self.room_id, response["room_id"])
+ self.assertEquals("join", response["membership"])
+
+ # Room state is easier to assert on if we unpack it into a dict
+ state = {}
+ for event in response["state"]:
+ if "state_key" not in event:
+ continue
+ t = event["type"]
+ if t not in state:
+ state[t] = []
+ state[t].append(event)
+
+ self.assertTrue("m.room.create" in state)
+
+ self.assertTrue("messages" in response)
+ self.assertTrue("chunk" in response["messages"])
+ self.assertTrue("end" in response["messages"])
+
+ self.assertTrue("presence" in response)
+
+ presence_by_user = {e["content"]["user_id"]: e
+ for e in response["presence"]
+ }
+ self.assertTrue(self.user_id in presence_by_user)
+ self.assertEquals("m.presence", presence_by_user[self.user_id]["type"])
+
+
+class RoomMessageListTestCase(RestTestCase):
+ """ Tests /rooms/$room_id/messages REST events. """
+ user_id = "@sid1:red"
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+ self.auth_user_id = self.user_id
+
+ hs = yield setup_test_homeserver(
+ "red",
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=["send_message"]),
+ )
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.get_handlers().federation_handler = Mock()
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.auth_user_id),
+ "token_id": 1,
+ "is_guest": False,
+ }
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ def _insert_client_ip(*args, **kwargs):
+ return defer.succeed(None)
+ hs.get_datastore().insert_client_ip = _insert_client_ip
+
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ self.room_id = yield self.create_room_as(self.user_id)
+
+ @defer.inlineCallbacks
+ def test_topo_token_is_accepted(self):
+ token = "t1-0_0_0_0_0"
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/rooms/%s/messages?access_token=x&from=%s" %
+ (self.room_id, token))
+ self.assertEquals(200, code)
+ self.assertTrue("start" in response)
+ self.assertEquals(token, response['start'])
+ self.assertTrue("chunk" in response)
+ self.assertTrue("end" in response)
+
+ @defer.inlineCallbacks
+ def test_stream_token_is_rejected(self):
+ (code, response) = yield self.mock_resource.trigger_get(
+ "/rooms/%s/messages?access_token=x&from=s0_0_0_0" %
+ self.room_id)
+ self.assertEquals(400, code)
diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py
new file mode 100644
index 00000000..61b9cc74
--- /dev/null
+++ b/tests/rest/client/v1/test_typing.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests REST events for /rooms paths."""
+
+# twisted imports
+from twisted.internet import defer
+
+import synapse.rest.client.v1.room
+from synapse.types import UserID
+
+from ....utils import MockHttpResource, MockClock, setup_test_homeserver
+from .utils import RestTestCase
+
+from mock import Mock, NonCallableMock
+
+
+PATH_PREFIX = "/_matrix/client/api/v1"
+
+
+class RoomTypingTestCase(RestTestCase):
+ """ Tests /rooms/$room_id/typing/$user_id REST API. """
+ user_id = "@sid:red"
+
+ user = UserID.from_string(user_id)
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.clock = MockClock()
+
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+ self.auth_user_id = self.user_id
+
+ hs = yield setup_test_homeserver(
+ "red",
+ clock=self.clock,
+ http_client=None,
+ replication_layer=Mock(),
+ ratelimiter=NonCallableMock(spec_set=[
+ "send_message",
+ ]),
+ )
+ self.hs = hs
+
+ self.event_source = hs.get_event_sources().sources["typing"]
+
+ self.ratelimiter = hs.get_ratelimiter()
+ self.ratelimiter.send_message.return_value = (True, 0)
+
+ hs.get_handlers().federation_handler = Mock()
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.auth_user_id),
+ "token_id": 1,
+ "is_guest": False,
+ }
+
+ hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token
+
+ def _insert_client_ip(*args, **kwargs):
+ return defer.succeed(None)
+ hs.get_datastore().insert_client_ip = _insert_client_ip
+
+ def get_room_members(room_id):
+ if room_id == self.room_id:
+ return defer.succeed([self.user])
+ else:
+ return defer.succeed([])
+
+ @defer.inlineCallbacks
+ def fetch_room_distributions_into(room_id, localusers=None,
+ remotedomains=None, ignore_user=None):
+
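+ # Mimic the real handler: split the room's members into local users
+ # and remote domains, skipping ignore_user.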
+ members = yield get_room_members(room_id)
+ for member in members:
+ if ignore_user is not None and member == ignore_user:
+ continue
+
+ if hs.is_mine(member):
+ if localusers is not None:
+ localusers.add(member)
+ else:
+ if remotedomains is not None:
+ remotedomains.add(member.domain)
+ hs.get_handlers().room_member_handler.fetch_room_distributions_into = (
+ fetch_room_distributions_into)
+
+ synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
+
+ self.room_id = yield self.create_room_as(self.user_id)
+ # Need another user to make notifications actually work
+ yield self.join(self.room_id, user="@jim:red")
+
+ def tearDown(self):
+ self.hs.get_handlers().typing_notification_handler.tearDown()
+
+ @defer.inlineCallbacks
+ def test_set_typing(self):
+ (code, _) = yield self.mock_resource.trigger("PUT",
+ "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
+ '{"typing": true, "timeout": 30000}'
+ )
+ self.assertEquals(200, code)
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+ events = yield self.event_source.get_new_events(
+ from_key=0,
+ room_ids=[self.room_id],
+ )
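+ # get_new_events returns an (events, next_key) pair, hence events[0]
+ # below for the event list itself.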
+ self.assertEquals(
+ events[0],
+ [
+ {"type": "m.typing",
+ "room_id": self.room_id,
+ "content": {
+ "user_ids": [self.user_id],
+ }},
+ ]
+ )
+
+ @defer.inlineCallbacks
+ def test_set_not_typing(self):
+ (code, _) = yield self.mock_resource.trigger("PUT",
+ "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
+ '{"typing": false}'
+ )
+ self.assertEquals(200, code)
+
+ @defer.inlineCallbacks
+ def test_typing_timeout(self):
+ (code, _) = yield self.mock_resource.trigger("PUT",
+ "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
+ '{"typing": true, "timeout": 30000}'
+ )
+ self.assertEquals(200, code)
+
+ self.assertEquals(self.event_source.get_current_key(), 1)
+
+ self.clock.advance_time(31);
+
+ self.assertEquals(self.event_source.get_current_key(), 2)
+
+ (code, _) = yield self.mock_resource.trigger("PUT",
+ "/rooms/%s/typing/%s" % (self.room_id, self.user_id),
+ '{"typing": true, "timeout": 30000}'
+ )
+ self.assertEquals(200, code)
+
+ self.assertEquals(self.event_source.get_current_key(), 3)
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
new file mode 100644
index 00000000..85096a03
--- /dev/null
+++ b/tests/rest/client/v1/utils.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# twisted imports
+from twisted.internet import defer
+
+# trial imports
+from tests import unittest
+
+from synapse.api.constants import Membership
+
+import json
+import time
+
+
+class RestTestCase(unittest.TestCase):
+ """Contains extra helper functions to quickly and clearly perform a given
+ REST action, which isn't the focus of the test.
+
+ This subclass assumes there are mock_resource and auth_user_id attributes.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(RestTestCase, self).__init__(*args, **kwargs)
+ self.mock_resource = None
+ self.auth_user_id = None
+
+ @defer.inlineCallbacks
+ def create_room_as(self, room_creator, is_public=True, tok=None):
+ temp_id = self.auth_user_id
+ self.auth_user_id = room_creator
+ path = "/createRoom"
+ content = "{}"
+ if not is_public:
+ content = '{"visibility":"private"}'
+ if tok:
+ path = path + "?access_token=%s" % tok
+ (code, response) = yield self.mock_resource.trigger("POST", path, content)
+ self.assertEquals(200, code, msg=str(response))
+ self.auth_user_id = temp_id
+ defer.returnValue(response["room_id"])
+
+ @defer.inlineCallbacks
+ def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
+ yield self.change_membership(room=room, src=src, targ=targ, tok=tok,
+ membership=Membership.INVITE,
+ expect_code=expect_code)
+
+ @defer.inlineCallbacks
+ def join(self, room=None, user=None, expect_code=200, tok=None):
+ yield self.change_membership(room=room, src=user, targ=user, tok=tok,
+ membership=Membership.JOIN,
+ expect_code=expect_code)
+
+ @defer.inlineCallbacks
+ def leave(self, room=None, user=None, expect_code=200, tok=None):
+ yield self.change_membership(room=room, src=user, targ=user, tok=tok,
+ membership=Membership.LEAVE,
+ expect_code=expect_code)
+
+ @defer.inlineCallbacks
+ def change_membership(self, room, src, targ, membership, tok=None,
+ expect_code=200):
+ temp_id = self.auth_user_id
+ self.auth_user_id = src
+
+ path = "/rooms/%s/state/m.room.member/%s" % (room, targ)
+ if tok:
+ path = path + "?access_token=%s" % tok
+
+ data = {
+ "membership": membership
+ }
+
+ (code, response) = yield self.mock_resource.trigger("PUT", path,
+ json.dumps(data))
+ self.assertEquals(expect_code, code, msg=str(response))
+
+ self.auth_user_id = temp_id
+
+ @defer.inlineCallbacks
+ def register(self, user_id):
+ (code, response) = yield self.mock_resource.trigger(
+ "POST",
+ "/register",
+ json.dumps({
+ "user": user_id,
+ "password": "test",
+ "type": "m.login.password"
+ }))
+ self.assertEquals(200, code)
+ defer.returnValue(response)
+
+ @defer.inlineCallbacks
+ def send(self, room_id, body=None, txn_id=None, tok=None,
+ expect_code=200):
+ if txn_id is None:
+ txn_id = "m%s" % (str(time.time()))
+ if body is None:
+ body = "body_text_here"
+
+ path = "/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
+ content = '{"msgtype":"m.text","body":"%s"}' % body
+ if tok:
+ path = path + "?access_token=%s" % tok
+
+ (code, response) = yield self.mock_resource.trigger("PUT", path, content)
+ self.assertEquals(expect_code, code, msg=str(response))
+
+ def assert_dict(self, required, actual):
+ """Does a partial assert of a dict.
+
+ Args:
+ required (dict): The keys and value which MUST be in 'actual'.
+ actual (dict): The test result. Extra keys will not be checked.
+ """
+ for key in required:
+ self.assertEquals(required[key], actual[key],
+ msg="%s mismatch. %s" % (key, actual))
diff --git a/tests/rest/client/v2_alpha/__init__.py b/tests/rest/client/v2_alpha/__init__.py
new file mode 100644
index 00000000..fa9e17ec
--- /dev/null
+++ b/tests/rest/client/v2_alpha/__init__.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+
+from mock import Mock
+
+from ....utils import MockHttpResource, setup_test_homeserver
+
+from synapse.types import UserID
+
+from twisted.internet import defer
+
+
+PATH_PREFIX = "/_matrix/client/v2_alpha"
+
+
+class V2AlphaRestTestCase(unittest.TestCase):
+ # Consumer must define
+ # USER_ID = <some string>
+ # TO_REGISTER = [<list of REST servlets to register>]
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
+
+ hs = yield setup_test_homeserver(
+ datastore=self.make_datastore_mock(),
+ http_client=None,
+ resource_for_client=self.mock_resource,
+ resource_for_federation=self.mock_resource,
+ )
+
+ def _get_user_by_access_token(token=None, allow_guest=False):
+ return {
+ "user": UserID.from_string(self.USER_ID),
+ "token_id": 1,
+ "is_guest": False,
+ }
+ hs.get_auth()._get_user_by_access_token = _get_user_by_access_token
+
+ for r in self.TO_REGISTER:
+ r.register_servlets(hs, self.mock_resource)
+
+ def make_datastore_mock(self):
+ store = Mock(spec=[
+ "insert_client_ip",
+ ])
+ store.get_app_service_by_token = Mock(return_value=None)
+ return store
diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py
new file mode 100644
index 00000000..80ddabf8
--- /dev/null
+++ b/tests/rest/client/v2_alpha/test_filter.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from . import V2AlphaRestTestCase
+
+from synapse.rest.client.v2_alpha import filter
+
+from synapse.api.errors import StoreError
+
+
+class FilterTestCase(V2AlphaRestTestCase):
+ USER_ID = "@apple:test"
+ TO_REGISTER = [filter]
+
+ def make_datastore_mock(self):
+ datastore = super(FilterTestCase, self).make_datastore_mock()
+
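+        # Back the datastore mock with a simple in-memory store of filters,
+        # keyed by user localpart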
+ self._user_filters = {}
+
+ def add_user_filter(user_localpart, definition):
+ filters = self._user_filters.setdefault(user_localpart, [])
+ filter_id = len(filters)
+ filters.append(definition)
+ return defer.succeed(filter_id)
+ datastore.add_user_filter = add_user_filter
+
+ def get_user_filter(user_localpart, filter_id):
+ if user_localpart not in self._user_filters:
+ raise StoreError(404, "No user")
+ filters = self._user_filters[user_localpart]
+ if filter_id >= len(filters):
+ raise StoreError(404, "No filter")
+ return defer.succeed(filters[filter_id])
+ datastore.get_user_filter = get_user_filter
+
+ return datastore
+
+ @defer.inlineCallbacks
+ def test_add_filter(self):
+ (code, response) = yield self.mock_resource.trigger("POST",
+ "/user/%s/filter" % (self.USER_ID),
+ '{"type": ["m.*"]}'
+ )
+ self.assertEquals(200, code)
+ self.assertEquals({"filter_id": "0"}, response)
+
+ self.assertIn("apple", self._user_filters)
+ self.assertEquals(len(self._user_filters["apple"]), 1)
+ self.assertEquals({"type": ["m.*"]}, self._user_filters["apple"][0])
+
+ @defer.inlineCallbacks
+ def test_get_filter(self):
+ self._user_filters["apple"] = [
+ {"type": ["m.*"]}
+ ]
+
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/user/%s/filter/0" % (self.USER_ID), None
+ )
+ self.assertEquals(200, code)
+ self.assertEquals({"type": ["m.*"]}, response)
+
+ @defer.inlineCallbacks
+ def test_get_filter_no_id(self):
+ self._user_filters["apple"] = [
+ {"type": ["m.*"]}
+ ]
+
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/user/%s/filter/2" % (self.USER_ID), None
+ )
+ self.assertEquals(404, code)
+
+ @defer.inlineCallbacks
+ def test_get_filter_no_user(self):
+ (code, response) = yield self.mock_resource.trigger("GET",
+ "/user/%s/filter/0" % (self.USER_ID), None
+ )
+ self.assertEquals(404, code)
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
new file mode 100644
index 00000000..f9a2b224
--- /dev/null
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -0,0 +1,135 @@
+from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+from synapse.api.errors import SynapseError
+from twisted.internet import defer
+from mock import Mock
+from tests import unittest
+import json
+
+
+class RegisterRestServletTestCase(unittest.TestCase):
+
+ def setUp(self):
+ # do the dance to hook up request data to self.request_data
+ self.request_data = ""
+ self.request = Mock(
+ content=Mock(read=Mock(side_effect=lambda: self.request_data)),
+ path='/_matrix/api/v2_alpha/register'
+ )
+ self.request.args = {}
+
+ self.appservice = None
+ self.auth = Mock(get_appservice_by_req=Mock(
+ side_effect=lambda x: defer.succeed(self.appservice))
+ )
+
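+        # check_auth is mocked to return (authed, creds, params);
+        # start off unauthenticated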
+ self.auth_result = (False, None, None)
+ self.auth_handler = Mock(
+            check_auth=Mock(side_effect=lambda x, y, z: self.auth_result)
+ )
+ self.registration_handler = Mock()
+ self.identity_handler = Mock()
+ self.login_handler = Mock()
+
+ # do the dance to hook it up to the hs global
+ self.handlers = Mock(
+ auth_handler=self.auth_handler,
+ registration_handler=self.registration_handler,
+ identity_handler=self.identity_handler,
+ login_handler=self.login_handler
+ )
+ self.hs = Mock()
+ self.hs.hostname = "superbig~testing~thing.com"
+ self.hs.get_auth = Mock(return_value=self.auth)
+ self.hs.get_handlers = Mock(return_value=self.handlers)
+ self.hs.config.disable_registration = False
+
+ # init the thing we're testing
+ self.servlet = RegisterRestServlet(self.hs)
+
+ @defer.inlineCallbacks
+ def test_POST_appservice_registration_valid(self):
+ user_id = "@kermit:muppet"
+ token = "kermits_access_token"
+ self.request.args = {
+ "access_token": "i_am_an_app_service"
+ }
+ self.request_data = json.dumps({
+ "username": "kermit"
+ })
+ self.appservice = {
+ "id": "1234"
+ }
+ self.registration_handler.appservice_register = Mock(
+ return_value=(user_id, token)
+ )
+ result = yield self.servlet.on_POST(self.request)
+ self.assertEquals(result, (200, {
+ "user_id": user_id,
+ "access_token": token,
+ "home_server": self.hs.hostname
+ }))
+
+ @defer.inlineCallbacks
+ def test_POST_appservice_registration_invalid(self):
+ self.request.args = {
+ "access_token": "i_am_an_app_service"
+ }
+ self.request_data = json.dumps({
+ "username": "kermit"
+ })
+ self.appservice = None # no application service exists
+ result = yield self.servlet.on_POST(self.request)
+ self.assertEquals(result, (401, None))
+
+ def test_POST_bad_password(self):
+ self.request_data = json.dumps({
+ "username": "kermit",
+ "password": 666
+ })
+ d = self.servlet.on_POST(self.request)
+ return self.assertFailure(d, SynapseError)
+
+ def test_POST_bad_username(self):
+ self.request_data = json.dumps({
+ "username": 777,
+ "password": "monkey"
+ })
+ d = self.servlet.on_POST(self.request)
+ return self.assertFailure(d, SynapseError)
+
+ @defer.inlineCallbacks
+ def test_POST_user_valid(self):
+ user_id = "@kermit:muppet"
+ token = "kermits_access_token"
+ self.request_data = json.dumps({
+ "username": "kermit",
+ "password": "monkey"
+ })
+ self.registration_handler.check_username = Mock(return_value=True)
+ self.auth_result = (True, None, {
+ "username": "kermit",
+ "password": "monkey"
+ })
+ self.registration_handler.register = Mock(return_value=(user_id, token))
+
+ result = yield self.servlet.on_POST(self.request)
+ self.assertEquals(result, (200, {
+ "user_id": user_id,
+ "access_token": token,
+ "home_server": self.hs.hostname
+ }))
+
+ def test_POST_disabled_registration(self):
+ self.hs.config.disable_registration = True
+ self.request_data = json.dumps({
+ "username": "kermit",
+ "password": "monkey"
+ })
+ self.registration_handler.check_username = Mock(return_value=True)
+ self.auth_result = (True, None, {
+ "username": "kermit",
+ "password": "monkey"
+ })
+ self.registration_handler.register = Mock(return_value=("@user:id", "t"))
+ d = self.servlet.on_POST(self.request)
+ return self.assertFailure(d, SynapseError)
diff --git a/tests/storage/__init__.py b/tests/storage/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/storage/__init__.py
diff --git a/tests/storage/event_injector.py b/tests/storage/event_injector.py
new file mode 100644
index 00000000..42bd8928
--- /dev/null
+++ b/tests/storage/event_injector.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes
+
+
+class EventInjector:
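+    """Test helper that builds events with the event builder factory and
+    persists them directly to the datastore, bypassing the REST layer.
+    """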
+ def __init__(self, hs):
+ self.hs = hs
+ self.store = hs.get_datastore()
+ self.message_handler = hs.get_handlers().message_handler
+ self.event_builder_factory = hs.get_event_builder_factory()
+
+ @defer.inlineCallbacks
+ def create_room(self, room):
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Create,
+ "room_id": room.to_string(),
+ "content": {},
+ })
+
+ event, context = yield self.message_handler._create_new_client_event(
+ builder
+ )
+
+ yield self.store.persist_event(event, context)
+
+ @defer.inlineCallbacks
+ def inject_room_member(self, room, user, membership):
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Member,
+ "sender": user.to_string(),
+ "state_key": user.to_string(),
+ "room_id": room.to_string(),
+ "content": {"membership": membership},
+ })
+
+ event, context = yield self.message_handler._create_new_client_event(
+ builder
+ )
+
+ yield self.store.persist_event(event, context)
+
+ defer.returnValue(event)
+
+ @defer.inlineCallbacks
+ def inject_message(self, room, user, body):
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Message,
+ "sender": user.to_string(),
+ "state_key": user.to_string(),
+ "room_id": room.to_string(),
+ "content": {"body": body, "msgtype": u"message"},
+ })
+
+ event, context = yield self.message_handler._create_new_client_event(
+ builder
+ )
+
+ yield self.store.persist_event(event, context)
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
new file mode 100644
index 00000000..e72cace8
--- /dev/null
+++ b/tests/storage/test__base.py
@@ -0,0 +1,199 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.util.async import ObservableDeferred
+
+from synapse.util.caches.descriptors import Cache, cached
+
+
+class CacheTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.cache = Cache("test")
+
+ def test_empty(self):
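+        # Cache.get raises KeyError on a miss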
+ failed = False
+ try:
+ self.cache.get("foo")
+ except KeyError:
+ failed = True
+
+ self.assertTrue(failed)
+
+ def test_hit(self):
+ self.cache.prefill("foo", 123)
+
+ self.assertEquals(self.cache.get("foo"), 123)
+
+ def test_invalidate(self):
+ self.cache.prefill(("foo",), 123)
+ self.cache.invalidate(("foo",))
+
+ failed = False
+ try:
+ self.cache.get(("foo",))
+ except KeyError:
+ failed = True
+
+ self.assertTrue(failed)
+
+ def test_eviction(self):
+ cache = Cache("test", max_entries=2)
+
+ cache.prefill(1, "one")
+ cache.prefill(2, "two")
+ cache.prefill(3, "three") # 1 will be evicted
+
+ failed = False
+ try:
+ cache.get(1)
+ except KeyError:
+ failed = True
+
+ self.assertTrue(failed)
+
+ cache.get(2)
+ cache.get(3)
+
+ def test_eviction_lru(self):
+ cache = Cache("test", max_entries=2, lru=True)
+
+ cache.prefill(1, "one")
+ cache.prefill(2, "two")
+
+ # Now access 1 again, thus causing 2 to be least-recently used
+ cache.get(1)
+
+ cache.prefill(3, "three")
+
+ failed = False
+ try:
+ cache.get(2)
+ except KeyError:
+ failed = True
+
+ self.assertTrue(failed)
+
+ cache.get(1)
+ cache.get(3)
+
+
+class CacheDecoratorTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def test_passthrough(self):
+ class A(object):
+ @cached()
+ def func(self, key):
+ return key
+
+ a = A()
+
+ self.assertEquals((yield a.func("foo")), "foo")
+ self.assertEquals((yield a.func("bar")), "bar")
+
+ @defer.inlineCallbacks
+ def test_hit(self):
+ callcount = [0]
+
+ class A(object):
+ @cached()
+ def func(self, key):
+ callcount[0] += 1
+ return key
+
+ a = A()
+ yield a.func("foo")
+
+ self.assertEquals(callcount[0], 1)
+
+ self.assertEquals((yield a.func("foo")), "foo")
+ self.assertEquals(callcount[0], 1)
+
+ @defer.inlineCallbacks
+ def test_invalidate(self):
+ callcount = [0]
+
+ class A(object):
+ @cached()
+ def func(self, key):
+ callcount[0] += 1
+ return key
+
+ a = A()
+ yield a.func("foo")
+
+ self.assertEquals(callcount[0], 1)
+
+ a.func.invalidate(("foo",))
+
+ yield a.func("foo")
+
+ self.assertEquals(callcount[0], 2)
+
+ def test_invalidate_missing(self):
+ class A(object):
+ @cached()
+ def func(self, key):
+ return key
+
+ A().func.invalidate(("what",))
+
+ @defer.inlineCallbacks
+ def test_max_entries(self):
+ callcount = [0]
+
+ class A(object):
+ @cached(max_entries=10)
+ def func(self, key):
+ callcount[0] += 1
+ return key
+
+ a = A()
+
+ for k in range(0, 12):
+ yield a.func(k)
+
+ self.assertEquals(callcount[0], 12)
+
+ # There must have been at least 2 evictions, meaning if we calculate
+ # all 12 values again, we must get called at least 2 more times
+        for k in range(0, 12):
+ yield a.func(k)
+
+ self.assertTrue(callcount[0] >= 14,
+ msg="Expected callcount >= 14, got %d" % (callcount[0]))
+
+ def test_prefill(self):
+ callcount = [0]
+
+ d = defer.succeed(123)
+
+ class A(object):
+ @cached()
+ def func(self, key):
+ callcount[0] += 1
+ return d
+
+ a = A()
+
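+        # A prefilled entry is served directly; the wrapped function is
+        # never invoked, so callcount stays at zero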
+ a.func.prefill(("foo",), ObservableDeferred(d))
+
+ self.assertEquals(a.func("foo").result, d.result)
+ self.assertEquals(callcount[0], 0)
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
new file mode 100644
index 00000000..77376b34
--- /dev/null
+++ b/tests/storage/test_appservice.py
@@ -0,0 +1,407 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+from twisted.internet import defer
+
+from tests.utils import setup_test_homeserver
+from synapse.appservice import ApplicationService, ApplicationServiceState
+from synapse.storage.appservice import (
+    ApplicationServiceStore, ApplicationServiceTransactionStore
+)
+
+import json
+import os
+import yaml
+from mock import Mock
+
+
+class ApplicationServiceStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.as_yaml_files = []
+ config = Mock(
+ app_service_config_files=self.as_yaml_files
+ )
+ hs = yield setup_test_homeserver(config=config)
+
+ self.as_token = "token1"
+ self.as_url = "some_url"
+ self._add_appservice(self.as_token, self.as_url, "some_hs_token", "bob")
+ self._add_appservice("token2", "some_url", "some_hs_token", "bob")
+ self._add_appservice("token3", "some_url", "some_hs_token", "bob")
+ # must be done after inserts
+ self.store = ApplicationServiceStore(hs)
+
+ def tearDown(self):
+ # TODO: suboptimal that we need to create files for tests!
+ for f in self.as_yaml_files:
+ try:
+ os.remove(f)
+            except Exception:
+ pass
+
+ def _add_appservice(self, as_token, url, hs_token, sender):
+ as_yaml = dict(url=url, as_token=as_token, hs_token=hs_token,
+ sender_localpart=sender, namespaces={})
+ # use the token as the filename
+ with open(as_token, 'w') as outfile:
+ outfile.write(yaml.dump(as_yaml))
+ self.as_yaml_files.append(as_token)
+
+ @defer.inlineCallbacks
+ def test_retrieve_unknown_service_token(self):
+ service = yield self.store.get_app_service_by_token("invalid_token")
+ self.assertEquals(service, None)
+
+ @defer.inlineCallbacks
+ def test_retrieval_of_service(self):
+ stored_service = yield self.store.get_app_service_by_token(
+ self.as_token
+ )
+ self.assertEquals(stored_service.token, self.as_token)
+ self.assertEquals(stored_service.url, self.as_url)
+ self.assertEquals(
+ stored_service.namespaces[ApplicationService.NS_ALIASES],
+ []
+ )
+ self.assertEquals(
+ stored_service.namespaces[ApplicationService.NS_ROOMS],
+ []
+ )
+ self.assertEquals(
+ stored_service.namespaces[ApplicationService.NS_USERS],
+ []
+ )
+
+ @defer.inlineCallbacks
+ def test_retrieval_of_all_services(self):
+ services = yield self.store.get_app_services()
+ self.assertEquals(len(services), 3)
+
+
+class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.as_yaml_files = []
+
+ config = Mock(
+ app_service_config_files=self.as_yaml_files
+ )
+ hs = yield setup_test_homeserver(config=config)
+ self.db_pool = hs.get_db_pool()
+
+ self.as_list = [
+ {
+ "token": "token1",
+ "url": "https://matrix-as.org",
+ "id": "token1"
+ },
+ {
+ "token": "alpha_tok",
+ "url": "https://alpha.com",
+ "id": "alpha_tok"
+ },
+ {
+ "token": "beta_tok",
+ "url": "https://beta.com",
+ "id": "beta_tok"
+ },
+ {
+ "token": "delta_tok",
+ "url": "https://delta.com",
+ "id": "delta_tok"
+ },
+ ]
+ for s in self.as_list:
+ yield self._add_service(s["url"], s["token"])
+
+ self.as_yaml_files = []
+
+ self.store = TestTransactionStore(hs)
+
+ def _add_service(self, url, as_token):
+ as_yaml = dict(url=url, as_token=as_token, hs_token="something",
+ sender_localpart="a_sender", namespaces={})
+ # use the token as the filename
+ with open(as_token, 'w') as outfile:
+ outfile.write(yaml.dump(as_yaml))
+ self.as_yaml_files.append(as_token)
+
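+    # The helpers below seed rows with raw SQL so that tests can set up
+    # state without going through the store under test.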
+ def _set_state(self, id, state, txn=None):
+ return self.db_pool.runQuery(
+ "INSERT INTO application_services_state(as_id, state, last_txn) "
+ "VALUES(?,?,?)",
+ (id, state, txn)
+ )
+
+ def _insert_txn(self, as_id, txn_id, events):
+ return self.db_pool.runQuery(
+ "INSERT INTO application_services_txns(as_id, txn_id, event_ids) "
+ "VALUES(?,?,?)",
+ (as_id, txn_id, json.dumps([e.event_id for e in events]))
+ )
+
+ def _set_last_txn(self, as_id, txn_id):
+ return self.db_pool.runQuery(
+ "INSERT INTO application_services_state(as_id, last_txn, state) "
+ "VALUES(?,?,?)",
+ (as_id, txn_id, ApplicationServiceState.UP)
+ )
+
+ @defer.inlineCallbacks
+ def test_get_appservice_state_none(self):
+ service = Mock(id=999)
+ state = yield self.store.get_appservice_state(service)
+ self.assertEquals(None, state)
+
+ @defer.inlineCallbacks
+ def test_get_appservice_state_up(self):
+ yield self._set_state(
+ self.as_list[0]["id"], ApplicationServiceState.UP
+ )
+ service = Mock(id=self.as_list[0]["id"])
+ state = yield self.store.get_appservice_state(service)
+ self.assertEquals(ApplicationServiceState.UP, state)
+
+ @defer.inlineCallbacks
+ def test_get_appservice_state_down(self):
+ yield self._set_state(
+ self.as_list[0]["id"], ApplicationServiceState.UP
+ )
+ yield self._set_state(
+ self.as_list[1]["id"], ApplicationServiceState.DOWN
+ )
+ yield self._set_state(
+ self.as_list[2]["id"], ApplicationServiceState.DOWN
+ )
+ service = Mock(id=self.as_list[1]["id"])
+ state = yield self.store.get_appservice_state(service)
+ self.assertEquals(ApplicationServiceState.DOWN, state)
+
+ @defer.inlineCallbacks
+ def test_get_appservices_by_state_none(self):
+ services = yield self.store.get_appservices_by_state(
+ ApplicationServiceState.DOWN
+ )
+ self.assertEquals(0, len(services))
+
+ @defer.inlineCallbacks
+ def test_set_appservices_state_down(self):
+ service = Mock(id=self.as_list[1]["id"])
+ yield self.store.set_appservice_state(
+ service,
+ ApplicationServiceState.DOWN
+ )
+ rows = yield self.db_pool.runQuery(
+ "SELECT as_id FROM application_services_state WHERE state=?",
+ (ApplicationServiceState.DOWN,)
+ )
+ self.assertEquals(service.id, rows[0][0])
+
+ @defer.inlineCallbacks
+ def test_set_appservices_state_multiple_up(self):
+ service = Mock(id=self.as_list[1]["id"])
+ yield self.store.set_appservice_state(
+ service,
+ ApplicationServiceState.UP
+ )
+ yield self.store.set_appservice_state(
+ service,
+ ApplicationServiceState.DOWN
+ )
+ yield self.store.set_appservice_state(
+ service,
+ ApplicationServiceState.UP
+ )
+ rows = yield self.db_pool.runQuery(
+ "SELECT as_id FROM application_services_state WHERE state=?",
+ (ApplicationServiceState.UP,)
+ )
+ self.assertEquals(service.id, rows[0][0])
+
+ @defer.inlineCallbacks
+ def test_create_appservice_txn_first(self):
+ service = Mock(id=self.as_list[0]["id"])
+ events = [Mock(event_id="e1"), Mock(event_id="e2")]
+ txn = yield self.store.create_appservice_txn(service, events)
+ self.assertEquals(txn.id, 1)
+ self.assertEquals(txn.events, events)
+ self.assertEquals(txn.service, service)
+
+ @defer.inlineCallbacks
+ def test_create_appservice_txn_older_last_txn(self):
+ service = Mock(id=self.as_list[0]["id"])
+ events = [Mock(event_id="e1"), Mock(event_id="e2")]
+ yield self._set_last_txn(service.id, 9643) # AS is falling behind
+ yield self._insert_txn(service.id, 9644, events)
+ yield self._insert_txn(service.id, 9645, events)
+ txn = yield self.store.create_appservice_txn(service, events)
+ self.assertEquals(txn.id, 9646)
+ self.assertEquals(txn.events, events)
+ self.assertEquals(txn.service, service)
+
+ @defer.inlineCallbacks
+ def test_create_appservice_txn_up_to_date_last_txn(self):
+ service = Mock(id=self.as_list[0]["id"])
+ events = [Mock(event_id="e1"), Mock(event_id="e2")]
+ yield self._set_last_txn(service.id, 9643)
+ txn = yield self.store.create_appservice_txn(service, events)
+ self.assertEquals(txn.id, 9644)
+ self.assertEquals(txn.events, events)
+ self.assertEquals(txn.service, service)
+
+ @defer.inlineCallbacks
+ def test_create_appservice_txn_up_fuzzing(self):
+ service = Mock(id=self.as_list[0]["id"])
+ events = [Mock(event_id="e1"), Mock(event_id="e2")]
+ yield self._set_last_txn(service.id, 9643)
+
+ # dump in rows with higher IDs to make sure the queries aren't wrong.
+ yield self._set_last_txn(self.as_list[1]["id"], 119643)
+ yield self._set_last_txn(self.as_list[2]["id"], 9)
+ yield self._set_last_txn(self.as_list[3]["id"], 9643)
+ yield self._insert_txn(self.as_list[1]["id"], 119644, events)
+ yield self._insert_txn(self.as_list[1]["id"], 119645, events)
+ yield self._insert_txn(self.as_list[1]["id"], 119646, events)
+ yield self._insert_txn(self.as_list[2]["id"], 10, events)
+ yield self._insert_txn(self.as_list[3]["id"], 9643, events)
+
+ txn = yield self.store.create_appservice_txn(service, events)
+ self.assertEquals(txn.id, 9644)
+ self.assertEquals(txn.events, events)
+ self.assertEquals(txn.service, service)
+
+ @defer.inlineCallbacks
+ def test_complete_appservice_txn_first_txn(self):
+ service = Mock(id=self.as_list[0]["id"])
+ events = [Mock(event_id="e1"), Mock(event_id="e2")]
+ txn_id = 1
+
+ yield self._insert_txn(service.id, txn_id, events)
+ yield self.store.complete_appservice_txn(txn_id=txn_id, service=service)
+
+ res = yield self.db_pool.runQuery(
+ "SELECT last_txn FROM application_services_state WHERE as_id=?",
+ (service.id,)
+ )
+ self.assertEquals(1, len(res))
+ self.assertEquals(txn_id, res[0][0])
+
+ res = yield self.db_pool.runQuery(
+ "SELECT * FROM application_services_txns WHERE txn_id=?",
+ (txn_id,)
+ )
+ self.assertEquals(0, len(res))
+
+ @defer.inlineCallbacks
+ def test_complete_appservice_txn_existing_in_state_table(self):
+ service = Mock(id=self.as_list[0]["id"])
+ events = [Mock(event_id="e1"), Mock(event_id="e2")]
+ txn_id = 5
+ yield self._set_last_txn(service.id, 4)
+ yield self._insert_txn(service.id, txn_id, events)
+ yield self.store.complete_appservice_txn(txn_id=txn_id, service=service)
+
+ res = yield self.db_pool.runQuery(
+ "SELECT last_txn, state FROM application_services_state WHERE "
+ "as_id=?",
+ (service.id,)
+ )
+ self.assertEquals(1, len(res))
+ self.assertEquals(txn_id, res[0][0])
+ self.assertEquals(ApplicationServiceState.UP, res[0][1])
+
+ res = yield self.db_pool.runQuery(
+ "SELECT * FROM application_services_txns WHERE txn_id=?",
+ (txn_id,)
+ )
+ self.assertEquals(0, len(res))
+
+ @defer.inlineCallbacks
+ def test_get_oldest_unsent_txn_none(self):
+ service = Mock(id=self.as_list[0]["id"])
+
+ txn = yield self.store.get_oldest_unsent_txn(service)
+ self.assertEquals(None, txn)
+
+ @defer.inlineCallbacks
+ def test_get_oldest_unsent_txn(self):
+ service = Mock(id=self.as_list[0]["id"])
+ events = [Mock(event_id="e1"), Mock(event_id="e2")]
+ other_events = [Mock(event_id="e5"), Mock(event_id="e6")]
+
+ # we aren't testing store._base stuff here, so mock this out
+ self.store._get_events_txn = Mock(return_value=events)
+
+ yield self._insert_txn(self.as_list[1]["id"], 9, other_events)
+ yield self._insert_txn(service.id, 10, events)
+ yield self._insert_txn(service.id, 11, other_events)
+ yield self._insert_txn(service.id, 12, other_events)
+
+ txn = yield self.store.get_oldest_unsent_txn(service)
+ self.assertEquals(service, txn.service)
+ self.assertEquals(10, txn.id)
+ self.assertEquals(events, txn.events)
+
+ @defer.inlineCallbacks
+ def test_get_appservices_by_state_single(self):
+ yield self._set_state(
+ self.as_list[0]["id"], ApplicationServiceState.DOWN
+ )
+ yield self._set_state(
+ self.as_list[1]["id"], ApplicationServiceState.UP
+ )
+
+ services = yield self.store.get_appservices_by_state(
+ ApplicationServiceState.DOWN
+ )
+ self.assertEquals(1, len(services))
+ self.assertEquals(self.as_list[0]["id"], services[0].id)
+
+ @defer.inlineCallbacks
+ def test_get_appservices_by_state_multiple(self):
+ yield self._set_state(
+ self.as_list[0]["id"], ApplicationServiceState.DOWN
+ )
+ yield self._set_state(
+ self.as_list[1]["id"], ApplicationServiceState.UP
+ )
+ yield self._set_state(
+ self.as_list[2]["id"], ApplicationServiceState.DOWN
+ )
+ yield self._set_state(
+ self.as_list[3]["id"], ApplicationServiceState.UP
+ )
+
+ services = yield self.store.get_appservices_by_state(
+ ApplicationServiceState.DOWN
+ )
+ self.assertEquals(2, len(services))
+ self.assertEquals(
+ set([self.as_list[2]["id"], self.as_list[0]["id"]]),
+ set([services[0].id, services[1].id])
+ )
+
+
+# required for ApplicationServiceTransactionStoreTestCase tests
+class TestTransactionStore(ApplicationServiceTransactionStore,
+ ApplicationServiceStore):
+
+ def __init__(self, hs):
+ super(TestTransactionStore, self).__init__(hs)
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
new file mode 100644
index 00000000..29289fa9
--- /dev/null
+++ b/tests/storage/test_background_update.py
@@ -0,0 +1,76 @@
+from tests import unittest
+from twisted.internet import defer
+
+from tests.utils import setup_test_homeserver
+
+from mock import Mock
+
+
+class BackgroundUpdateTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver()
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+
+ self.update_handler = Mock()
+
+ yield self.store.register_background_update_handler(
+ "test_update", self.update_handler
+ )
+
+ @defer.inlineCallbacks
+ def test_do_background_update(self):
+        desired_count = 1000
+        duration_ms = 42
+
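+        # First handler: pretend each item takes duration_ms to process,
+        # and advance the stored progress by one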
+ @defer.inlineCallbacks
+ def update(progress, count):
+ self.clock.advance_time_msec(count * duration_ms)
+ progress = {"my_key": progress["my_key"] + 1}
+ yield self.store.runInteraction(
+ "update_progress",
+ self.store._background_update_progress_txn,
+ "test_update",
+ progress,
+ )
+ defer.returnValue(count)
+
+ self.update_handler.side_effect = update
+
+ yield self.store.start_background_update("test_update", {"my_key": 1})
+
+ self.update_handler.reset_mock()
+ result = yield self.store.do_background_update(
+ duration_ms * desired_count
+ )
+ self.assertIsNotNone(result)
+ self.update_handler.assert_called_once_with(
+ {"my_key": 1}, self.store.DEFAULT_BACKGROUND_BATCH_SIZE
+ )
+
+ @defer.inlineCallbacks
+ def update(progress, count):
+ yield self.store._end_background_update("test_update")
+ defer.returnValue(count)
+
+ self.update_handler.side_effect = update
+
+ self.update_handler.reset_mock()
+ result = yield self.store.do_background_update(
+ duration_ms * desired_count
+ )
+ self.assertIsNotNone(result)
+ self.update_handler.assert_called_once_with(
+ {"my_key": 2}, desired_count
+ )
+
+ self.update_handler.reset_mock()
+ result = yield self.store.do_background_update(
+ duration_ms * desired_count
+ )
+ self.assertIsNone(result)
+ self.assertFalse(self.update_handler.called)
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
new file mode 100644
index 00000000..1ddca1da
--- /dev/null
+++ b/tests/storage/test_base.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from mock import Mock
+
+from collections import OrderedDict
+
+from synapse.server import HomeServer
+
+from synapse.storage._base import SQLBaseStore
+from synapse.storage.engines import create_engine
+
+
+class SQLBaseStoreTestCase(unittest.TestCase):
+ """ Test the "simple" SQL generating methods in SQLBaseStore. """
+
+ def setUp(self):
+ self.db_pool = Mock(spec=["runInteraction"])
+ self.mock_txn = Mock()
+ self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"])
+ self.mock_conn.cursor.return_value = self.mock_txn
+ self.mock_conn.rollback.return_value = None
+ # Our fake runInteraction just runs synchronously inline
+
+ def runInteraction(func, *args, **kwargs):
+ return defer.succeed(func(self.mock_txn, *args, **kwargs))
+ self.db_pool.runInteraction = runInteraction
+
+ def runWithConnection(func, *args, **kwargs):
+ return defer.succeed(func(self.mock_conn, *args, **kwargs))
+ self.db_pool.runWithConnection = runWithConnection
+
+ config = Mock()
+ config.event_cache_size = 1
+ hs = HomeServer(
+ "test",
+ db_pool=self.db_pool,
+ config=config,
+ database_engine=create_engine("sqlite3"),
+ )
+
+ self.datastore = SQLBaseStore(hs)
+
+ @defer.inlineCallbacks
+ def test_insert_1col(self):
+ self.mock_txn.rowcount = 1
+
+ yield self.datastore._simple_insert(
+ table="tablename",
+ values={"columname": "Value"}
+ )
+
+ self.mock_txn.execute.assert_called_with(
+ "INSERT INTO tablename (columname) VALUES(?)",
+ ("Value",)
+ )
+
+ @defer.inlineCallbacks
+ def test_insert_3cols(self):
+ self.mock_txn.rowcount = 1
+
+ yield self.datastore._simple_insert(
+ table="tablename",
+ # Use OrderedDict() so we can assert on the SQL generated
+ values=OrderedDict([("colA", 1), ("colB", 2), ("colC", 3)])
+ )
+
+ self.mock_txn.execute.assert_called_with(
+ "INSERT INTO tablename (colA, colB, colC) VALUES(?, ?, ?)",
+ (1, 2, 3,)
+ )
+
+ @defer.inlineCallbacks
+ def test_select_one_1col(self):
+ self.mock_txn.rowcount = 1
+ self.mock_txn.fetchall.return_value = [("Value",)]
+
+ value = yield self.datastore._simple_select_one_onecol(
+ table="tablename",
+ keyvalues={"keycol": "TheKey"},
+ retcol="retcol"
+ )
+
+ self.assertEquals("Value", value)
+ self.mock_txn.execute.assert_called_with(
+ "SELECT retcol FROM tablename WHERE keycol = ?",
+ ["TheKey"]
+ )
+
+ @defer.inlineCallbacks
+ def test_select_one_3col(self):
+ self.mock_txn.rowcount = 1
+ self.mock_txn.fetchone.return_value = (1, 2, 3)
+
+ ret = yield self.datastore._simple_select_one(
+ table="tablename",
+ keyvalues={"keycol": "TheKey"},
+ retcols=["colA", "colB", "colC"]
+ )
+
+ self.assertEquals({"colA": 1, "colB": 2, "colC": 3}, ret)
+ self.mock_txn.execute.assert_called_with(
+ "SELECT colA, colB, colC FROM tablename WHERE keycol = ?",
+ ["TheKey"]
+ )
+
+ @defer.inlineCallbacks
+ def test_select_one_missing(self):
+ self.mock_txn.rowcount = 0
+ self.mock_txn.fetchone.return_value = None
+
+ ret = yield self.datastore._simple_select_one(
+ table="tablename",
+ keyvalues={"keycol": "Not here"},
+ retcols=["colA"],
+ allow_none=True
+ )
+
+ self.assertFalse(ret)
+
+ @defer.inlineCallbacks
+ def test_select_list(self):
+        self.mock_txn.rowcount = 3
+ self.mock_txn.fetchall.return_value = ((1,), (2,), (3,))
+ self.mock_txn.description = (
+ ("colA", None, None, None, None, None, None),
+ )
+
+ ret = yield self.datastore._simple_select_list(
+ table="tablename",
+ keyvalues={"keycol": "A set"},
+ retcols=["colA"],
+ )
+
+ self.assertEquals([{"colA": 1}, {"colA": 2}, {"colA": 3}], ret)
+ self.mock_txn.execute.assert_called_with(
+ "SELECT colA FROM tablename WHERE keycol = ?",
+ ["A set"]
+ )
+
+ @defer.inlineCallbacks
+ def test_update_one_1col(self):
+ self.mock_txn.rowcount = 1
+
+ yield self.datastore._simple_update_one(
+ table="tablename",
+ keyvalues={"keycol": "TheKey"},
+ updatevalues={"columnname": "New Value"}
+ )
+
+ self.mock_txn.execute.assert_called_with(
+ "UPDATE tablename SET columnname = ? WHERE keycol = ?",
+ ["New Value", "TheKey"]
+ )
+
+ @defer.inlineCallbacks
+ def test_update_one_4cols(self):
+ self.mock_txn.rowcount = 1
+
+ yield self.datastore._simple_update_one(
+ table="tablename",
+ keyvalues=OrderedDict([("colA", 1), ("colB", 2)]),
+ updatevalues=OrderedDict([("colC", 3), ("colD", 4)])
+ )
+
+ self.mock_txn.execute.assert_called_with(
+ "UPDATE tablename SET colC = ?, colD = ? WHERE " +
+ "colA = ? AND colB = ?",
+ [3, 4, 1, 2]
+ )
+
+ @defer.inlineCallbacks
+ def test_delete_one(self):
+ self.mock_txn.rowcount = 1
+
+ yield self.datastore._simple_delete_one(
+ table="tablename",
+ keyvalues={"keycol": "Go away"},
+ )
+
+ self.mock_txn.execute.assert_called_with(
+ "DELETE FROM tablename WHERE keycol = ?",
+ ["Go away"]
+ )
diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py
new file mode 100644
index 00000000..b9bfbc00
--- /dev/null
+++ b/tests/storage/test_directory.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.storage.directory import DirectoryStore
+from synapse.types import RoomID, RoomAlias
+
+from tests.utils import setup_test_homeserver
+
+
+class DirectoryStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver()
+
+ self.store = DirectoryStore(hs)
+
+ self.room = RoomID.from_string("!abcde:test")
+ self.alias = RoomAlias.from_string("#my-room:test")
+
+ @defer.inlineCallbacks
+ def test_room_to_alias(self):
+ yield self.store.create_room_alias_association(
+ room_alias=self.alias,
+ room_id=self.room.to_string(),
+ servers=["test"],
+ )
+
+ self.assertEquals(
+ ["#my-room:test"],
+ (yield self.store.get_aliases_for_room(self.room.to_string()))
+ )
+
+ @defer.inlineCallbacks
+ def test_alias_to_room(self):
+ yield self.store.create_room_alias_association(
+ room_alias=self.alias,
+ room_id=self.room.to_string(),
+ servers=["test"],
+ )
+
+ self.assertObjectHasAttributes(
+ {
+ "room_id": self.room.to_string(),
+ "servers": ["test"],
+ },
+ (yield self.store.get_association_from_room_alias(self.alias))
+ )
+
+ @defer.inlineCallbacks
+ def test_delete_alias(self):
+ yield self.store.create_room_alias_association(
+ room_alias=self.alias,
+ room_id=self.room.to_string(),
+ servers=["test"],
+ )
+
+ room_id = yield self.store.delete_room_alias(self.alias)
+ self.assertEqual(self.room.to_string(), room_id)
+
+ self.assertIsNone(
+ (yield self.store.get_association_from_room_alias(self.alias))
+ )
diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py
new file mode 100644
index 00000000..31301300
--- /dev/null
+++ b/tests/storage/test_events.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+from synapse.types import RoomID, UserID
+
+from tests import unittest
+from twisted.internet import defer
+from tests.storage.event_injector import EventInjector
+
+from tests.utils import setup_test_homeserver
+
+
+class EventsStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ self.hs = yield setup_test_homeserver(
+ resource_for_federation=Mock(),
+ http_client=None,
+ )
+ self.store = self.hs.get_datastore()
+ self.db_pool = self.hs.get_db_pool()
+ self.message_handler = self.hs.get_handlers().message_handler
+ self.event_injector = EventInjector(self.hs)
+
+ @defer.inlineCallbacks
+ def test_count_daily_messages(self):
+        yield self.db_pool.runQuery("DELETE FROM stats_reporting")
+
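+        # Pin the mock clock so the reported timestamps are deterministic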
+ self.hs.clock.now = 100
+
+ # Never reported before, and nothing which could be reported
+ count = yield self.store.count_daily_messages()
+ self.assertIsNone(count)
+ count = yield self.db_pool.runQuery("SELECT COUNT(*) FROM stats_reporting")
+ self.assertEqual([(0,)], count)
+
+ # Create something to report
+ room = RoomID.from_string("!abc123:test")
+ user = UserID.from_string("@raccoonlover:test")
+ yield self.event_injector.create_room(room)
+
+ self.base_event = yield self._get_last_stream_token()
+
+ yield self.event_injector.inject_message(room, user, "Raccoons are really cute")
+
+ # Never reported before, something could be reported, but isn't because
+ # it isn't old enough.
+ count = yield self.store.count_daily_messages()
+ self.assertIsNone(count)
+ self._assert_stats_reporting(1, self.hs.clock.now)
+
+ # Already reported yesterday, two new events from today.
+ yield self.event_injector.inject_message(room, user, "Yeah they are!")
+ yield self.event_injector.inject_message(room, user, "Incredibly!")
+ self.hs.clock.now += 60 * 60 * 24
+ count = yield self.store.count_daily_messages()
+ self.assertEqual(2, count) # 2 since yesterday
+ self._assert_stats_reporting(3, self.hs.clock.now) # 3 ever
+
+ # Last reported too recently.
+ yield self.event_injector.inject_message(room, user, "Who could disagree?")
+ self.hs.clock.now += 60 * 60 * 22
+ count = yield self.store.count_daily_messages()
+ self.assertIsNone(count)
+ self._assert_stats_reporting(4, self.hs.clock.now)
+
+ # Last reported too long ago
+ yield self.event_injector.inject_message(room, user, "No one.")
+ self.hs.clock.now += 60 * 60 * 26
+ count = yield self.store.count_daily_messages()
+ self.assertIsNone(count)
+ self._assert_stats_reporting(5, self.hs.clock.now)
+
+ # And now let's actually report something
+ yield self.event_injector.inject_message(room, user, "Indeed.")
+ yield self.event_injector.inject_message(room, user, "Indeed.")
+ yield self.event_injector.inject_message(room, user, "Indeed.")
+ # A little over 24 hours is fine :)
+ self.hs.clock.now += (60 * 60 * 24) + 50
+ count = yield self.store.count_daily_messages()
+ self.assertEqual(3, count)
+ self._assert_stats_reporting(8, self.hs.clock.now)
+
+ @defer.inlineCallbacks
+ def _get_last_stream_token(self):
+ rows = yield self.db_pool.runQuery(
+ "SELECT stream_ordering"
+ " FROM events"
+ " ORDER BY stream_ordering DESC"
+ " LIMIT 1"
+ )
+ if not rows:
+ defer.returnValue(0)
+ else:
+ defer.returnValue(rows[0][0])
+
+ @defer.inlineCallbacks
+ def _assert_stats_reporting(self, messages, time):
+ rows = yield self.db_pool.runQuery(
+ "SELECT reported_stream_token, reported_time FROM stats_reporting"
+ )
+ self.assertEqual([(self.base_event + messages, time,)], rows)
diff --git a/tests/storage/test_presence.py b/tests/storage/test_presence.py
new file mode 100644
index 00000000..065eebdb
--- /dev/null
+++ b/tests/storage/test_presence.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.storage.presence import PresenceStore
+from synapse.types import UserID
+
+from tests.utils import setup_test_homeserver, MockClock
+
+
+class PresenceStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver(clock=MockClock())
+
+ self.store = PresenceStore(hs)
+
+ self.u_apple = UserID.from_string("@apple:test")
+ self.u_banana = UserID.from_string("@banana:test")
+
+ @defer.inlineCallbacks
+ def test_state(self):
+ yield self.store.create_presence(
+ self.u_apple.localpart
+ )
+
+ state = yield self.store.get_presence_state(
+ self.u_apple.localpart
+ )
+
+ self.assertEquals(
+ {"state": None, "status_msg": None, "mtime": None}, state
+ )
+
+ yield self.store.set_presence_state(
+ self.u_apple.localpart, {"state": "online", "status_msg": "Here"}
+ )
+
+ state = yield self.store.get_presence_state(
+ self.u_apple.localpart
+ )
+
+ self.assertEquals(
+ {"state": "online", "status_msg": "Here", "mtime": 1000000}, state
+ )
+
+ @defer.inlineCallbacks
+ def test_visibility(self):
+ self.assertFalse((yield self.store.is_presence_visible(
+ observed_localpart=self.u_apple.localpart,
+ observer_userid=self.u_banana.to_string(),
+ )))
+
+ yield self.store.allow_presence_visible(
+ observed_localpart=self.u_apple.localpart,
+ observer_userid=self.u_banana.to_string(),
+ )
+
+ self.assertTrue((yield self.store.is_presence_visible(
+ observed_localpart=self.u_apple.localpart,
+ observer_userid=self.u_banana.to_string(),
+ )))
+
+ yield self.store.disallow_presence_visible(
+ observed_localpart=self.u_apple.localpart,
+ observer_userid=self.u_banana.to_string(),
+ )
+
+ self.assertFalse((yield self.store.is_presence_visible(
+ observed_localpart=self.u_apple.localpart,
+ observer_userid=self.u_banana.to_string(),
+ )))
+
+ @defer.inlineCallbacks
+ def test_presence_list(self):
+ self.assertEquals(
+ [],
+ (yield self.store.get_presence_list(
+ observer_localpart=self.u_apple.localpart,
+ ))
+ )
+ self.assertEquals(
+ [],
+ (yield self.store.get_presence_list(
+ observer_localpart=self.u_apple.localpart,
+ accepted=True,
+ ))
+ )
+
+ yield self.store.add_presence_list_pending(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_banana.to_string(),
+ )
+
+ self.assertEquals(
+ [{"observed_user_id": "@banana:test", "accepted": 0}],
+ (yield self.store.get_presence_list(
+ observer_localpart=self.u_apple.localpart,
+ ))
+ )
+ self.assertEquals(
+ [],
+ (yield self.store.get_presence_list(
+ observer_localpart=self.u_apple.localpart,
+ accepted=True,
+ ))
+ )
+
+ yield self.store.set_presence_list_accepted(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_banana.to_string(),
+ )
+
+ self.assertEquals(
+ [{"observed_user_id": "@banana:test", "accepted": 1}],
+ (yield self.store.get_presence_list(
+ observer_localpart=self.u_apple.localpart,
+ ))
+ )
+ self.assertEquals(
+ [{"observed_user_id": "@banana:test", "accepted": 1}],
+ (yield self.store.get_presence_list(
+ observer_localpart=self.u_apple.localpart,
+ accepted=True,
+ ))
+ )
+
+ yield self.store.del_presence_list(
+ observer_localpart=self.u_apple.localpart,
+ observed_userid=self.u_banana.to_string(),
+ )
+
+ self.assertEquals(
+ [],
+ (yield self.store.get_presence_list(
+ observer_localpart=self.u_apple.localpart,
+ ))
+ )
+ self.assertEquals(
+ [],
+ (yield self.store.get_presence_list(
+ observer_localpart=self.u_apple.localpart,
+ accepted=True,
+ ))
+ )
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
new file mode 100644
index 00000000..1fa783f3
--- /dev/null
+++ b/tests/storage/test_profile.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.storage.profile import ProfileStore
+from synapse.types import UserID
+
+from tests.utils import setup_test_homeserver
+
+
+class ProfileStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver()
+
+ self.store = ProfileStore(hs)
+
+ self.u_frank = UserID.from_string("@frank:test")
+
+ @defer.inlineCallbacks
+ def test_displayname(self):
+ yield self.store.create_profile(
+ self.u_frank.localpart
+ )
+
+ yield self.store.set_profile_displayname(
+ self.u_frank.localpart, "Frank"
+ )
+
+ self.assertEquals(
+ "Frank",
+ (yield self.store.get_profile_displayname(self.u_frank.localpart))
+ )
+
+ @defer.inlineCallbacks
+ def test_avatar_url(self):
+ yield self.store.create_profile(
+ self.u_frank.localpart
+ )
+
+ yield self.store.set_profile_avatar_url(
+ self.u_frank.localpart, "http://my.site/here"
+ )
+
+ self.assertEquals(
+ "http://my.site/here",
+ (yield self.store.get_profile_avatar_url(self.u_frank.localpart))
+ )
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
new file mode 100644
index 00000000..dbf9700e
--- /dev/null
+++ b/tests/storage/test_redaction.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.types import UserID, RoomID
+
+from tests.utils import setup_test_homeserver
+
+from mock import Mock
+
+
+class RedactionTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver(
+ resource_for_federation=Mock(),
+ http_client=None,
+ )
+
+ self.store = hs.get_datastore()
+ self.event_builder_factory = hs.get_event_builder_factory()
+ self.handlers = hs.get_handlers()
+ self.message_handler = self.handlers.message_handler
+
+ self.u_alice = UserID.from_string("@alice:test")
+ self.u_bob = UserID.from_string("@bob:test")
+
+ self.room1 = RoomID.from_string("!abc123:test")
+
+ self.depth = 1
+
+ @defer.inlineCallbacks
+ def inject_room_member(self, room, user, membership, replaces_state=None,
+ extra_content={}):
+ content = {"membership": membership}
+ content.update(extra_content)
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Member,
+ "sender": user.to_string(),
+ "state_key": user.to_string(),
+ "room_id": room.to_string(),
+ "content": content,
+ })
+
+ event, context = yield self.message_handler._create_new_client_event(
+ builder
+ )
+
+ yield self.store.persist_event(event, context)
+
+ defer.returnValue(event)
+
+ @defer.inlineCallbacks
+ def inject_message(self, room, user, body):
+ self.depth += 1
+
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Message,
+ "sender": user.to_string(),
+ "state_key": user.to_string(),
+ "room_id": room.to_string(),
+ "content": {"body": body, "msgtype": u"message"},
+ })
+
+ event, context = yield self.message_handler._create_new_client_event(
+ builder
+ )
+
+ yield self.store.persist_event(event, context)
+
+ defer.returnValue(event)
+
+ @defer.inlineCallbacks
+ def inject_redaction(self, room, event_id, user, reason):
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Redaction,
+ "sender": user.to_string(),
+ "state_key": user.to_string(),
+ "room_id": room.to_string(),
+ "content": {"reason": reason},
+ "redacts": event_id,
+ })
+
+ event, context = yield self.message_handler._create_new_client_event(
+ builder
+ )
+
+ yield self.store.persist_event(event, context)
+
+ @defer.inlineCallbacks
+ def test_redact(self):
+ yield self.inject_room_member(
+ self.room1, self.u_alice, Membership.JOIN
+ )
+
+ start = yield self.store.get_room_events_max_id()
+
+ msg_event = yield self.inject_message(self.room1, self.u_alice, u"t")
+
+ end = yield self.store.get_room_events_max_id()
+
+ results, _ = yield self.store.get_room_events_stream(
+ self.u_alice.to_string(),
+ start,
+ end,
+ )
+
+ self.assertEqual(1, len(results))
+
+ # Check event has not been redacted:
+ event = results[0]
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Message,
+ "user_id": self.u_alice.to_string(),
+ "content": {"body": "t", "msgtype": "message"},
+ },
+ event,
+ )
+
+ self.assertFalse("redacted_because" in event.unsigned)
+
+ # Redact event
+ reason = "Because I said so"
+ yield self.inject_redaction(
+ self.room1, msg_event.event_id, self.u_alice, reason
+ )
+
+ results, _ = yield self.store.get_room_events_stream(
+ self.u_alice.to_string(),
+ start,
+ end,
+ )
+
+ self.assertEqual(1, len(results))
+
+ # Check redaction
+
+ event = results[0]
+
+ self.assertEqual(msg_event.event_id, event.event_id)
+
+ self.assertTrue("redacted_because" in event.unsigned)
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Message,
+ "user_id": self.u_alice.to_string(),
+ "content": {},
+ },
+ event,
+ )
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Redaction,
+ "user_id": self.u_alice.to_string(),
+ "content": {"reason": reason},
+ },
+ event.unsigned["redacted_because"],
+ )
+
+ @defer.inlineCallbacks
+ def test_redact_join(self):
+ yield self.inject_room_member(
+ self.room1, self.u_alice, Membership.JOIN
+ )
+
+ start = yield self.store.get_room_events_max_id()
+
+ msg_event = yield self.inject_room_member(
+ self.room1, self.u_bob, Membership.JOIN,
+ extra_content={"blue": "red"},
+ )
+
+ end = yield self.store.get_room_events_max_id()
+
+ results, _ = yield self.store.get_room_events_stream(
+ self.u_alice.to_string(),
+ start,
+ end,
+ )
+
+ self.assertEqual(1, len(results))
+
+ # Check event has not been redacted:
+ event = results[0]
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Member,
+ "user_id": self.u_bob.to_string(),
+ "content": {"membership": Membership.JOIN, "blue": "red"},
+ },
+ event,
+ )
+
+ self.assertFalse(hasattr(event, "redacted_because"))
+
+ # Redact event
+ reason = "Because I said so"
+ yield self.inject_redaction(
+ self.room1, msg_event.event_id, self.u_alice, reason
+ )
+
+ results, _ = yield self.store.get_room_events_stream(
+ self.u_alice.to_string(),
+ start,
+ end,
+ )
+
+ self.assertEqual(1, len(results))
+
+ # Check redaction
+
+ event = results[0]
+
+ self.assertTrue("redacted_because" in event.unsigned)
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Member,
+ "user_id": self.u_bob.to_string(),
+ "content": {"membership": Membership.JOIN},
+ },
+ event,
+ )
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Redaction,
+ "user_id": self.u_alice.to_string(),
+ "content": {"reason": reason},
+ },
+ event.unsigned["redacted_because"],
+ )
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
new file mode 100644
index 00000000..0cce6c37
--- /dev/null
+++ b/tests/storage/test_registration.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.api.errors import StoreError
+from synapse.storage.registration import RegistrationStore
+from synapse.util import stringutils
+
+from tests.utils import setup_test_homeserver
+
+
+class RegistrationStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver()
+ self.db_pool = hs.get_db_pool()
+
+ self.store = RegistrationStore(hs)
+
+ self.user_id = "@my-user:test"
+ self.tokens = ["AbCdEfGhIjKlMnOpQrStUvWxYz",
+ "BcDeFgHiJkLmNoPqRsTuVwXyZa"]
+ self.pwhash = "{xx1}123456789"
+
+ @defer.inlineCallbacks
+ def test_register(self):
+ yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
+
+ self.assertEquals(
+ # TODO(paul): Surely this field should be 'user_id', not 'name'
+ # Additionally surely it shouldn't come in a 1-element list
+ {"name": self.user_id, "password_hash": self.pwhash},
+ (yield self.store.get_user_by_id(self.user_id))
+ )
+
+ result = yield self.store.get_user_by_access_token(self.tokens[0])
+
+ self.assertDictContainsSubset(
+ {
+ "name": self.user_id,
+ },
+ result
+ )
+
+ self.assertTrue("token_id" in result)
+
+ @defer.inlineCallbacks
+ def test_add_tokens(self):
+ yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
+ yield self.store.add_access_token_to_user(self.user_id, self.tokens[1])
+
+ result = yield self.store.get_user_by_access_token(self.tokens[1])
+
+ self.assertDictContainsSubset(
+ {
+ "name": self.user_id,
+ },
+ result
+ )
+
+ self.assertTrue("token_id" in result)
+
+ @defer.inlineCallbacks
+ def test_exchange_refresh_token_valid(self):
+ uid = stringutils.random_string(32)
+ generator = TokenGenerator()
+ last_token = generator.generate(uid)
+
+        yield self.db_pool.runQuery(
+ "INSERT INTO refresh_tokens(user_id, token) VALUES(?,?)",
+ (uid, last_token,))
+
+ (found_user_id, refresh_token) = yield self.store.exchange_refresh_token(
+ last_token, generator.generate)
+ self.assertEqual(uid, found_user_id)
+
+ rows = yield self.db_pool.runQuery(
+ "SELECT token FROM refresh_tokens WHERE user_id = ?", (uid, ))
+ self.assertEqual([(refresh_token,)], rows)
+ # We issued token 1, then exchanged it for token 2
+ expected_refresh_token = u"%s-%d" % (uid, 2,)
+ self.assertEqual(expected_refresh_token, refresh_token)
+
+ @defer.inlineCallbacks
+ def test_exchange_refresh_token_none(self):
+ uid = stringutils.random_string(32)
+ generator = TokenGenerator()
+ last_token = generator.generate(uid)
+
+ with self.assertRaises(StoreError):
+ yield self.store.exchange_refresh_token(last_token, generator.generate)
+
+ @defer.inlineCallbacks
+ def test_exchange_refresh_token_invalid(self):
+ uid = stringutils.random_string(32)
+ generator = TokenGenerator()
+ last_token = generator.generate(uid)
+ wrong_token = "%s-wrong" % (last_token,)
+
+        # Wait for the insert to land before attempting the exchange.
+        yield self.db_pool.runQuery(
+            "INSERT INTO refresh_tokens(user_id, token) VALUES(?,?)",
+            (uid, wrong_token,))
+
+ with self.assertRaises(StoreError):
+ yield self.store.exchange_refresh_token(last_token, generator.generate)
+
+
+class TokenGenerator:
+ def __init__(self):
+ self._last_issued_token = 0
+
+ def generate(self, user_id):
+ self._last_issued_token += 1
+ return u"%s-%d" % (user_id, self._last_issued_token,)
diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py
new file mode 100644
index 00000000..91c96754
--- /dev/null
+++ b/tests/storage/test_room.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes
+from synapse.types import UserID, RoomID, RoomAlias
+
+from tests.utils import setup_test_homeserver
+
+
+class RoomStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver()
+
+ # We can't test RoomStore on its own without the DirectoryStore, for
+ # management of the 'room_aliases' table
+ self.store = hs.get_datastore()
+
+ self.room = RoomID.from_string("!abcde:test")
+ self.alias = RoomAlias.from_string("#a-room-name:test")
+ self.u_creator = UserID.from_string("@creator:test")
+
+ yield self.store.store_room(self.room.to_string(),
+ room_creator_user_id=self.u_creator.to_string(),
+ is_public=True
+ )
+
+ @defer.inlineCallbacks
+ def test_get_room(self):
+ self.assertDictContainsSubset(
+ {"room_id": self.room.to_string(),
+ "creator": self.u_creator.to_string(),
+ "is_public": True},
+ (yield self.store.get_room(self.room.to_string()))
+ )
+
+ @defer.inlineCallbacks
+ def test_get_rooms(self):
+ # get_rooms does an INNER JOIN on the room_aliases table :(
+
+ rooms = yield self.store.get_rooms(is_public=True)
+ # Should be empty before we add the alias
+ self.assertEquals([], rooms)
+
+ yield self.store.create_room_alias_association(
+ room_alias=self.alias,
+ room_id=self.room.to_string(),
+ servers=["test"]
+ )
+
+ rooms = yield self.store.get_rooms(is_public=True)
+
+ self.assertEquals(1, len(rooms))
+ self.assertEquals({
+ "name": None,
+ "room_id": self.room.to_string(),
+ "topic": None,
+ "aliases": [self.alias.to_string()],
+ "world_readable": False,
+ "guest_can_join": False,
+ }, rooms[0])
+
+
+class RoomEventsStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+        hs = yield setup_test_homeserver()
+
+ # Room events need the full datastore, for persist_event() and
+ # get_room_state()
+ self.store = hs.get_datastore()
+ self.event_factory = hs.get_event_factory()
+
+ self.room = RoomID.from_string("!abcde:test")
+
+ yield self.store.store_room(self.room.to_string(),
+ room_creator_user_id="@creator:text",
+ is_public=True
+ )
+
+ @defer.inlineCallbacks
+ def inject_room_event(self, **kwargs):
+ yield self.store.persist_event(
+ self.event_factory.create_event(
+ room_id=self.room.to_string(),
+ **kwargs
+ )
+ )
+
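+    # The STALE_ prefix stops trial from collecting the two tests below
+    # (test method names must start with "test"); they are kept for
+    # reference only.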
+ @defer.inlineCallbacks
+ def STALE_test_room_name(self):
+ name = u"A-Room-Name"
+
+ yield self.inject_room_event(
+ etype=EventTypes.Name,
+ name=name,
+ content={"name": name},
+ depth=1,
+ )
+
+ state = yield self.store.get_current_state(
+ room_id=self.room.to_string()
+ )
+
+ self.assertEquals(1, len(state))
+ self.assertObjectHasAttributes(
+ {"type": "m.room.name",
+ "room_id": self.room.to_string(),
+ "name": name},
+ state[0]
+ )
+
+ @defer.inlineCallbacks
+ def STALE_test_room_topic(self):
+ topic = u"A place for things"
+
+ yield self.inject_room_event(
+ etype=EventTypes.Topic,
+ topic=topic,
+ content={"topic": topic},
+ depth=1,
+ )
+
+ state = yield self.store.get_current_state(
+ room_id=self.room.to_string()
+ )
+
+ self.assertEquals(1, len(state))
+ self.assertObjectHasAttributes(
+ {"type": "m.room.topic",
+ "room_id": self.room.to_string(),
+ "topic": topic},
+ state[0]
+ )
+
+    # Not testing the various 'level' methods for now because there are lots
+    # of them and they need coalescing; see JIRA SPEC-11
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
new file mode 100644
index 00000000..785953cc
--- /dev/null
+++ b/tests/storage/test_roommember.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.types import UserID, RoomID
+
+from tests.utils import setup_test_homeserver
+
+from mock import Mock
+
+
+class RoomMemberStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver(
+ resource_for_federation=Mock(),
+ http_client=None,
+ )
+ # We can't test the RoomMemberStore on its own without the other event
+ # storage logic
+ self.store = hs.get_datastore()
+ self.event_builder_factory = hs.get_event_builder_factory()
+ self.handlers = hs.get_handlers()
+ self.message_handler = self.handlers.message_handler
+
+ self.u_alice = UserID.from_string("@alice:test")
+ self.u_bob = UserID.from_string("@bob:test")
+
+ # User elsewhere on another host
+ self.u_charlie = UserID.from_string("@charlie:elsewhere")
+
+ self.room = RoomID.from_string("!abc123:test")
+
+ @defer.inlineCallbacks
+ def inject_room_member(self, room, user, membership, replaces_state=None):
+ builder = self.event_builder_factory.new({
+ "type": EventTypes.Member,
+ "sender": user.to_string(),
+ "state_key": user.to_string(),
+ "room_id": room.to_string(),
+ "content": {"membership": membership},
+ })
+
+ event, context = yield self.message_handler._create_new_client_event(
+ builder
+ )
+
+ yield self.store.persist_event(event, context)
+
+ defer.returnValue(event)
+
+ @defer.inlineCallbacks
+ def test_one_member(self):
+ yield self.inject_room_member(self.room, self.u_alice, Membership.JOIN)
+
+ self.assertEquals(
+ Membership.JOIN,
+ (yield self.store.get_room_member(
+ user_id=self.u_alice.to_string(),
+ room_id=self.room.to_string(),
+ )).membership
+ )
+ self.assertEquals(
+ [self.u_alice.to_string()],
+ [m.user_id for m in (
+ yield self.store.get_room_members(self.room.to_string())
+ )]
+ )
+ self.assertEquals(
+ [self.room.to_string()],
+ [m.room_id for m in (
+ yield self.store.get_rooms_for_user_where_membership_is(
+ self.u_alice.to_string(), [Membership.JOIN]
+ ))
+ ]
+ )
+ self.assertFalse(
+ (yield self.store.user_rooms_intersect(
+ [self.u_alice.to_string(), self.u_bob.to_string()]
+ ))
+ )
+
+ @defer.inlineCallbacks
+ def test_two_members(self):
+ yield self.inject_room_member(self.room, self.u_alice, Membership.JOIN)
+ yield self.inject_room_member(self.room, self.u_bob, Membership.JOIN)
+
+ self.assertEquals(
+ {self.u_alice.to_string(), self.u_bob.to_string()},
+ {m.user_id for m in (
+ yield self.store.get_room_members(self.room.to_string())
+ )}
+ )
+ self.assertTrue(
+ (yield self.store.user_rooms_intersect(
+ [self.u_alice.to_string(), self.u_bob.to_string()]
+ ))
+ )
+
+ @defer.inlineCallbacks
+ def test_room_hosts(self):
+ yield self.inject_room_member(self.room, self.u_alice, Membership.JOIN)
+
+ self.assertEquals(
+ {"test"},
+ (yield self.store.get_joined_hosts_for_room(self.room.to_string()))
+ )
+
+ # Should still have just one host after second join from it
+ yield self.inject_room_member(self.room, self.u_bob, Membership.JOIN)
+
+ self.assertEquals(
+ {"test"},
+ (yield self.store.get_joined_hosts_for_room(self.room.to_string()))
+ )
+
+ # Should now have two hosts after join from other host
+ yield self.inject_room_member(self.room, self.u_charlie, Membership.JOIN)
+
+ self.assertEquals(
+ {"test", "elsewhere"},
+            (yield self.store.get_joined_hosts_for_room(self.room.to_string()))
+ )
+
+ # Should still have both hosts
+ yield self.inject_room_member(self.room, self.u_alice, Membership.LEAVE)
+
+ self.assertEquals(
+ {"test", "elsewhere"},
+            (yield self.store.get_joined_hosts_for_room(self.room.to_string()))
+ )
+
+ # Should have only one host after other leaves
+ yield self.inject_room_member(self.room, self.u_charlie, Membership.LEAVE)
+
+ self.assertEquals(
+ {"test"},
+ (yield self.store.get_joined_hosts_for_room(self.room.to_string()))
+ )
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
new file mode 100644
index 00000000..e5c2c5cc
--- /dev/null
+++ b/tests/storage/test_stream.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.types import UserID, RoomID
+from tests.storage.event_injector import EventInjector
+
+from tests.utils import setup_test_homeserver
+
+from mock import Mock
+
+
+class StreamStoreTestCase(unittest.TestCase):
+
+ @defer.inlineCallbacks
+ def setUp(self):
+ hs = yield setup_test_homeserver(
+ resource_for_federation=Mock(),
+ http_client=None,
+ )
+
+ self.store = hs.get_datastore()
+ self.event_builder_factory = hs.get_event_builder_factory()
+ self.event_injector = EventInjector(hs)
+ self.handlers = hs.get_handlers()
+ self.message_handler = self.handlers.message_handler
+
+ self.u_alice = UserID.from_string("@alice:test")
+ self.u_bob = UserID.from_string("@bob:test")
+
+ self.room1 = RoomID.from_string("!abc123:test")
+ self.room2 = RoomID.from_string("!xyx987:test")
+
+ @defer.inlineCallbacks
+ def test_event_stream_get_other(self):
+        # Both bob and alice join the room
+ yield self.event_injector.inject_room_member(
+ self.room1, self.u_alice, Membership.JOIN
+ )
+ yield self.event_injector.inject_room_member(
+ self.room1, self.u_bob, Membership.JOIN
+ )
+
+ # Initial stream key:
+ start = yield self.store.get_room_events_max_id()
+
+ yield self.event_injector.inject_message(self.room1, self.u_alice, u"test")
+
+ end = yield self.store.get_room_events_max_id()
+
+ results, _ = yield self.store.get_room_events_stream(
+ self.u_bob.to_string(),
+ start,
+ end,
+ )
+
+ self.assertEqual(1, len(results))
+
+ event = results[0]
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Message,
+ "user_id": self.u_alice.to_string(),
+ "content": {"body": "test", "msgtype": "message"},
+ },
+ event,
+ )
+
+ @defer.inlineCallbacks
+ def test_event_stream_get_own(self):
+        # Both bob and alice join the room
+ yield self.event_injector.inject_room_member(
+ self.room1, self.u_alice, Membership.JOIN
+ )
+ yield self.event_injector.inject_room_member(
+ self.room1, self.u_bob, Membership.JOIN
+ )
+
+ # Initial stream key:
+ start = yield self.store.get_room_events_max_id()
+
+ yield self.event_injector.inject_message(self.room1, self.u_alice, u"test")
+
+ end = yield self.store.get_room_events_max_id()
+
+ results, _ = yield self.store.get_room_events_stream(
+ self.u_alice.to_string(),
+ start,
+ end,
+ )
+
+ self.assertEqual(1, len(results))
+
+ event = results[0]
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Message,
+ "user_id": self.u_alice.to_string(),
+ "content": {"body": "test", "msgtype": "message"},
+ },
+ event,
+ )
+
+ @defer.inlineCallbacks
+ def test_event_stream_join_leave(self):
+        # Both bob and alice join the room
+ yield self.event_injector.inject_room_member(
+ self.room1, self.u_alice, Membership.JOIN
+ )
+ yield self.event_injector.inject_room_member(
+ self.room1, self.u_bob, Membership.JOIN
+ )
+
+        # Then bob leaves.
+ yield self.event_injector.inject_room_member(
+ self.room1, self.u_bob, Membership.LEAVE
+ )
+
+ # Initial stream key:
+ start = yield self.store.get_room_events_max_id()
+
+ yield self.event_injector.inject_message(self.room1, self.u_alice, u"test")
+
+ end = yield self.store.get_room_events_max_id()
+
+ results, _ = yield self.store.get_room_events_stream(
+ self.u_bob.to_string(),
+ start,
+ end,
+ )
+
+ # We should not get the message, as it happened *after* bob left.
+ self.assertEqual(0, len(results))
+
+ @defer.inlineCallbacks
+ def test_event_stream_prev_content(self):
+ yield self.event_injector.inject_room_member(
+ self.room1, self.u_bob, Membership.JOIN
+ )
+
+ event1 = yield self.event_injector.inject_room_member(
+ self.room1, self.u_alice, Membership.JOIN
+ )
+
+ start = yield self.store.get_room_events_max_id()
+
+ event2 = yield self.event_injector.inject_room_member(
+ self.room1, self.u_alice, Membership.JOIN,
+ )
+
+ end = yield self.store.get_room_events_max_id()
+
+ results, _ = yield self.store.get_room_events_stream(
+ self.u_bob.to_string(),
+ start,
+ end,
+ )
+
+        # We should get exactly one event: alice's second join, which falls
+        # between start and end.
+ self.assertEqual(1, len(results))
+
+ event = results[0]
+
+ self.assertTrue(
+ "prev_content" in event.unsigned,
+ msg="No prev_content key"
+ )
diff --git a/tests/test_distributor.py b/tests/test_distributor.py
new file mode 100644
index 00000000..8ed48cfb
--- /dev/null
+++ b/tests/test_distributor.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import unittest
+from twisted.internet import defer
+
+from mock import Mock, patch
+
+from synapse.util.distributor import Distributor
+from synapse.util.async import run_on_reactor
+
+
+class DistributorTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.dist = Distributor()
+
+ @defer.inlineCallbacks
+ def test_signal_dispatch(self):
+ self.dist.declare("alert")
+
+ observer = Mock()
+ self.dist.observe("alert", observer)
+
+ d = self.dist.fire("alert", 1, 2, 3)
+ yield d
+ self.assertTrue(d.called)
+ observer.assert_called_with(1, 2, 3)
+
+ @defer.inlineCallbacks
+ def test_signal_dispatch_deferred(self):
+ self.dist.declare("whine")
+
+ d_inner = defer.Deferred()
+ def observer():
+ return d_inner
+ self.dist.observe("whine", observer)
+
+ d_outer = self.dist.fire("whine")
+
+ self.assertFalse(d_outer.called)
+
+ d_inner.callback(None)
+ yield d_outer
+ self.assertTrue(d_outer.called)
+
+ @defer.inlineCallbacks
+ def test_signal_catch(self):
+ self.dist.declare("alarm")
+
+        observers = [Mock() for i in (1, 2)]
+ for o in observers:
+ self.dist.observe("alarm", o)
+
+ observers[0].side_effect = Exception("Awoogah!")
+
+ with patch("synapse.util.distributor.logger",
+ spec=["warning"]
+ ) as mock_logger:
+ d = self.dist.fire("alarm", "Go")
+ yield d
+ self.assertTrue(d.called)
+
+ observers[0].assert_called_once_with("Go")
+ observers[1].assert_called_once_with("Go")
+
+ self.assertEquals(mock_logger.warning.call_count, 1)
+ self.assertIsInstance(mock_logger.warning.call_args[0][0],
+ str)
+
+ @defer.inlineCallbacks
+ def test_signal_catch_no_suppress(self):
+ # Gut-wrenching
+ self.dist.suppress_failures = False
+
+ self.dist.declare("whail")
+
+ class MyException(Exception):
+ pass
+
+ @defer.inlineCallbacks
+ def observer():
+ yield run_on_reactor()
+ raise MyException("Oopsie")
+
+ self.dist.observe("whail", observer)
+
+ d = self.dist.fire("whail")
+
+ yield self.assertFailure(d, MyException)
+ self.dist.suppress_failures = True
+
+ @defer.inlineCallbacks
+ def test_signal_prereg(self):
+ observer = Mock()
+ self.dist.observe("flare", observer)
+
+ self.dist.declare("flare")
+ yield self.dist.fire("flare", 4, 5)
+
+ observer.assert_called_with(4, 5)
+
+ def test_signal_undeclared(self):
+ def code():
+ self.dist.fire("notification")
+ self.assertRaises(KeyError, code)
diff --git a/tests/test_state.py b/tests/test_state.py
new file mode 100644
index 00000000..e4e995b7
--- /dev/null
+++ b/tests/test_state.py
@@ -0,0 +1,641 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+from twisted.internet import defer
+
+from synapse.events import FrozenEvent
+from synapse.api.auth import Auth
+from synapse.api.constants import EventTypes, Membership
+from synapse.state import StateHandler
+
+from .utils import MockClock
+
+from mock import Mock
+
+
+_next_event_id = 1000
+
+
+def create_event(name=None, type=None, state_key=None, depth=2, event_id=None,
+                 prev_events=None, **kwargs):
+    global _next_event_id
+
+    # avoid the shared-mutable-default pitfall
+    if prev_events is None:
+        prev_events = []
+
+ if not event_id:
+ _next_event_id += 1
+ event_id = "$%s:test" % (_next_event_id,)
+
+ if not name:
+ if state_key is not None:
+ name = "<%s-%s, %s>" % (type, state_key, event_id,)
+ else:
+ name = "<%s, %s>" % (type, event_id,)
+
+ d = {
+ "event_id": event_id,
+ "type": type,
+ "sender": "@user_id:example.com",
+ "room_id": "!room_id:example.com",
+ "depth": depth,
+ "prev_events": prev_events,
+ }
+
+ if state_key is not None:
+ d["state_key"] = state_key
+
+ d.update(kwargs)
+
+ event = FrozenEvent(d)
+
+ return event
+
+
+class StateGroupStore(object):
+ def __init__(self):
+ self._event_to_state_group = {}
+ self._group_to_state = {}
+
+ self._next_group = 1
+
+ def get_state_groups(self, room_id, event_ids):
+ groups = {}
+ for event_id in event_ids:
+ group = self._event_to_state_group.get(event_id)
+ if group:
+ groups[group] = self._group_to_state[group]
+
+ return defer.succeed(groups)
+
+ def store_state_groups(self, event, context):
+ if context.current_state is None:
+ return
+
+ state_events = context.current_state
+
+ if event.is_state():
+ state_events[(event.type, event.state_key)] = event
+
+ state_group = context.state_group
+ if not state_group:
+ state_group = self._next_group
+ self._next_group += 1
+
+ self._group_to_state[state_group] = state_events.values()
+
+ self._event_to_state_group[event.event_id] = state_group
+
+
+class DictObj(dict):
+ def __init__(self, **kwargs):
+ super(DictObj, self).__init__(kwargs)
+ self.__dict__ = self
+
+
+class Graph(object):
+ def __init__(self, nodes, edges):
+        events = {}
+        # Start from every node; anything referenced as a prev_event is
+        # stripped below, leaving only the leaves of the graph.
+        clobbered = set(nodes.keys())
+
+ for event_id, fields in nodes.items():
+ refs = edges.get(event_id)
+ if refs:
+ clobbered.difference_update(refs)
+ prev_events = [(r, {}) for r in refs]
+ else:
+ prev_events = []
+
+ events[event_id] = create_event(
+ event_id=event_id,
+ prev_events=prev_events,
+ **fields
+ )
+
+ self._leaves = clobbered
+ self._events = sorted(events.values(), key=lambda e: e.depth)
+
+ def walk(self):
+ return iter(self._events)
+
+    def get_leaves(self):
+        return (e for e in self._events if e.event_id in self._leaves)
+
+
+class StateTestCase(unittest.TestCase):
+ def setUp(self):
+ self.store = Mock(
+ spec_set=[
+ "get_state_groups",
+ "add_event_hashes",
+ ]
+ )
+ hs = Mock(spec=[
+ "get_datastore", "get_auth", "get_state_handler", "get_clock",
+ ])
+ hs.get_datastore.return_value = self.store
+ hs.get_state_handler.return_value = None
+ hs.get_auth.return_value = Auth(hs)
+ hs.get_clock.return_value = MockClock()
+
+ self.state = StateHandler(hs)
+ self.event_id = 0
+
+ @defer.inlineCallbacks
+ def test_branch_no_conflict(self):
+ graph = Graph(
+ nodes={
+ "START": DictObj(
+ type=EventTypes.Create,
+ state_key="",
+ depth=1,
+ ),
+ "A": DictObj(
+ type=EventTypes.Message,
+ depth=2,
+ ),
+ "B": DictObj(
+ type=EventTypes.Message,
+ depth=3,
+ ),
+ "C": DictObj(
+ type=EventTypes.Name,
+ state_key="",
+ depth=3,
+ ),
+ "D": DictObj(
+ type=EventTypes.Message,
+ depth=4,
+ ),
+ },
+ edges={
+ "A": ["START"],
+ "B": ["A"],
+ "C": ["A"],
+ "D": ["B", "C"]
+ }
+ )
+
+ store = StateGroupStore()
+ self.store.get_state_groups.side_effect = store.get_state_groups
+
+ context_store = {}
+
+ for event in graph.walk():
+ context = yield self.state.compute_event_context(event)
+ store.store_state_groups(event, context)
+ context_store[event.event_id] = context
+
+ self.assertEqual(2, len(context_store["D"].current_state))
+
+ @defer.inlineCallbacks
+ def test_branch_basic_conflict(self):
+ graph = Graph(
+ nodes={
+ "START": DictObj(
+ type=EventTypes.Create,
+ state_key="",
+ content={"creator": "@user_id:example.com"},
+ depth=1,
+ ),
+ "A": DictObj(
+ type=EventTypes.Member,
+ state_key="@user_id:example.com",
+ content={"membership": Membership.JOIN},
+ membership=Membership.JOIN,
+ depth=2,
+ ),
+ "B": DictObj(
+ type=EventTypes.Name,
+ state_key="",
+ depth=3,
+ ),
+ "C": DictObj(
+ type=EventTypes.Name,
+ state_key="",
+ depth=4,
+ ),
+ "D": DictObj(
+ type=EventTypes.Message,
+ depth=5,
+ ),
+ },
+ edges={
+ "A": ["START"],
+ "B": ["A"],
+ "C": ["A"],
+ "D": ["B", "C"]
+ }
+ )
+
+ store = StateGroupStore()
+ self.store.get_state_groups.side_effect = store.get_state_groups
+
+ context_store = {}
+
+ for event in graph.walk():
+ context = yield self.state.compute_event_context(event)
+ store.store_state_groups(event, context)
+ context_store[event.event_id] = context
+
+ self.assertSetEqual(
+ {"START", "A", "C"},
+ {e.event_id for e in context_store["D"].current_state.values()}
+ )
+
+ @defer.inlineCallbacks
+ def test_branch_have_banned_conflict(self):
+ graph = Graph(
+ nodes={
+ "START": DictObj(
+ type=EventTypes.Create,
+ state_key="",
+ content={"creator": "@user_id:example.com"},
+ depth=1,
+ ),
+ "A": DictObj(
+ type=EventTypes.Member,
+ state_key="@user_id:example.com",
+ content={"membership": Membership.JOIN},
+ membership=Membership.JOIN,
+ depth=2,
+ ),
+ "B": DictObj(
+ type=EventTypes.Name,
+ state_key="",
+ depth=3,
+ ),
+ "C": DictObj(
+ type=EventTypes.Member,
+ state_key="@user_id_2:example.com",
+ content={"membership": Membership.BAN},
+ membership=Membership.BAN,
+ depth=4,
+ ),
+ "D": DictObj(
+ type=EventTypes.Name,
+ state_key="",
+ depth=4,
+ sender="@user_id_2:example.com",
+ ),
+ "E": DictObj(
+ type=EventTypes.Message,
+ depth=5,
+ ),
+ },
+ edges={
+ "A": ["START"],
+ "B": ["A"],
+ "C": ["B"],
+ "D": ["B"],
+ "E": ["C", "D"]
+ }
+ )
+
+ store = StateGroupStore()
+ self.store.get_state_groups.side_effect = store.get_state_groups
+
+ context_store = {}
+
+ for event in graph.walk():
+ context = yield self.state.compute_event_context(event)
+ store.store_state_groups(event, context)
+ context_store[event.event_id] = context
+
+ self.assertSetEqual(
+ {"START", "A", "B", "C"},
+ {e.event_id for e in context_store["E"].current_state.values()}
+ )
+
+ @defer.inlineCallbacks
+ def test_branch_have_perms_conflict(self):
+ userid1 = "@user_id:example.com"
+ userid2 = "@user_id2:example.com"
+
+ nodes = {
+ "A1": DictObj(
+ type=EventTypes.Create,
+ state_key="",
+ content={"creator": userid1},
+ depth=1,
+ ),
+ "A2": DictObj(
+ type=EventTypes.Member,
+ state_key=userid1,
+ content={"membership": Membership.JOIN},
+ membership=Membership.JOIN,
+ ),
+ "A3": DictObj(
+ type=EventTypes.Member,
+ state_key=userid2,
+ content={"membership": Membership.JOIN},
+ membership=Membership.JOIN,
+ ),
+ "A4": DictObj(
+ type=EventTypes.PowerLevels,
+ state_key="",
+ content={
+ "events": {"m.room.name": 50},
+ "users": {userid1: 100,
+ userid2: 60},
+ },
+ ),
+ "A5": DictObj(
+ type=EventTypes.Name,
+ state_key="",
+ ),
+ "B": DictObj(
+ type=EventTypes.PowerLevels,
+ state_key="",
+ content={
+ "events": {"m.room.name": 50},
+ "users": {userid2: 30},
+ },
+ ),
+ "C": DictObj(
+ type=EventTypes.Name,
+ state_key="",
+ sender=userid2,
+ ),
+ "D": DictObj(
+ type=EventTypes.Message,
+ ),
+ }
+ edges = {
+ "A2": ["A1"],
+ "A3": ["A2"],
+ "A4": ["A3"],
+ "A5": ["A4"],
+ "B": ["A5"],
+ "C": ["A5"],
+ "D": ["B", "C"]
+ }
+ self._add_depths(nodes, edges)
+ graph = Graph(nodes, edges)
+
+ store = StateGroupStore()
+ self.store.get_state_groups.side_effect = store.get_state_groups
+
+ context_store = {}
+
+ for event in graph.walk():
+ context = yield self.state.compute_event_context(event)
+ store.store_state_groups(event, context)
+ context_store[event.event_id] = context
+
+ self.assertSetEqual(
+ {"A1", "A2", "A3", "A5", "B"},
+ {e.event_id for e in context_store["D"].current_state.values()}
+ )
+
+ def _add_depths(self, nodes, edges):
+ def _get_depth(ev):
+ node = nodes[ev]
+ if 'depth' not in node:
+ prevs = edges[ev]
+ depth = max(_get_depth(prev) for prev in prevs) + 1
+ node['depth'] = depth
+ return node['depth']
+
+ for n in nodes:
+ _get_depth(n)
+
+ @defer.inlineCallbacks
+ def test_annotate_with_old_message(self):
+ event = create_event(type="test_message", name="event")
+
+ old_state = [
+ create_event(type="test1", state_key="1"),
+ create_event(type="test1", state_key="2"),
+ create_event(type="test2", state_key=""),
+ ]
+
+ context = yield self.state.compute_event_context(
+ event, old_state=old_state
+ )
+
+ for k, v in context.current_state.items():
+ type, state_key = k
+ self.assertEqual(type, v.type)
+ self.assertEqual(state_key, v.state_key)
+
+ self.assertEqual(
+ set(old_state), set(context.current_state.values())
+ )
+
+ self.assertIsNone(context.state_group)
+
+ @defer.inlineCallbacks
+ def test_annotate_with_old_state(self):
+ event = create_event(type="state", state_key="", name="event")
+
+ old_state = [
+ create_event(type="test1", state_key="1"),
+ create_event(type="test1", state_key="2"),
+ create_event(type="test2", state_key=""),
+ ]
+
+ context = yield self.state.compute_event_context(
+ event, old_state=old_state
+ )
+
+ for k, v in context.current_state.items():
+ type, state_key = k
+ self.assertEqual(type, v.type)
+ self.assertEqual(state_key, v.state_key)
+
+ self.assertEqual(
+ set(old_state),
+ set(context.current_state.values())
+ )
+
+ self.assertIsNone(context.state_group)
+
+ @defer.inlineCallbacks
+ def test_trivial_annotate_message(self):
+ event = create_event(type="test_message", name="event")
+
+ old_state = [
+ create_event(type="test1", state_key="1"),
+ create_event(type="test1", state_key="2"),
+ create_event(type="test2", state_key=""),
+ ]
+
+ group_name = "group_name_1"
+
+ self.store.get_state_groups.return_value = {
+ group_name: old_state,
+ }
+
+ context = yield self.state.compute_event_context(event)
+
+ for k, v in context.current_state.items():
+ type, state_key = k
+ self.assertEqual(type, v.type)
+ self.assertEqual(state_key, v.state_key)
+
+ self.assertEqual(
+ set([e.event_id for e in old_state]),
+ set([e.event_id for e in context.current_state.values()])
+ )
+
+ self.assertEqual(group_name, context.state_group)
+
+ @defer.inlineCallbacks
+ def test_trivial_annotate_state(self):
+ event = create_event(type="state", state_key="", name="event")
+
+ old_state = [
+ create_event(type="test1", state_key="1"),
+ create_event(type="test1", state_key="2"),
+ create_event(type="test2", state_key=""),
+ ]
+
+ group_name = "group_name_1"
+
+ self.store.get_state_groups.return_value = {
+ group_name: old_state,
+ }
+
+ context = yield self.state.compute_event_context(event)
+
+ for k, v in context.current_state.items():
+ type, state_key = k
+ self.assertEqual(type, v.type)
+ self.assertEqual(state_key, v.state_key)
+
+ self.assertEqual(
+ set([e.event_id for e in old_state]),
+ set([e.event_id for e in context.current_state.values()])
+ )
+
+ self.assertIsNone(context.state_group)
+
+ @defer.inlineCallbacks
+ def test_resolve_message_conflict(self):
+ event = create_event(type="test_message", name="event")
+
+ creation = create_event(
+ type=EventTypes.Create, state_key=""
+ )
+
+ old_state_1 = [
+ creation,
+ create_event(type="test1", state_key="1"),
+ create_event(type="test1", state_key="2"),
+ create_event(type="test2", state_key=""),
+ ]
+
+ old_state_2 = [
+ creation,
+ create_event(type="test1", state_key="1"),
+ create_event(type="test3", state_key="2"),
+ create_event(type="test4", state_key=""),
+ ]
+
+ context = yield self._get_context(event, old_state_1, old_state_2)
+
+ self.assertEqual(len(context.current_state), 6)
+
+ self.assertIsNone(context.state_group)
+
+ @defer.inlineCallbacks
+ def test_resolve_state_conflict(self):
+ event = create_event(type="test4", state_key="", name="event")
+
+ creation = create_event(
+ type=EventTypes.Create, state_key=""
+ )
+
+ old_state_1 = [
+ creation,
+ create_event(type="test1", state_key="1"),
+ create_event(type="test1", state_key="2"),
+ create_event(type="test2", state_key=""),
+ ]
+
+ old_state_2 = [
+ creation,
+ create_event(type="test1", state_key="1"),
+ create_event(type="test3", state_key="2"),
+ create_event(type="test4", state_key=""),
+ ]
+
+ context = yield self._get_context(event, old_state_1, old_state_2)
+
+ self.assertEqual(len(context.current_state), 6)
+
+ self.assertIsNone(context.state_group)
+
+ @defer.inlineCallbacks
+ def test_standard_depth_conflict(self):
+ event = create_event(type="test4", name="event")
+
+ member_event = create_event(
+ type=EventTypes.Member,
+ state_key="@user_id:example.com",
+ content={
+ "membership": Membership.JOIN,
+ }
+ )
+
+ creation = create_event(
+ type=EventTypes.Create, state_key="",
+ content={"creator": "@foo:bar"}
+ )
+
+ old_state_1 = [
+ creation,
+ member_event,
+ create_event(type="test1", state_key="1", depth=1),
+ ]
+
+ old_state_2 = [
+ creation,
+ member_event,
+ create_event(type="test1", state_key="1", depth=2),
+ ]
+
+ context = yield self._get_context(event, old_state_1, old_state_2)
+
+ self.assertEqual(old_state_2[2], context.current_state[("test1", "1")])
+
+ # Reverse the depth to make sure we are actually using the depths
+ # during state resolution.
+
+ old_state_1 = [
+ creation,
+ member_event,
+ create_event(type="test1", state_key="1", depth=2),
+ ]
+
+ old_state_2 = [
+ creation,
+ member_event,
+ create_event(type="test1", state_key="1", depth=1),
+ ]
+
+ context = yield self._get_context(event, old_state_1, old_state_2)
+
+ self.assertEqual(old_state_1[2], context.current_state[("test1", "1")])
+
+ def _get_context(self, event, old_state_1, old_state_2):
+ group_name_1 = "group_name_1"
+ group_name_2 = "group_name_2"
+
+ self.store.get_state_groups.return_value = {
+ group_name_1: old_state_1,
+ group_name_2: old_state_2,
+ }
+
+ return self.state.compute_event_context(event)
diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py
new file mode 100644
index 00000000..b42787dd
--- /dev/null
+++ b/tests/test_test_utils.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+
+from tests.utils import MockClock
+
+class MockClockTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.clock = MockClock()
+
+ def test_advance_time(self):
+ start_time = self.clock.time()
+
+ self.clock.advance_time(20)
+
+ self.assertEquals(20, self.clock.time() - start_time)
+
+ def test_later(self):
+ invoked = [0, 0]
+
+ def _cb0():
+ invoked[0] = 1
+ self.clock.call_later(10, _cb0)
+
+ def _cb1():
+ invoked[1] = 1
+ self.clock.call_later(20, _cb1)
+
+ self.assertFalse(invoked[0])
+
+ self.clock.advance_time(15)
+
+ self.assertTrue(invoked[0])
+ self.assertFalse(invoked[1])
+
+ self.clock.advance_time(5)
+
+ self.assertTrue(invoked[1])
+
+ def test_cancel_later(self):
+ invoked = [0, 0]
+
+ def _cb0():
+ invoked[0] = 1
+ t0 = self.clock.call_later(10, _cb0)
+
+ def _cb1():
+ invoked[1] = 1
+ t1 = self.clock.call_later(20, _cb1)
+
+ self.clock.cancel_call_later(t0)
+
+ self.clock.advance_time(30)
+
+ self.assertFalse(invoked[0])
+ self.assertTrue(invoked[1])
diff --git a/tests/test_types.py b/tests/test_types.py
new file mode 100644
index 00000000..495cd20f
--- /dev/null
+++ b/tests/test_types.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+
+from synapse.api.errors import SynapseError
+from synapse.server import BaseHomeServer
+from synapse.types import UserID, RoomAlias
+
+mock_homeserver = BaseHomeServer(hostname="my.domain")
+
+
+class UserIDTestCase(unittest.TestCase):
+ def test_parse(self):
+ user = UserID.from_string("@1234abcd:my.domain")
+
+ self.assertEquals("1234abcd", user.localpart)
+ self.assertEquals("my.domain", user.domain)
+ self.assertEquals(True, mock_homeserver.is_mine(user))
+
+    def test_parse_empty(self):
+        with self.assertRaises(SynapseError):
+            UserID.from_string("")
+
+ def test_build(self):
+ user = UserID("5678efgh", "my.domain")
+
+ self.assertEquals(user.to_string(), "@5678efgh:my.domain")
+
+ def test_compare(self):
+ userA = UserID.from_string("@userA:my.domain")
+ userAagain = UserID.from_string("@userA:my.domain")
+ userB = UserID.from_string("@userB:my.domain")
+
+ self.assertTrue(userA == userAagain)
+ self.assertTrue(userA != userB)
+
+
+class RoomAliasTestCase(unittest.TestCase):
+ def test_parse(self):
+ room = RoomAlias.from_string("#channel:my.domain")
+
+ self.assertEquals("channel", room.localpart)
+ self.assertEquals("my.domain", room.domain)
+ self.assertEquals(True, mock_homeserver.is_mine(room))
+
+ def test_build(self):
+ room = RoomAlias("channel", "my.domain")
+
+ self.assertEquals(room.to_string(), "#channel:my.domain")
diff --git a/tests/unittest.py b/tests/unittest.py
new file mode 100644
index 00000000..fe26b757
--- /dev/null
+++ b/tests/unittest.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.trial import unittest
+
+import logging
+
+
+# logging doesn't have a "don't log anything at all EVARRRR" setting,
+# but since the highest value is 50, 1000000 should do ;)
+NEVER = 1000000
+
+logging.getLogger().addHandler(logging.StreamHandler())
+logging.getLogger().setLevel(NEVER)
+
+
+def around(target):
+ """A CLOS-style 'around' modifier, which wraps the original method of the
+ given instance with another piece of code.
+
+ @around(self)
+ def method_name(orig, *args, **kwargs):
+ return orig(*args, **kwargs)
+ """
+ def _around(code):
+ name = code.__name__
+ orig = getattr(target, name)
+ def new(*args, **kwargs):
+ return code(orig, *args, **kwargs)
+ setattr(target, name, new)
+ return _around
+
+
+class TestCase(unittest.TestCase):
+ """A subclass of twisted.trial's TestCase which looks for 'loglevel'
+ attributes on both itself and its individual test methods, to override the
+ root logger's logging level while that test (case|method) runs."""
+
+ def __init__(self, methodName, *args, **kwargs):
+ super(TestCase, self).__init__(methodName, *args, **kwargs)
+
+ method = getattr(self, methodName)
+
+ level = getattr(method, "loglevel",
+ getattr(self, "loglevel",
+ NEVER))
+
+ @around(self)
+ def setUp(orig):
+ old_level = logging.getLogger().level
+
+ if old_level != level:
+ @around(self)
+ def tearDown(orig):
+ ret = orig()
+ logging.getLogger().setLevel(old_level)
+ return ret
+
+ logging.getLogger().setLevel(level)
+            # but leave SQL logging at its original level
+ logging.getLogger("synapse.storage").setLevel(old_level)
+ return orig()
+
+ def assertObjectHasAttributes(self, attrs, obj):
+ """Asserts that the given object has each of the attributes given, and
+ that the value of each matches according to assertEquals."""
+ for (key, value) in attrs.items():
+ if not hasattr(obj, key):
+ raise AssertionError("Expected obj to have a '.%s'" % key)
+ try:
+ self.assertEquals(attrs[key], getattr(obj, key))
+ except AssertionError as e:
+ raise (type(e))(e.message + " for '.%s'" % key)
+
+
+def DEBUG(target):
+ """A decorator to set the .loglevel attribute to logging.DEBUG.
+ Can apply to either a TestCase or an individual test method."""
+ target.loglevel = logging.DEBUG
+ return target
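+
+
+# Illustrative usage of the DEBUG decorator (a sketch, not part of the
+# original file): apply it to a whole TestCase or to a single test method to
+# get debug-level logging while that test runs.
+#
+#     class NoisyTestCase(TestCase):
+#         @DEBUG
+#         def test_something_flaky(self):
+#             ...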
diff --git a/tests/util/__init__.py b/tests/util/__init__.py
new file mode 100644
index 00000000..9bff9ec1
--- /dev/null
+++ b/tests/util/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py
new file mode 100644
index 00000000..54ff26cd
--- /dev/null
+++ b/tests/util/test_dict_cache.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+from tests import unittest
+
+from synapse.util.caches.dictionary_cache import DictionaryCache
+
+
+class DictCacheTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.cache = DictionaryCache("foobar")
+
+ def test_simple_cache_hit_full(self):
+ key = "test_simple_cache_hit_full"
+
+ v = self.cache.get(key)
+ self.assertEqual((False, {}), v)
+
+ seq = self.cache.sequence
+ test_value = {"test": "test_simple_cache_hit_full"}
+ self.cache.update(seq, key, test_value, full=True)
+
+ c = self.cache.get(key)
+ self.assertEqual(test_value, c.value)
+
+ def test_simple_cache_hit_partial(self):
+ key = "test_simple_cache_hit_partial"
+
+ seq = self.cache.sequence
+ test_value = {
+ "test": "test_simple_cache_hit_partial"
+ }
+ self.cache.update(seq, key, test_value, full=True)
+
+ c = self.cache.get(key, ["test"])
+ self.assertEqual(test_value, c.value)
+
+ def test_simple_cache_miss_partial(self):
+ key = "test_simple_cache_miss_partial"
+
+ seq = self.cache.sequence
+ test_value = {
+ "test": "test_simple_cache_miss_partial"
+ }
+ self.cache.update(seq, key, test_value, full=True)
+
+ c = self.cache.get(key, ["test2"])
+ self.assertEqual({}, c.value)
+
+ def test_simple_cache_hit_miss_partial(self):
+ key = "test_simple_cache_hit_miss_partial"
+
+ seq = self.cache.sequence
+ test_value = {
+ "test": "test_simple_cache_hit_miss_partial",
+ "test2": "test_simple_cache_hit_miss_partial2",
+ "test3": "test_simple_cache_hit_miss_partial3",
+ }
+ self.cache.update(seq, key, test_value, full=True)
+
+ c = self.cache.get(key, ["test2"])
+ self.assertEqual({"test2": "test_simple_cache_hit_miss_partial2"}, c.value)
+
+ def test_multi_insert(self):
+ key = "test_simple_cache_hit_miss_partial"
+
+ seq = self.cache.sequence
+ test_value_1 = {
+ "test": "test_simple_cache_hit_miss_partial",
+ }
+ self.cache.update(seq, key, test_value_1, full=False)
+
+ seq = self.cache.sequence
+ test_value_2 = {
+ "test2": "test_simple_cache_hit_miss_partial2",
+ }
+ self.cache.update(seq, key, test_value_2, full=False)
+
+ c = self.cache.get(key)
+ self.assertEqual(
+ {
+ "test": "test_simple_cache_hit_miss_partial",
+ "test2": "test_simple_cache_hit_miss_partial2",
+ },
+ c.value
+ )
diff --git a/tests/util/test_log_context.py b/tests/util/test_log_context.py
new file mode 100644
index 00000000..efa0f28b
--- /dev/null
+++ b/tests/util/test_log_context.py
@@ -0,0 +1,43 @@
+from twisted.internet import defer, reactor
+from .. import unittest
+
+from synapse.util.async import sleep
+from synapse.util.logcontext import LoggingContext
+
+class LoggingContextTestCase(unittest.TestCase):
+
+ def _check_test_key(self, value):
+ self.assertEquals(
+ LoggingContext.current_context().test_key, value
+ )
+
+ def test_with_context(self):
+ with LoggingContext() as context_one:
+ context_one.test_key = "test"
+ self._check_test_key("test")
+
+ def test_chaining(self):
+ with LoggingContext() as context_one:
+ context_one.test_key = "one"
+ with LoggingContext() as context_two:
+ self._check_test_key("one")
+ context_two.test_key = "two"
+ self._check_test_key("two")
+ self._check_test_key("one")
+
+ @defer.inlineCallbacks
+ def test_sleep(self):
+ @defer.inlineCallbacks
+ def competing_callback():
+ with LoggingContext() as competing_context:
+ competing_context.test_key = "competing"
+ yield sleep(0)
+ self._check_test_key("competing")
+
+ reactor.callLater(0, competing_callback)
+
+ with LoggingContext() as context_one:
+ context_one.test_key = "one"
+ yield sleep(0)
+ self._check_test_key("one")
diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py
new file mode 100644
index 00000000..fc5a9043
--- /dev/null
+++ b/tests/util/test_lrucache.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .. import unittest
+
+from synapse.util.caches.lrucache import LruCache
+
+class LruCacheTestCase(unittest.TestCase):
+
+ def test_get_set(self):
+ cache = LruCache(1)
+ cache["key"] = "value"
+ self.assertEquals(cache.get("key"), "value")
+ self.assertEquals(cache["key"], "value")
+
+ def test_eviction(self):
+ cache = LruCache(2)
+ cache[1] = 1
+ cache[2] = 2
+
+ self.assertEquals(cache.get(1), 1)
+ self.assertEquals(cache.get(2), 2)
+
+ cache[3] = 3
+
+ self.assertEquals(cache.get(1), None)
+ self.assertEquals(cache.get(2), 2)
+ self.assertEquals(cache.get(3), 3)
+
+ def test_setdefault(self):
+ cache = LruCache(1)
+ self.assertEquals(cache.setdefault("key", 1), 1)
+ self.assertEquals(cache.get("key"), 1)
+ self.assertEquals(cache.setdefault("key", 2), 1)
+ self.assertEquals(cache.get("key"), 1)
+
+ def test_pop(self):
+ cache = LruCache(1)
+ cache["key"] = 1
+ self.assertEquals(cache.pop("key"), 1)
+ self.assertEquals(cache.pop("key"), None)
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 00000000..91040c2e
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,479 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.http.server import HttpServer
+from synapse.api.errors import cs_error, CodeMessageException, StoreError
+from synapse.api.constants import EventTypes
+from synapse.storage.prepare_database import prepare_database
+from synapse.storage.engines import create_engine
+from synapse.server import HomeServer
+
+from synapse.util.logcontext import LoggingContext
+
+from twisted.internet import defer, reactor
+from twisted.enterprise.adbapi import ConnectionPool
+
+from collections import namedtuple
+from mock import patch, Mock
+import hashlib
+import urllib
+import urlparse
+
+from inspect import getcallargs
+
+
+@defer.inlineCallbacks
+def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
+ """Setup a homeserver suitable for running tests against. Keyword arguments
+ are passed to the Homeserver constructor. If no datastore is supplied a
+ datastore backed by an in-memory sqlite db will be given to the HS.
+ """
+ if config is None:
+ config = Mock()
+ config.signing_key = [MockKey()]
+ config.event_cache_size = 1
+ config.disable_registration = False
+ config.macaroon_secret_key = "not even a little secret"
+ config.server_name = "server.under.test"
+
+ if "clock" not in kargs:
+ kargs["clock"] = MockClock()
+
+ if datastore is None:
+ db_pool = SQLiteMemoryDbPool()
+ yield db_pool.prepare()
+ hs = HomeServer(
+ name, db_pool=db_pool, config=config,
+ version_string="Synapse/tests",
+ database_engine=create_engine("sqlite3"),
+ **kargs
+ )
+ else:
+ hs = HomeServer(
+ name, db_pool=None, datastore=datastore, config=config,
+ version_string="Synapse/tests",
+ database_engine=create_engine("sqlite3"),
+ **kargs
+ )
+
+ # bcrypt is far too slow to be doing in unit tests
+ def swap_out_hash_for_testing(old_build_handlers):
+ def build_handlers():
+ handlers = old_build_handlers()
+ auth_handler = handlers.auth_handler
+ auth_handler.hash = lambda p: hashlib.md5(p).hexdigest()
+ auth_handler.validate_hash = lambda p, h: hashlib.md5(p).hexdigest() == h
+ return handlers
+ return build_handlers
+
+ hs.build_handlers = swap_out_hash_for_testing(hs.build_handlers)
+
+ defer.returnValue(hs)
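+
+
+# Illustrative usage (a sketch; the storage tests in this patch follow the
+# same pattern):
+#
+#     @defer.inlineCallbacks
+#     def setUp(self):
+#         hs = yield setup_test_homeserver(http_client=None)
+#         self.store = hs.get_datastore()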
+
+
+def get_mock_call_args(pattern_func, mock_func):
+ """ Return the arguments the mock function was called with interpreted
+ by the pattern functions argument list.
+ """
+ invoked_args, invoked_kargs = mock_func.call_args
+ return getcallargs(pattern_func, *invoked_args, **invoked_kargs)
+
+
+# This is a mock /resource/ not an entire server
+class MockHttpResource(HttpServer):
+
+ def __init__(self, prefix=""):
+ self.callbacks = [] # 3-tuple of method/pattern/function
+ self.prefix = prefix
+
+ def trigger_get(self, path):
+ return self.trigger("GET", path, None)
+
+ @patch('twisted.web.http.Request')
+ @defer.inlineCallbacks
+ def trigger(self, http_method, path, content, mock_request):
+ """ Fire an HTTP event.
+
+ Args:
+ http_method : The HTTP method
+ path : The HTTP path
+ content : The HTTP body
+ mock_request : Mocked request to pass to the event so it can get
+ content.
+ Returns:
+ A tuple of (code, response)
+ Raises:
+ KeyError If no event is found which will handle the path.
+ """
+ path = self.prefix + path
+
+ # annoyingly we return a twisted http request which has chained calls
+ # to get at the http content, hence mock it here.
+ mock_content = Mock()
+ config = {'read.return_value': content}
+ mock_content.configure_mock(**config)
+ mock_request.content = mock_content
+
+ mock_request.method = http_method
+ mock_request.uri = path
+
+ mock_request.getClientIP.return_value = "-"
+
+        mock_request.requestHeaders.getRawHeaders.return_value = [
+ "X-Matrix origin=test,key=,sig="
+ ]
+
+ # return the right path if the event requires it
+ mock_request.path = path
+
+ # add in query params to the right place
+ try:
+ mock_request.args = urlparse.parse_qs(path.split('?')[1])
+ mock_request.path = path.split('?')[0]
+ path = mock_request.path
+        except IndexError:
+            # no query string to split off
+            pass
+
+ for (method, pattern, func) in self.callbacks:
+ if http_method != method:
+ continue
+
+ matcher = pattern.match(path)
+ if matcher:
+ try:
+ args = [
+ urllib.unquote(u).decode("UTF-8")
+ for u in matcher.groups()
+ ]
+
+ (code, response) = yield func(
+ mock_request,
+ *args
+ )
+ defer.returnValue((code, response))
+ except CodeMessageException as e:
+ defer.returnValue((e.code, cs_error(e.msg)))
+
+ raise KeyError("No event can handle %s" % path)
+
+ def register_path(self, method, path_pattern, callback):
+ self.callbacks.append((method, path_pattern, callback))
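+
+    # Illustrative flow (a sketch): a servlet registers a compiled pattern
+    # via register_path, and a test then drives it through trigger().
+    # `re` and `on_GET` here are stand-ins, not names from this module:
+    #
+    #     resource.register_path("GET", re.compile("^/rooms/(.*)$"), on_GET)
+    #     code, response = yield resource.trigger(
+    #         "GET", "/rooms/!abc:test", None
+    #     )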
+
+
+class MockKey(object):
+ alg = "mock_alg"
+ version = "mock_version"
+ signature = b"\x9a\x87$"
+
+ @property
+ def verify_key(self):
+ return self
+
+ def sign(self, message):
+ return self
+
+ def verify(self, message, sig):
+ assert sig == b"\x9a\x87$"
+
+
+class MockClock(object):
+ now = 1000
+
+ def __init__(self):
+ # list of lists of [absolute_time, callback, expired] in no particular
+ # order
+ self.timers = []
+
+ def time(self):
+ return self.now
+
+ def time_msec(self):
+ return self.time() * 1000
+
+ def call_later(self, delay, callback):
+ current_context = LoggingContext.current_context()
+
+ def wrapped_callback():
+ LoggingContext.thread_local.current_context = current_context
+ callback()
+
+ t = [self.now + delay, wrapped_callback, False]
+ self.timers.append(t)
+
+ return t
+
+ def looping_call(self, function, interval):
+ pass
+
+ def cancel_call_later(self, timer):
+ if timer[2]:
+ raise Exception("Cannot cancel an expired timer")
+
+ timer[2] = True
+ self.timers = [t for t in self.timers if t != timer]
+
+ # For unit testing
+ def advance_time(self, secs):
+ self.now += secs
+
+ timers = self.timers
+ self.timers = []
+
+ for t in timers:
+ time, callback, expired = t
+
+ if expired:
+ raise Exception("Timer already expired")
+
+ if self.now >= time:
+ t[2] = True
+ callback()
+ else:
+ self.timers.append(t)
+
+ def advance_time_msec(self, ms):
+ self.advance_time(ms / 1000.)
+
+
+class SQLiteMemoryDbPool(ConnectionPool, object):
+ def __init__(self):
+ super(SQLiteMemoryDbPool, self).__init__(
+ "sqlite3", ":memory:",
+ cp_min=1,
+ cp_max=1,
+ )
+
+ def prepare(self):
+ engine = create_engine("sqlite3")
+ return self.runWithConnection(
+ lambda conn: prepare_database(conn, engine)
+ )
+
+
+class MemoryDataStore(object):
+
+ Room = namedtuple(
+ "Room",
+ ["room_id", "is_public", "creator"]
+ )
+
+ def __init__(self):
+ self.tokens_to_users = {}
+ self.paths_to_content = {}
+
+ self.members = {}
+ self.rooms = {}
+
+ self.current_state = {}
+ self.events = []
+
+ class Snapshot(namedtuple("Snapshot", "room_id user_id membership_state")):
+ def fill_out_prev_events(self, event):
+ pass
+
+ def snapshot_room(self, room_id, user_id, state_type=None, state_key=None):
+ return self.Snapshot(
+ room_id, user_id, self.get_room_member(user_id, room_id)
+ )
+
+ def register(self, user_id, token, password_hash):
+ if user_id in self.tokens_to_users.values():
+ raise StoreError(400, "User in use.")
+ self.tokens_to_users[token] = user_id
+
+ def get_user_by_access_token(self, token):
+ try:
+ return {
+ "name": self.tokens_to_users[token],
+ }
+        except KeyError:
+ raise StoreError(400, "User does not exist.")
+
+ def get_room(self, room_id):
+ try:
+ return self.rooms[room_id]
+        except KeyError:
+ return None
+
+ def store_room(self, room_id, room_creator_user_id, is_public):
+ if room_id in self.rooms:
+ raise StoreError(409, "Conflicting room!")
+
+ room = MemoryDataStore.Room(
+ room_id=room_id,
+ is_public=is_public,
+ creator=room_creator_user_id
+ )
+ self.rooms[room_id] = room
+
+ def get_room_member(self, user_id, room_id):
+ return self.members.get(room_id, {}).get(user_id)
+
+ def get_room_members(self, room_id, membership=None):
+ if membership:
+ return [
+ v for k, v in self.members.get(room_id, {}).items()
+ if v.membership == membership
+ ]
+ else:
+ return self.members.get(room_id, {}).values()
+
+ def get_rooms_for_user_where_membership_is(self, user_id, membership_list):
+ return [
+ self.members[r].get(user_id) for r in self.members
+ if user_id in self.members[r] and
+ self.members[r][user_id].membership in membership_list
+ ]
+
+ def get_room_events_stream(self, user_id=None, from_key=None, to_key=None,
+ limit=0, with_feedback=False):
+ return ([], from_key) # TODO
+
+ def get_joined_hosts_for_room(self, room_id):
+ return defer.succeed([])
+
+ def persist_event(self, event):
+ if event.type == EventTypes.Member:
+ room_id = event.room_id
+ user = event.state_key
+ membership = event.membership
+ self.members.setdefault(room_id, {})[user] = event
+
+ if hasattr(event, "state_key"):
+ key = (event.room_id, event.type, event.state_key)
+ self.current_state[key] = event
+
+ self.events.append(event)
+
+ def get_current_state(self, room_id, event_type=None, state_key=""):
+ if event_type:
+ key = (room_id, event_type, state_key)
+ if self.current_state.get(key):
+ return [self.current_state.get(key)]
+ return None
+        else:
+            # return the events themselves, not the (room, type, key) tuples
+            return [
+                v for k, v in self.current_state.items()
+                if k[0] == room_id
+            ]
+
+ def set_presence_state(self, user_localpart, state):
+ return defer.succeed({"state": 0})
+
+ def get_presence_list(self, user_localpart, accepted):
+ return []
+
+ def get_room_events_max_id(self):
+ return "s0" # TODO (erikj)
+
+ def get_send_event_level(self, room_id):
+ return defer.succeed(0)
+
+ def get_power_level(self, room_id, user_id):
+ return defer.succeed(0)
+
+ def get_add_state_level(self, room_id):
+ return defer.succeed(0)
+
+ def get_room_join_rule(self, room_id):
+ # TODO (erikj): This should be configurable
+ return defer.succeed("invite")
+
+ def get_ops_levels(self, room_id):
+ return defer.succeed((5, 5, 5))
+
+ def insert_client_ip(self, user, access_token, ip, user_agent):
+ return defer.succeed(None)
+
+
+def _format_call(args, kwargs):
+ return ", ".join(
+ ["%r" % (a) for a in args] +
+ ["%s=%r" % (k, v) for k, v in kwargs.items()]
+ )
+
+
+class DeferredMockCallable(object):
+ """A callable instance that stores a set of pending call expectations and
+ return values for them. It allows a unit test to assert that the given set
+ of function calls are eventually made, by awaiting on them to be called.
+ """
+
+ def __init__(self):
+ self.expectations = []
+ self.calls = []
+
+ def __call__(self, *args, **kwargs):
+ self.calls.append((args, kwargs))
+
+ if not self.expectations:
+ raise ValueError("%r has no pending calls to handle call(%s)" % (
+ self, _format_call(args, kwargs))
+ )
+
+ for (call, result, d) in self.expectations:
+ if args == call[1] and kwargs == call[2]:
+ d.callback(None)
+ return result
+
+ failure = AssertionError("Was not expecting call(%s)" %
+ _format_call(args, kwargs)
+ )
+
+ for _, _, d in self.expectations:
+ try:
+ d.errback(failure)
+            except Exception:
+                # the deferred may already have been fired
+                pass
+
+ raise failure
+
+ def expect_call_and_return(self, call, result):
+ self.expectations.append((call, result, defer.Deferred()))
+
+ @defer.inlineCallbacks
+ def await_calls(self, timeout=1000):
+ deferred = defer.DeferredList(
+ [d for _, _, d in self.expectations],
+ fireOnOneErrback=True
+ )
+
+ timer = reactor.callLater(
+            timeout / 1000.0,
+ deferred.errback,
+ AssertionError(
+ "%d pending calls left: %s"% (
+ len([e for e in self.expectations if not e[2].called]),
+ [e for e in self.expectations if not e[2].called]
+ )
+ )
+ )
+
+ yield deferred
+
+ timer.cancel()
+
+ self.calls = []
+
+ def assert_had_no_calls(self):
+ if self.calls:
+ calls = self.calls
+ self.calls = []
+
+ raise AssertionError("Expected not to received any calls, got:\n" +
+ "\n".join([
+ "call(%s)" % _format_call(c[0], c[1]) for c in calls
+ ])
+ )
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..95424765
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,28 @@
+[tox]
+envlist = packaging, py27, pep8
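+
+# Typical invocations (informational): plain `tox` runs every environment in
+# the envlist above; `tox -e py27` runs just the unit tests under Python 2.7.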
+
+[testenv]
+deps =
+ coverage
+ Twisted>=15.1
+ mock
+ python-subunit
+ junitxml
+setenv =
+ PYTHONDONTWRITEBYTECODE = no_byte_code
+commands =
+ /bin/bash -c "coverage run --source=synapse {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}"
+ {env:DUMP_COVERAGE_COMMAND:coverage report -m}
+
+[testenv:packaging]
+deps =
+ check-manifest
+commands =
+ check-manifest
+
+[testenv:pep8]
+skip_install = True
+basepython = python2.7
+deps =
+ flake8
+commands = /bin/bash -c "flake8 synapse {env:PEP8SUFFIX:}"