Merge branch 'release-v1.0.0' of github.com:matrix-org/synapse into matrix-org-hotfixes

matrix-org-hotfixes-identity
Erik Johnston 2019-06-10 16:05:10 +01:00
commit aad993f24d
108 changed files with 4528 additions and 448 deletions

View File

@ -36,8 +36,6 @@ steps:
image: "python:3.6"
propagate-environment: true
- wait
- command:
- "python -m pip install tox"
- "tox -e check-sampleconfig"
@ -46,6 +44,8 @@ steps:
- docker#v3.0.1:
image: "python:3.6"
- wait
- command:
- "python -m pip install tox"
- "tox -e py27,codecov"
@ -56,6 +56,12 @@ steps:
- docker#v3.0.1:
image: "python:2.7"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- command:
- "python -m pip install tox"
@ -67,6 +73,12 @@ steps:
- docker#v3.0.1:
image: "python:3.5"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- command:
- "python -m pip install tox"
@ -78,6 +90,12 @@ steps:
- docker#v3.0.1:
image: "python:3.6"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- command:
- "python -m pip install tox"
@ -89,6 +107,12 @@ steps:
- docker#v3.0.1:
image: "python:3.7"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- command:
- "python -m pip install tox"
@ -100,6 +124,12 @@ steps:
- docker#v3.0.1:
image: "python:2.7"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 2.7 / :postgres: 9.4"
env:
@ -111,6 +141,12 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py27.pg94.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 2.7 / :postgres: 9.5"
env:
@ -122,6 +158,12 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py27.pg95.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 3.5 / :postgres: 9.4"
env:
@ -133,6 +175,12 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py35.pg94.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 3.5 / :postgres: 9.5"
env:
@ -144,6 +192,12 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py35.pg95.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 3.7 / :postgres: 9.5"
env:
@ -155,6 +209,12 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py37.pg95.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 3.7 / :postgres: 11"
env:
@ -166,3 +226,9 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py37.pg11.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
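A note on the retry stanzas added above: in Buildkite's automatic-retry settings, an exit status of -1 marks a job whose agent was lost mid-run, which matches the changelog entry for this change ("Automatically retry buildkite builds (max twice) when an agent is lost"); each `limit: 2` caps a step at two automatic retries.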

View File

@ -1,3 +1,86 @@
Synapse 1.0.0rc1 (2019-06-07)
=============================
Features
--------
- Synapse now more efficiently collates room statistics. ([\#4338](https://github.com/matrix-org/synapse/issues/4338), [\#5260](https://github.com/matrix-org/synapse/issues/5260), [\#5324](https://github.com/matrix-org/synapse/issues/5324))
- Add experimental support for relations (aka reactions and edits). ([\#5220](https://github.com/matrix-org/synapse/issues/5220))
- Ability to configure default room version. ([\#5223](https://github.com/matrix-org/synapse/issues/5223), [\#5249](https://github.com/matrix-org/synapse/issues/5249))
- Allow configuring a range for the account validity startup job. ([\#5276](https://github.com/matrix-org/synapse/issues/5276))
- CAS login will now hit the r0 API, not the deprecated v1 one. ([\#5286](https://github.com/matrix-org/synapse/issues/5286))
- Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). ([\#5359](https://github.com/matrix-org/synapse/issues/5359))
- Update /_matrix/client/versions to reference support for r0.5.0. ([\#5360](https://github.com/matrix-org/synapse/issues/5360))
- Add a script to generate new signing-key files. ([\#5361](https://github.com/matrix-org/synapse/issues/5361))
- Update upgrade and installation guides ahead of 1.0. ([\#5371](https://github.com/matrix-org/synapse/issues/5371))
- Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). ([\#5374](https://github.com/matrix-org/synapse/issues/5374))
- Add ability to perform password reset via email without trusting the identity server. ([\#5377](https://github.com/matrix-org/synapse/issues/5377))
- Set default room version to v4. ([\#5379](https://github.com/matrix-org/synapse/issues/5379))
Bugfixes
--------
- Fix the client-server API not sending "m.heroes" to lazy-load /sync requests when a room's name or its canonical alias is empty. Thanks to @dnaf for this work! ([\#5089](https://github.com/matrix-org/synapse/issues/5089))
- Prevent federation device list updates breaking when processing multiple updates at once. ([\#5156](https://github.com/matrix-org/synapse/issues/5156))
- Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. ([\#5200](https://github.com/matrix-org/synapse/issues/5200))
- Fix race when backfilling in rooms with worker mode. ([\#5221](https://github.com/matrix-org/synapse/issues/5221))
- Fix appservice timestamp massaging. ([\#5233](https://github.com/matrix-org/synapse/issues/5233))
- Ensure that server_keys fetched via a notary server are correctly signed. ([\#5251](https://github.com/matrix-org/synapse/issues/5251))
- Show the correct error when logging out and access token is missing. ([\#5256](https://github.com/matrix-org/synapse/issues/5256))
- Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms ([\#5257](https://github.com/matrix-org/synapse/issues/5257))
- Fix error when downloading thumbnail with missing width/height parameter. ([\#5258](https://github.com/matrix-org/synapse/issues/5258))
- Fix schema update for account validity. ([\#5268](https://github.com/matrix-org/synapse/issues/5268))
- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. ([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291))
- Fix "db txn 'update_presence' from sentinel context" log messages. ([\#5275](https://github.com/matrix-org/synapse/issues/5275))
- Fix dropped logcontexts during high outbound traffic. ([\#5277](https://github.com/matrix-org/synapse/issues/5277))
- Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. ([\#5293](https://github.com/matrix-org/synapse/issues/5293))
- Fix performance problems with the rooms stats background update. ([\#5294](https://github.com/matrix-org/synapse/issues/5294))
- Fix noisy 'no key for server' logs. ([\#5300](https://github.com/matrix-org/synapse/issues/5300))
- Fix bug where a notary server would sometimes forget old keys. ([\#5307](https://github.com/matrix-org/synapse/issues/5307))
- Prevent users from setting huge displaynames and avatar URLs. ([\#5309](https://github.com/matrix-org/synapse/issues/5309))
- Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails. ([\#5317](https://github.com/matrix-org/synapse/issues/5317))
- Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. ([\#5321](https://github.com/matrix-org/synapse/issues/5321))
- Fix various problems which made the signing-key notary server time out for some requests. ([\#5333](https://github.com/matrix-org/synapse/issues/5333))
- Fix bug which would make certain operations (such as room joins) block for 20 minutes while attempting to fetch verification keys. ([\#5334](https://github.com/matrix-org/synapse/issues/5334))
- Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. ([\#5335](https://github.com/matrix-org/synapse/issues/5335), [\#5340](https://github.com/matrix-org/synapse/issues/5340))
- Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. ([\#5341](https://github.com/matrix-org/synapse/issues/5341))
- Fix failure when fetching batches of events during backfill, etc. ([\#5342](https://github.com/matrix-org/synapse/issues/5342))
- Add a new room version where the timestamps on events are checked against the validity periods on signing keys. ([\#5348](https://github.com/matrix-org/synapse/issues/5348), [\#5354](https://github.com/matrix-org/synapse/issues/5354))
- Fix room stats and presence background updates to correctly handle missing events. ([\#5352](https://github.com/matrix-org/synapse/issues/5352))
- Include left members in room summaries' heroes. ([\#5355](https://github.com/matrix-org/synapse/issues/5355))
- Fix `federation_custom_ca_list` configuration option. ([\#5362](https://github.com/matrix-org/synapse/issues/5362))
- Fix missing logcontext warnings on shutdown. ([\#5369](https://github.com/matrix-org/synapse/issues/5369))
Improved Documentation
----------------------
- Fix docs on resetting the user directory. ([\#5282](https://github.com/matrix-org/synapse/issues/5282))
- Fix notes about ACME in the MSC1711 faq. ([\#5357](https://github.com/matrix-org/synapse/issues/5357))
Internal Changes
----------------
- Synapse will now serve the experimental "room complexity" API endpoint. ([\#5216](https://github.com/matrix-org/synapse/issues/5216))
- The base classes for the v1 and v2_alpha REST APIs have been unified. ([\#5226](https://github.com/matrix-org/synapse/issues/5226), [\#5328](https://github.com/matrix-org/synapse/issues/5328))
- Simplifications and comments in do_auth. ([\#5227](https://github.com/matrix-org/synapse/issues/5227))
- Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2. ([\#5230](https://github.com/matrix-org/synapse/issues/5230))
- Preparatory work for key-validity features. ([\#5232](https://github.com/matrix-org/synapse/issues/5232), [\#5234](https://github.com/matrix-org/synapse/issues/5234), [\#5235](https://github.com/matrix-org/synapse/issues/5235), [\#5236](https://github.com/matrix-org/synapse/issues/5236), [\#5237](https://github.com/matrix-org/synapse/issues/5237), [\#5244](https://github.com/matrix-org/synapse/issues/5244), [\#5250](https://github.com/matrix-org/synapse/issues/5250), [\#5296](https://github.com/matrix-org/synapse/issues/5296), [\#5299](https://github.com/matrix-org/synapse/issues/5299), [\#5343](https://github.com/matrix-org/synapse/issues/5343), [\#5347](https://github.com/matrix-org/synapse/issues/5347), [\#5356](https://github.com/matrix-org/synapse/issues/5356))
- Specify the type of reCAPTCHA key to use. ([\#5283](https://github.com/matrix-org/synapse/issues/5283))
- Improve sample config for monthly active user blocking. ([\#5284](https://github.com/matrix-org/synapse/issues/5284))
- Remove spurious debug from MatrixFederationHttpClient.get_json. ([\#5287](https://github.com/matrix-org/synapse/issues/5287))
- Improve logging for logcontext leaks. ([\#5288](https://github.com/matrix-org/synapse/issues/5288))
- Clarify that the admin change password API logs the user out. ([\#5303](https://github.com/matrix-org/synapse/issues/5303))
- New installs will now use the v54 full schema, rather than the full schema v14 and applying incremental updates to v54. ([\#5320](https://github.com/matrix-org/synapse/issues/5320))
- Improve docstrings on MatrixFederationClient. ([\#5332](https://github.com/matrix-org/synapse/issues/5332))
- Clean up FederationClient.get_events for clarity. ([\#5344](https://github.com/matrix-org/synapse/issues/5344))
- Various improvements to debug logging. ([\#5353](https://github.com/matrix-org/synapse/issues/5353))
- Don't run CI build checks until sample config check has passed. ([\#5370](https://github.com/matrix-org/synapse/issues/5370))
- Automatically retry buildkite builds (max twice) when an agent is lost. ([\#5380](https://github.com/matrix-org/synapse/issues/5380))
Synapse 0.99.5.2 (2019-05-30)
=============================

View File

@ -5,6 +5,7 @@
* [Prebuilt packages](#prebuilt-packages)
* [Setting up Synapse](#setting-up-synapse)
* [TLS certificates](#tls-certificates)
* [Email](#email)
* [Registering a user](#registering-a-user)
* [Setting up a TURN server](#setting-up-a-turn-server)
* [URL previews](#url-previews)
@ -394,9 +395,22 @@ To configure Synapse to expose an HTTPS port, you will need to edit
instance, if using certbot, use `fullchain.pem` as your certificate, not
`cert.pem`).
For those of you upgrading your TLS certificate in readiness for Synapse 1.0,
For those of you upgrading your TLS certificate for Synapse 1.0 compliance,
please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
## Email
Synapse needs to be able to send email: for example, this is required to
support the 'password reset' feature.
To configure an SMTP server for Synapse, modify the configuration section
headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
``smtp_pass``, and ``require_transport_security``.
If Synapse is not configured with an SMTP server, password reset via email will
be disabled by default.
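For illustration, here is a minimal sketch of such a section (host, port and sender address are placeholder values), loaded the same way Synapse parses homeserver.yaml:

import yaml

# Minimal 'email' section for homeserver-sent password reset emails;
# values are illustrative.
config = yaml.safe_load("""
email:
  smtp_host: mail.example.com
  smtp_port: 587
  notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
""")
# The three keys named above as required:
assert {"smtp_host", "smtp_port", "notif_from"} <= set(config["email"])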
## Registering a user
You will need at least one user on your server in order to use a Matrix

View File

@ -9,7 +9,10 @@ include demo/*.py
include demo/*.sh
recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.sql.postgres
recursive-include synapse/storage/schema *.sql.sqlite
recursive-include synapse/storage/schema *.py
recursive-include synapse/storage/schema *.txt
recursive-include docs *
recursive-include scripts *

View File

@ -49,6 +49,55 @@ returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
Upgrading to v1.0
=================
Validation of TLS certificates
------------------------------
Synapse v1.0 is the first release to enforce
validation of TLS certificates for the federation API. It is therefore
essential that your certificates are correctly configured. See the `FAQ
<docs/MSC1711_certificates_FAQ.md>`_ for more information.
Note that v1.0 installations will also no longer be able to federate with servers
that have not correctly configured their certificates.
In rare cases, it may be desirable to disable certificate checking: for
example, it might be essential to be able to federate with a given legacy
server in a closed federation. This can be done in one of two ways:
* Configure the global switch ``federation_verify_certificates`` to ``false``.
* Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``.
See the `sample configuration file <docs/sample_config.yaml>`_
for more details on these settings.
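As a sketch, the two options above might look like this in homeserver.yaml (domains illustrative; the whitelist is the safer choice, since verification stays on for everyone else):

import yaml

# Option 1: turn certificate verification off globally (discouraged).
option_one = yaml.safe_load("""
federation_verify_certificates: false
""")

# Option 2: keep verification on, but whitelist specific legacy servers.
option_two = yaml.safe_load("""
federation_certificate_verification_whitelist:
  - legacy.example.com
""")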
Email
-----
When a user requests a password reset, Synapse will send an email to the
user to confirm the request.
Previous versions of Synapse delegated the job of sending this email to an
identity server. If the identity server was somehow malicious or became
compromised, it would be theoretically possible to hijack an account through
this means.
Therefore, by default, Synapse v1.0 will send the confirmation email itself. If
Synapse is not configured with an SMTP server, password reset via email will be
disabled.
To configure an SMTP server for Synapse, modify the configuration section
headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
``smtp_pass``, and ``require_transport_security``.
If you are absolutely certain that you wish to continue using an identity
server for password resets, set ``trust_identity_server_for_password_resets`` to ``true``.
See the `sample configuration file <docs/sample_config.yaml>`_
for more details on these settings.
Upgrading to v0.99.0
====================

View File

@ -1 +0,0 @@
Synapse now more efficiently collates room statistics.

View File

@ -1 +0,0 @@
Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo.

View File

@ -1 +0,0 @@
Synapse will now serve the experimental "room complexity" API endpoint.

View File

@ -1 +0,0 @@
Add experimental support for relations (aka reactions and edits).

View File

@ -1 +0,0 @@
Ability to configure default room version.

View File

@ -1 +0,0 @@
The base classes for the v1 and v2_alpha REST APIs have been unified.

View File

@ -1 +0,0 @@
Simplifications and comments in do_auth.

View File

@ -1 +0,0 @@
Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2.

View File

@ -1 +0,0 @@
Run black on synapse.crypto.keyring.

View File

@ -1 +0,0 @@
Fix appservice timestamp massaging.

View File

@ -1 +0,0 @@
Rewrite store_server_verify_key to store several keys at once.

View File

@ -1 +0,0 @@
Remove unused VerifyKey.expired and .time_added fields.

View File

@ -1 +0,0 @@
Simplify Keyring.process_v2_response.

View File

@ -1 +0,0 @@
Store key validity time in the storage layer.

View File

@ -1 +0,0 @@
Refactor synapse.crypto.keyring to use a KeyFetcher interface.

View File

@ -1 +0,0 @@
Ability to configure default room version.

View File

@ -1 +0,0 @@
Simplification to Keyring.wait_for_previous_lookups.

View File

@ -1 +0,0 @@
Ensure that server_keys fetched via a notary server are correctly signed.

View File

@ -1 +0,0 @@
Show the correct error when logging out and access token is missing.

View File

@ -1 +0,0 @@
Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms

View File

@ -1 +0,0 @@
Fix error when downloading thumbnail with missing width/height parameter.

View File

@ -1 +0,0 @@
Synapse now more efficiently collates room statistics.

View File

@ -1 +0,0 @@
Fix schema update for account validity.

View File

@ -1 +0,0 @@
Fix bug where we leaked extremities when we soft failed events, leading to performance degradation.

View File

@ -1 +0,0 @@
Fix "db txn 'update_presence' from sentinel context" log messages.

View File

@ -1 +0,0 @@
Allow configuring a range for the account validity startup job.

View File

@ -1 +0,0 @@
Fix dropped logcontexts during high outbound traffic.

View File

@ -1 +0,0 @@
Fix bug where we leaked extremities when we soft failed events, leading to performance degradation.

View File

@ -1 +0,0 @@
Fix docs on resetting the user directory.

View File

@ -1 +0,0 @@
Specify the type of reCAPTCHA key to use.

View File

@ -1 +0,0 @@
CAS login will now hit the r0 API, not the deprecated v1 one.

View File

@ -1 +0,0 @@
Remove spurious debug from MatrixFederationHttpClient.get_json.

View File

@ -1 +0,0 @@
Improve logging for logcontext leaks.

View File

@ -1 +0,0 @@
Fix bug where we leaked extremities when we soft failed events, leading to performance degradation.

View File

@ -1 +0,0 @@
Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`.

View File

@ -1 +0,0 @@
Fix performance problems with the rooms stats background update.

View File

@ -1 +0,0 @@
Refactor keyring.VerifyKeyRequest to use attr.s.

View File

@ -1 +0,0 @@
Rewrite get_server_verify_keys, again.

View File

@ -1 +0,0 @@
Fix noisy 'no key for server' logs.

View File

@ -1 +0,0 @@
Clarify that the admin change password API logs the user out.

View File

@ -1 +0,0 @@
Fix bug where a notary server would sometimes forget old keys.

View File

@ -1 +0,0 @@
Prevent users from setting huge displaynames and avatar URLs.

View File

@ -1 +0,0 @@
Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests.

View File

@ -1 +0,0 @@
Synapse now more efficiently collates room statistics.

View File

@ -1 +0,0 @@
The base classes for the v1 and v2_alpha REST APIs have been unified.

View File

@ -1 +0,0 @@
Improve docstrings on MatrixFederationClient.

View File

@ -1 +0,0 @@
Fix various problems which made the signing-key notary server time out for some requests.

View File

@ -1 +0,0 @@
Fix bug which would make certain operations (such as room joins) block for 20 minutes while attempting to fetch verification keys.

View File

@ -1 +0,0 @@
Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes.

View File

@ -1 +0,0 @@
Fix a bug where account validity renewal emails could only be sent when email notifs were enabled.

changelog.d/5392.bugfix Normal file
View File

@ -0,0 +1 @@
Remove redundant warning about key server response validation.

changelog.d/5415.bugfix Normal file
View File

@ -0,0 +1 @@
Fix bug where old keys stored in the database with a null valid-until timestamp caused all verification requests for that key to fail.

View File

@ -68,16 +68,14 @@ Admins should upgrade and configure a valid CA cert. Homeservers that require a
.well-known entry (see below) should retain their SRV record and use it
alongside their .well-known record.
**>= 5th March 2019 - Synapse 1.0.0 is released**
**10th June 2019 - Synapse 1.0.0 is released**
1.0.0 will land no sooner than 1 month after 0.99.0, leaving server admins one
month after 5th February to upgrade to 0.99.0 and deploy their certificates. In
1.0.0 is scheduled for release on 10th June. In
accordance with the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html)
1.0.0 will enforce certificate validity. This means that any homeserver without a
valid certificate after this point will no longer be able to federate with
1.0.0 servers.
## Configuring certificates for compatibility with Synapse 1.0.0
### If you do not currently have an SRV record
@ -145,12 +143,11 @@ You can do this with a `.well-known` file as follows:
1. Keep the SRV record in place - it is needed for backwards compatibility
with Synapse 0.34 and earlier.
2. Give synapse a certificate corresponding to the target domain
(`customer.example.net` in the above example). Currently Synapse's ACME
support [does not support
this](https://github.com/matrix-org/synapse/issues/4552), so you will have
to acquire a certificate yourself and give it to Synapse via
`tls_certificate_path` and `tls_private_key_path`.
2. Give Synapse a certificate corresponding to the target domain
(`customer.example.net` in the above example). You can either use Synapse's
built-in [ACME support](./ACME.md) for this (via the `domain` parameter in
the `acme` section), or acquire a certificate yourself and give it to
Synapse via `tls_certificate_path` and `tls_private_key_path`.
3. Restart Synapse to ensure the new certificate is loaded.
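For step 2 above, a sketch of the two approaches as homeserver.yaml snippets (the domain and file paths are illustrative):

import yaml

# Built-in ACME support, requesting a certificate for the target domain.
acme_option = yaml.safe_load("""
acme:
  enabled: true
  domain: customer.example.net
""")

# Manually acquired certificate.
manual_option = yaml.safe_load("""
tls_certificate_path: /path/to/customer.example.net.crt
tls_private_key_path: /path/to/customer.example.net.key
""")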

View File

@ -91,7 +91,7 @@ pid_file: DATADIR/homeserver.pid
# For example, for room version 1, default_room_version should be set
# to "1".
#
#default_room_version: "1"
#default_room_version: "4"
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
@ -261,6 +261,22 @@ listeners:
# Monthly Active User Blocking
#
# Used in cases where the admin or server owner wants to limit the
# number of monthly active users.
#
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
# enabled and a limit is reached, the server returns a 'ResourceLimitError'
# with error type Codes.RESOURCE_LIMIT_EXCEEDED.
#
# 'max_mau_value' is the hard limit of monthly active users above which
# the server will start blocking user actions.
#
# 'mau_trial_days' is a means to add a grace period for active users. It
# means that users must be active for this number of days before they
# can be considered active and guards against the case where lots of users
# sign up in a short space of time never to return after their initial
# session.
#
#limit_usage_by_mau: False
#max_mau_value: 50
#mau_trial_days: 2
@ -313,12 +329,12 @@ listeners:
#
#tls_private_key_path: "CONFDIR/SERVERNAME.tls.key"
# Whether to verify TLS certificates when sending federation traffic.
# Whether to verify TLS server certificates for outbound federation requests.
#
# This currently defaults to `false`, however this will change in
# Synapse 1.0 when valid federation certificates will be required.
# Defaults to `true`. To disable certificate verification, uncomment the
# following line.
#
#federation_verify_certificates: true
#federation_verify_certificates: false
# Skip federation certificate verification on the following whitelist
# of domains.
@ -936,12 +952,43 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"
# The trusted servers to download signing keys from.
#
#perspectives:
# servers:
# "matrix.org":
# verify_keys:
# "ed25519:auto":
# key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
# When we need to fetch a signing key, each server is tried in parallel.
#
# Normally, the connection to the key server is validated via TLS certificates.
# Additional security can be provided by configuring a `verify key`, which
# will make synapse check that the response is signed by that key.
#
# This setting supersedes an older setting named `perspectives`. The old format
# is still supported for backwards-compatibility, but it is deprecated.
#
# Options for each entry in the list include:
#
# server_name: the name of the server. required.
#
# verify_keys: an optional map from key id to base64-encoded public key.
# If specified, we will check that the response is signed by at least
# one of the given keys.
#
# accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
# and federation_verify_certificates is not `true`, synapse will refuse
# to start, because this would allow anyone who can spoof DNS responses
# to masquerade as the trusted key server. If you know what you are doing
# and are sure that your network environment provides a secure connection
# to the key server, you can set this to `true` to override this
# behaviour.
#
# An example configuration might look like:
#
#trusted_key_servers:
# - server_name: "my_trusted_server.example.com"
# verify_keys:
# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
# - server_name: "my_other_trusted_server.example.com"
#
# The default configuration is:
#
#trusted_key_servers:
# - server_name: "matrix.org"
# Enable SAML2 for registration and login. Uses pysaml2.
@ -1018,10 +1065,8 @@ password_config:
# Enable sending emails for notification events or expiry notices
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.
# Enable sending emails for password resets, notification events or
# account expiry notices
#
# If your SMTP server requires authentication, the optional smtp_user &
# smtp_pass variables should be used
@ -1029,22 +1074,64 @@ password_config:
#email:
# enable_notifs: false
# smtp_host: "localhost"
# smtp_port: 25
# smtp_port: 25 # SSL: 465, STARTTLS: 587
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: False
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
# # if template_dir is unset, uses the example templates that are part of
# # the Synapse distribution.
#
# # Enable email notifications by default
# notif_for_new_users: True
#
# # Defining a custom URL for Riot is only needed if email notifications
# # should contain links to a self-hosted installation of Riot; when set
# # the "app_name" setting is ignored
# riot_base_url: "http://localhost/riot"
#
# # Enable sending password reset emails via the configured, trusted
# # identity servers
# #
# # IMPORTANT! This will give a malicious or overtaken identity server
# # the ability to reset passwords for your users! Make absolutely sure
# # that you want to do this! It is strongly recommended that password
# # reset emails be sent by the homeserver instead
# #
# # If this option is set to false and SMTP options have not been
# # configured, resetting user passwords via email will be disabled
# #trust_identity_server_for_password_resets: false
#
# # Configure the time that a validation email or text message code
# # will expire after sending
# #
# # This is currently used for password resets
# #validation_token_lifetime: 1h
#
# # Template directory. All template files should be stored within this
# # directory
# #
# #template_dir: res/templates
#
# # Templates for email notifications
# #
# notif_template_html: notif_mail.html
# notif_template_text: notif_mail.txt
# # Templates for account expiry notices.
#
# # Templates for account expiry notices
# #
# expiry_template_html: notice_expiry.html
# expiry_template_text: notice_expiry.txt
# notif_for_new_users: True
# riot_base_url: "http://localhost/riot"
#
# # Templates for password reset emails sent by the homeserver
# #
# #password_reset_template_html: password_reset.html
# #password_reset_template_text: password_reset.txt
#
# # Templates for password reset success and failure pages that a user
# # will see after attempting to reset their password
# #
# #password_reset_template_success_html: password_reset_success.html
# #password_reset_template_failure_html: password_reset_failure.html
#password_providers:

scripts/generate_signing_key.py Executable file
View File

@ -0,0 +1,37 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from signedjson.key import write_signing_keys, generate_signing_key
from synapse.util.stringutils import random_string
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output_file",
type=argparse.FileType('w'),
default=sys.stdout,
help="Where to write the output to",
)
args = parser.parse_args()
key_id = "a_" + random_string(4)
# write_signing_keys expects an iterable of keys, so wrap the single
# generated key in a one-element tuple.
key = (generate_signing_key(key_id),)
write_signing_keys(args.output_file, key)
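For example, to write a new key file rather than printing to stdout, one might run `./scripts/generate_signing_key.py -o my_server.signing.key`; the `-o`/`--output_file` flag is defined above and defaults to stdout.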

View File

@ -27,4 +27,4 @@ try:
except ImportError:
pass
__version__ = "0.99.5.2"
__version__ = "1.0.0rc1"

View File

@ -339,6 +339,15 @@ class UnsupportedRoomVersionError(SynapseError):
)
class ThreepidValidationError(SynapseError):
"""An error raised when there was a problem authorising an event."""
def __init__(self, *args, **kwargs):
if "errcode" not in kwargs:
kwargs["errcode"] = Codes.FORBIDDEN
super(ThreepidValidationError, self).__init__(*args, **kwargs)
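A hypothetical call site, illustrating the default above (the status code and message are invented for the example):

# Because of the __init__ above, errcode defaults to Codes.FORBIDDEN
# unless the caller overrides it.
raise ThreepidValidationError(400, "Validation token has expired")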
class IncompatibleRoomVersionError(SynapseError):
"""A server is trying to join a room whose version it does not support.

View File

@ -50,6 +50,7 @@ class RoomVersion(object):
disposition = attr.ib() # str; one of the RoomDispositions
event_format = attr.ib() # int; one of the EventFormatVersions
state_res = attr.ib() # int; one of the StateResolutionVersions
enforce_key_validity = attr.ib() # bool
class RoomVersions(object):
@ -58,30 +59,35 @@ class RoomVersions(object):
RoomDisposition.STABLE,
EventFormatVersions.V1,
StateResolutionVersions.V1,
)
STATE_V2_TEST = RoomVersion(
"state-v2-test",
RoomDisposition.UNSTABLE,
EventFormatVersions.V1,
StateResolutionVersions.V2,
enforce_key_validity=False,
)
V2 = RoomVersion(
"2",
RoomDisposition.STABLE,
EventFormatVersions.V1,
StateResolutionVersions.V2,
enforce_key_validity=False,
)
V3 = RoomVersion(
"3",
RoomDisposition.STABLE,
EventFormatVersions.V2,
StateResolutionVersions.V2,
enforce_key_validity=False,
)
V4 = RoomVersion(
"4",
RoomDisposition.STABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=False,
)
V5 = RoomVersion(
"5",
RoomDisposition.STABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
)
@ -90,7 +96,7 @@ KNOWN_ROOM_VERSIONS = {
RoomVersions.V1,
RoomVersions.V2,
RoomVersions.V3,
RoomVersions.STATE_V2_TEST,
RoomVersions.V4,
RoomVersions.V5,
)
} # type: dict[str, RoomVersion]
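A sketch of how a caller might consult the new flag; this helper is hypothetical and not part of the diff. Key-validity enforcement means that, in v5 rooms, an event's signature must come from a key that was still valid when the event was sent:

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

def minimum_key_validity_ts(room_version_id, event_origin_server_ts):
    # For room versions that enforce key validity (v5), require the
    # signing key to have been valid at the event's origin timestamp;
    # older versions accept any key (minimum validity of 0).
    room_version = KNOWN_ROOM_VERSIONS[room_version_id]
    if room_version.enforce_key_validity:
        return event_origin_server_ts
    return 0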

View File

@ -176,6 +176,7 @@ class SynapseHomeServer(HomeServer):
resources.update({
"/_matrix/client/api/v1": client_resource,
"/_synapse/password_reset": client_resource,
"/_matrix/client/r0": client_resource,
"/_matrix/client/unstable": client_resource,
"/_matrix/client/v2_alpha": client_resource,

View File

@ -50,6 +50,11 @@ class EmailConfig(Config):
else:
self.email_app_name = "Matrix"
# TODO: Rename notif_from to something more generic, or have a separate
# from for password resets, message notifications, etc?
# Currently the email section is a bit bogged down with settings for
# multiple functions. Would be good to split it out into separate
# sections and only put the common ones under email:
self.email_notif_from = email_config.get("notif_from", None)
if self.email_notif_from is not None:
# make sure it's valid
@ -74,7 +79,28 @@ class EmailConfig(Config):
"account_validity", {},
).get("renew_at")
if self.email_enable_notifs or account_validity_renewal_enabled:
email_trust_identity_server_for_password_resets = email_config.get(
"trust_identity_server_for_password_resets", False,
)
self.email_password_reset_behaviour = (
"remote" if email_trust_identity_server_for_password_resets else "local"
)
if self.email_password_reset_behaviour == "local" and email_config == {}:
logger.warn(
"User password resets have been disabled due to lack of email config"
)
self.email_password_reset_behaviour = "off"
# Get lifetime of a validation token in milliseconds
self.email_validation_token_lifetime = self.parse_duration(
email_config.get("validation_token_lifetime", "1h")
)
if (
self.email_enable_notifs
or account_validity_renewal_enabled
or self.email_password_reset_behaviour == "local"
):
# make sure we can import the required deps
import jinja2
import bleach
@ -82,6 +108,67 @@ class EmailConfig(Config):
jinja2
bleach
if self.email_password_reset_behaviour == "local":
required = [
"smtp_host",
"smtp_port",
"notif_from",
]
missing = []
for k in required:
if k not in email_config:
missing.append(k)
if missing:
raise RuntimeError(
"email.password_reset_behaviour is set to 'local' "
"but required keys are missing: %s" %
(", ".join(["email." + k for k in missing]),)
)
# Templates for password reset emails
self.email_password_reset_template_html = email_config.get(
"password_reset_template_html", "password_reset.html",
)
self.email_password_reset_template_text = email_config.get(
"password_reset_template_text", "password_reset.txt",
)
self.email_password_reset_failure_template = email_config.get(
"password_reset_failure_template", "password_reset_failure.html",
)
# This template does not support any replaceable variables, so we will
# read it from the disk once during setup
email_password_reset_success_template = email_config.get(
"password_reset_success_template", "password_reset_success.html",
)
# Check templates exist
for f in [self.email_password_reset_template_html,
self.email_password_reset_template_text,
self.email_password_reset_failure_template,
email_password_reset_success_template]:
p = os.path.join(self.email_template_dir, f)
if not os.path.isfile(p):
raise ConfigError("Unable to find template file %s" % (p, ))
# Retrieve content of web templates
filepath = os.path.join(
self.email_template_dir,
email_password_reset_success_template,
)
self.email_password_reset_success_html_content = self.read_file(
filepath,
"email.password_reset_template_success_html",
)
if config.get("public_baseurl") is None:
raise RuntimeError(
"email.password_reset_behaviour is set to 'local' but no "
"public_baseurl is set. This is necessary to generate password "
"reset links"
)
if self.email_enable_notifs:
required = [
"smtp_host",
@ -121,10 +208,6 @@ class EmailConfig(Config):
self.email_riot_base_url = email_config.get(
"riot_base_url", None
)
else:
self.email_enable_notifs = False
# Not much point setting defaults for the rest: it would be an
# error for them to be used.
if account_validity_renewal_enabled:
self.email_expiry_template_html = email_config.get(
@ -141,10 +224,8 @@ class EmailConfig(Config):
def default_config(self, config_dir_path, server_name, **kwargs):
return """
# Enable sending emails for notification events or expiry notices
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.
# Enable sending emails for password resets, notification events or
# account expiry notices
#
# If your SMTP server requires authentication, the optional smtp_user &
# smtp_pass variables should be used
@ -152,20 +233,62 @@ class EmailConfig(Config):
#email:
# enable_notifs: false
# smtp_host: "localhost"
# smtp_port: 25
# smtp_port: 25 # SSL: 465, STARTTLS: 587
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: False
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
# # if template_dir is unset, uses the example templates that are part of
# # the Synapse distribution.
#
# # Enable email notifications by default
# notif_for_new_users: True
#
# # Defining a custom URL for Riot is only needed if email notifications
# # should contain links to a self-hosted installation of Riot; when set
# # the "app_name" setting is ignored
# riot_base_url: "http://localhost/riot"
#
# # Enable sending password reset emails via the configured, trusted
# # identity servers
# #
# # IMPORTANT! This will give a malicious or overtaken identity server
# # the ability to reset passwords for your users! Make absolutely sure
# # that you want to do this! It is strongly recommended that password
# # reset emails be sent by the homeserver instead
# #
# # If this option is set to false and SMTP options have not been
# # configured, resetting user passwords via email will be disabled
# #trust_identity_server_for_password_resets: false
#
# # Configure the time that a validation email or text message code
# # will expire after sending
# #
# # This is currently used for password resets
# #validation_token_lifetime: 1h
#
# # Template directory. All template files should be stored within this
# # directory
# #
# #template_dir: res/templates
#
# # Templates for email notifications
# #
# notif_template_html: notif_mail.html
# notif_template_text: notif_mail.txt
# # Templates for account expiry notices.
#
# # Templates for account expiry notices
# #
# expiry_template_html: notice_expiry.html
# expiry_template_text: notice_expiry.txt
# notif_for_new_users: True
# riot_base_url: "http://localhost/riot"
#
# # Templates for password reset emails sent by the homeserver
# #
# #password_reset_template_html: password_reset.html
# #password_reset_template_text: password_reset.txt
#
# # Templates for password reset success and failure pages that a user
# # will see after attempting to reset their password
# #
# #password_reset_template_success_html: password_reset_success.html
# #password_reset_template_failure_html: password_reset_failure.html
"""

View File

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -17,6 +18,8 @@ import hashlib
import logging
import os
import attr
import jsonschema
from signedjson.key import (
NACL_ED25519,
decode_signing_key_base64,
@ -32,11 +35,36 @@ from synapse.util.stringutils import random_string, random_string_with_symbols
from ._base import Config, ConfigError
INSECURE_NOTARY_ERROR = """\
Your server is configured to accept key server responses without signature
validation or TLS certificate validation. This is likely to be very insecure. If
you are *sure* you want to do this, set 'accept_keys_insecurely' on the
keyserver configuration."""
RELYING_ON_MATRIX_KEY_ERROR = """\
Your server is configured to accept key server responses without TLS certificate
validation, and which are only signed by the old (possibly compromised)
matrix.org signing key 'ed25519:auto'. This likely isn't what you want to do,
and you should enable 'federation_verify_certificates' in your configuration.
If you are *sure* you want to do this, set 'accept_keys_insecurely' on the
trusted_key_server configuration."""
logger = logging.getLogger(__name__)
class KeyConfig(Config):
@attr.s
class TrustedKeyServer(object):
# string: name of the server.
server_name = attr.ib()
# dict[str,VerifyKey]|None: map from key id to key object, or None to disable
# signature verification.
verify_keys = attr.ib(default=None)
class KeyConfig(Config):
def read_config(self, config):
# the signing key can be specified inline or in a separate file
if "signing_key" in config:
@ -49,16 +77,27 @@ class KeyConfig(Config):
config.get("old_signing_keys", {})
)
self.key_refresh_interval = self.parse_duration(
config.get("key_refresh_interval", "1d"),
config.get("key_refresh_interval", "1d")
)
self.perspectives = self.read_perspectives(
config.get("perspectives", {}).get("servers", {
"matrix.org": {"verify_keys": {
"ed25519:auto": {
"key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
}
}}
})
# if neither trusted_key_servers nor perspectives are given, use the default.
if "perspectives" not in config and "trusted_key_servers" not in config:
key_servers = [{"server_name": "matrix.org"}]
else:
key_servers = config.get("trusted_key_servers", [])
if not isinstance(key_servers, list):
raise ConfigError(
"trusted_key_servers, if given, must be a list, not a %s"
% (type(key_servers).__name__,)
)
# merge the 'perspectives' config into the 'trusted_key_servers' config.
key_servers.extend(_perspectives_to_key_servers(config))
# list of TrustedKeyServer objects
self.key_servers = list(
_parse_key_servers(key_servers, self.federation_verify_certificates)
)
self.macaroon_secret_key = config.get(
@ -78,8 +117,9 @@ class KeyConfig(Config):
# falsification of values
self.form_secret = config.get("form_secret", None)
def default_config(self, config_dir_path, server_name, generate_secrets=False,
**kwargs):
def default_config(
self, config_dir_path, server_name, generate_secrets=False, **kwargs
):
base_key_name = os.path.join(config_dir_path, server_name)
if generate_secrets:
@ -91,7 +131,8 @@ class KeyConfig(Config):
macaroon_secret_key = "# macaroon_secret_key: <PRIVATE STRING>"
form_secret = "# form_secret: <PRIVATE STRING>"
return """\
return (
"""\
# a secret which is used to sign access tokens. If none is specified,
# the registration_shared_secret is used, if one is given; otherwise,
# a secret key is derived from the signing key.
@ -133,33 +174,53 @@ class KeyConfig(Config):
# The trusted servers to download signing keys from.
#
#perspectives:
# servers:
# "matrix.org":
# verify_keys:
# "ed25519:auto":
# key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
""" % locals()
def read_perspectives(self, perspectives_servers):
servers = {}
for server_name, server_config in perspectives_servers.items():
for key_id, key_data in server_config["verify_keys"].items():
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
servers.setdefault(server_name, {})[key_id] = verify_key
return servers
# When we need to fetch a signing key, each server is tried in parallel.
#
# Normally, the connection to the key server is validated via TLS certificates.
# Additional security can be provided by configuring a `verify key`, which
# will make synapse check that the response is signed by that key.
#
# This setting supersedes an older setting named `perspectives`. The old format
# is still supported for backwards-compatibility, but it is deprecated.
#
# Options for each entry in the list include:
#
# server_name: the name of the server. required.
#
# verify_keys: an optional map from key id to base64-encoded public key.
# If specified, we will check that the response is signed by at least
# one of the given keys.
#
# accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
# and federation_verify_certificates is not `true`, synapse will refuse
# to start, because this would allow anyone who can spoof DNS responses
# to masquerade as the trusted key server. If you know what you are doing
# and are sure that your network environment provides a secure connection
# to the key server, you can set this to `true` to override this
# behaviour.
#
# An example configuration might look like:
#
#trusted_key_servers:
# - server_name: "my_trusted_server.example.com"
# verify_keys:
# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
# - server_name: "my_other_trusted_server.example.com"
#
# The default configuration is:
#
#trusted_key_servers:
# - server_name: "matrix.org"
"""
% locals()
)
def read_signing_key(self, signing_key_path):
signing_keys = self.read_file(signing_key_path, "signing_key")
try:
return read_signing_keys(signing_keys.splitlines(True))
except Exception as e:
raise ConfigError(
"Error reading signing_key: %s" % (str(e))
)
raise ConfigError("Error reading signing_key: %s" % (str(e)))
def read_old_signing_keys(self, old_signing_keys):
keys = {}
@ -182,9 +243,7 @@ class KeyConfig(Config):
if not self.path_exists(signing_key_path):
with open(signing_key_path, "w") as signing_key_file:
key_id = "a_" + random_string(4)
write_signing_keys(
signing_key_file, (generate_signing_key(key_id),),
)
write_signing_keys(signing_key_file, (generate_signing_key(key_id),))
else:
signing_keys = self.read_file(signing_key_path, "signing_key")
if len(signing_keys.split("\n")[0].split()) == 1:
@ -194,6 +253,116 @@ class KeyConfig(Config):
NACL_ED25519, key_id, signing_keys.split("\n")[0]
)
with open(signing_key_path, "w") as signing_key_file:
write_signing_keys(
signing_key_file, (key,),
write_signing_keys(signing_key_file, (key,))
def _perspectives_to_key_servers(config):
"""Convert old-style 'perspectives' configs into new-style 'trusted_key_servers'
Returns an iterable of entries to add to trusted_key_servers.
"""
# 'perspectives' looks like:
#
# {
# "servers": {
# "matrix.org": {
# "verify_keys": {
# "ed25519:auto": {
# "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
# }
# }
# }
# }
# }
#
# 'trusted_key_servers' looks like:
#
# [
# {
# "server_name": "matrix.org",
# "verify_keys": {
# "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
# }
# }
# ]
perspectives_servers = config.get("perspectives", {}).get("servers", {})
for server_name, server_opts in perspectives_servers.items():
trusted_key_server_entry = {"server_name": server_name}
verify_keys = server_opts.get("verify_keys")
if verify_keys is not None:
trusted_key_server_entry["verify_keys"] = {
key_id: key_data["key"] for key_id, key_data in verify_keys.items()
}
yield trusted_key_server_entry
TRUSTED_KEY_SERVERS_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "schema for the trusted_key_servers setting",
"type": "array",
"items": {
"type": "object",
"properties": {
"server_name": {"type": "string"},
"verify_keys": {
"type": "object",
# each key must be a base64 string
"additionalProperties": {"type": "string"},
},
},
"required": ["server_name"],
},
}
def _parse_key_servers(key_servers, federation_verify_certificates):
try:
jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA)
except jsonschema.ValidationError as e:
raise ConfigError("Unable to parse 'trusted_key_servers': " + e.message)
for server in key_servers:
server_name = server["server_name"]
result = TrustedKeyServer(server_name=server_name)
verify_keys = server.get("verify_keys")
if verify_keys is not None:
result.verify_keys = {}
for key_id, key_base64 in verify_keys.items():
if not is_signing_algorithm_supported(key_id):
raise ConfigError(
"Unsupported signing algorithm on key %s for server %s in "
"trusted_key_servers" % (key_id, server_name)
)
try:
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
except Exception as e:
raise ConfigError(
"Unable to parse key %s for server %s in "
"trusted_key_servers: %s" % (key_id, server_name, e)
)
result.verify_keys[key_id] = verify_key
if (
not federation_verify_certificates and
not server.get("accept_keys_insecurely")
):
_assert_keyserver_has_verify_keys(result)
yield result
def _assert_keyserver_has_verify_keys(trusted_key_server):
if not trusted_key_server.verify_keys:
raise ConfigError(INSECURE_NOTARY_ERROR)
# also check that they are not blindly checking the old matrix.org key
if trusted_key_server.server_name == "matrix.org" and any(
key_id == "ed25519:auto" for key_id in trusted_key_server.verify_keys
):
raise ConfigError(RELYING_ON_MATRIX_KEY_ERROR)
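A usage sketch for the parser above; the verify key shown is the matrix.org key from the old default perspectives block, and with certificate verification enabled the insecure-notary checks do not fire:

key_servers = list(
    _parse_key_servers(
        [
            {
                "server_name": "matrix.org",
                "verify_keys": {
                    "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
                },
            }
        ],
        federation_verify_certificates=True,
    )
)
# -> [TrustedKeyServer(server_name='matrix.org', verify_keys={...})]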

View File

@ -36,7 +36,7 @@ logger = logging.Logger(__name__)
# in the list.
DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0']
DEFAULT_ROOM_VERSION = "1"
DEFAULT_ROOM_VERSION = "4"
class ServerConfig(Config):
@ -585,6 +585,22 @@ class ServerConfig(Config):
# Monthly Active User Blocking
#
# Used in cases where the admin or server owner wants to limit the
# number of monthly active users.
#
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
# enabled and a limit is reached, the server returns a 'ResourceLimitError'
# with error type Codes.RESOURCE_LIMIT_EXCEEDED.
#
# 'max_mau_value' is the hard limit of monthly active users above which
# the server will start blocking user actions.
#
# 'mau_trial_days' is a means to add a grace period for active users. It
# means that users must be active for this number of days before they
# can be considered active and guards against the case where lots of users
# sign up in a short space of time never to return after their initial
# session.
#
#limit_usage_by_mau: False
#max_mau_value: 50
#mau_trial_days: 2

View File

@ -74,7 +74,7 @@ class TlsConfig(Config):
# Whether to verify certificates on outbound federation traffic
self.federation_verify_certificates = config.get(
"federation_verify_certificates", False,
"federation_verify_certificates", True,
)
# Whitelist of domains to not verify certificates for
@ -107,7 +107,7 @@ class TlsConfig(Config):
certs = []
for ca_file in custom_ca_list:
logger.debug("Reading custom CA certificate file: %s", ca_file)
content = self.read_file(ca_file)
content = self.read_file(ca_file, "federation_custom_ca_list")
# Parse the CA certificates
try:
@ -241,12 +241,12 @@ class TlsConfig(Config):
#
#tls_private_key_path: "%(tls_private_key_path)s"
# Whether to verify TLS certificates when sending federation traffic.
# Whether to verify TLS server certificates for outbound federation requests.
#
# This currently defaults to `false`, however this will change in
# Synapse 1.0 when valid federation certificates will be required.
# Defaults to `true`. To disable certificate verification, uncomment the
# following line.
#
#federation_verify_certificates: true
#federation_verify_certificates: false
# Skip federation certificate verification on the following whitelist
# of domains.

View File

@ -31,7 +31,11 @@ logger = logging.getLogger(__name__)
def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
"""Check whether the hash for this PDU matches the contents"""
name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
logger.debug("Expecting hash: %s", encode_base64(expected_hash))
logger.debug(
"Verifying content hash on %s (expecting: %s)",
event.event_id,
encode_base64(expected_hash),
)
# some malformed events lack a 'hashes'. Protect against it being missing
# or a weird type by basically treating it the same as an unhashed event.

View File

@ -60,9 +60,9 @@ logger = logging.getLogger(__name__)
@attr.s(slots=True, cmp=False)
class VerifyKeyRequest(object):
class VerifyJsonRequest(object):
"""
A request for a verify key to verify a JSON object.
A request to verify a JSON object.
Attributes:
server_name(str): The name of the server to verify against.
@ -75,7 +75,7 @@ class VerifyKeyRequest(object):
minimum_valid_until_ts (int): time at which we require the signing key to
be valid. (0 implies we don't care)
deferred(Deferred[str, str, nacl.signing.VerifyKey]):
key_ready (Deferred[str, str, nacl.signing.VerifyKey]):
A deferred (server_name, key_id, verify_key) tuple that resolves when
a verify key has been fetched. The deferreds' callbacks are run with no
logcontext.
@ -85,10 +85,14 @@ class VerifyKeyRequest(object):
"""
server_name = attr.ib()
key_ids = attr.ib()
json_object = attr.ib()
minimum_valid_until_ts = attr.ib()
deferred = attr.ib(default=attr.Factory(defer.Deferred))
request_name = attr.ib()
key_ids = attr.ib(init=False)
key_ready = attr.ib(default=attr.Factory(defer.Deferred))
def __attrs_post_init__(self):
self.key_ids = signature_ids(self.json_object, self.server_name)
class KeyLookupError(ValueError):
@ -114,7 +118,9 @@ class Keyring(object):
# These are regular, logcontext-agnostic Deferreds.
self.key_downloads = {}
def verify_json_for_server(self, server_name, json_object, validity_time):
def verify_json_for_server(
self, server_name, json_object, validity_time, request_name
):
"""Verify that a JSON object has been signed by a given server
Args:
@ -125,24 +131,31 @@ class Keyring(object):
validity_time (int): timestamp at which we require the signing key to
be valid. (0 implies we don't care)
request_name (str): an identifier for this json object (eg, an event id)
for logging.
Returns:
Deferred[None]: completes if the object was correctly signed, otherwise
errbacks with an error
"""
req = server_name, json_object, validity_time
return logcontext.make_deferred_yieldable(
self.verify_json_objects_for_server((req,))[0]
)
req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
requests = (req,)
return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0])
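A call-site sketch (names hypothetical): the only change for callers is the trailing request_name, which feeds the "Verifying %s for %s ..." debug line further down.

from twisted.internet import defer

@defer.inlineCallbacks
def check_signature(keyring, origin, request_json, validity_time):
    # request_name ("federation request") is used only for logging.
    yield keyring.verify_json_for_server(
        origin, request_json, validity_time, "federation request"
    )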
def verify_json_objects_for_server(self, server_and_json):
"""Bulk verifies signatures of json objects, bulk fetching keys as
necessary.
Args:
server_and_json (iterable[Tuple[str, dict, int]):
Iterable of triplets of (server_name, json_object, validity_time)
validity_time is a timestamp at which the signing key must be valid.
server_and_json (iterable[Tuple[str, dict, int, str]]):
Iterable of (server_name, json_object, validity_time, request_name)
tuples.
validity_time is a timestamp at which the signing key must be
valid.
request_name is an identifier for this json object (eg, an event id)
for logging.
Returns:
List<Deferred[None]>: for each input triplet, a deferred indicating success
@ -150,38 +163,54 @@ class Keyring(object):
server_name. The deferreds run their callbacks in the sentinel
logcontext.
"""
# a list of VerifyKeyRequests
verify_requests = []
return self._verify_objects(
VerifyJsonRequest(server_name, json_object, validity_time, request_name)
for server_name, json_object, validity_time, request_name in server_and_json
)
def _verify_objects(self, verify_requests):
"""Does the work of verify_json_[objects_]for_server
Args:
verify_requests (iterable[VerifyJsonRequest]):
Iterable of verification requests.
Returns:
List<Deferred[None]>: for each input item, a deferred indicating success
or failure to verify each json object's signature for the given
server_name. The deferreds run their callbacks in the sentinel
logcontext.
"""
# a list of VerifyJsonRequests which are awaiting a key lookup
key_lookups = []
handle = preserve_fn(_handle_key_deferred)
def process(server_name, json_object, validity_time):
def process(verify_request):
"""Process an entry in the request list
Given a (server_name, json_object, validity_time) triplet from the request
list, adds a key request to verify_requests, and returns a deferred which
Adds a key request to key_lookups, and returns a deferred which
will complete or fail (in the sentinel context) when verification completes.
"""
key_ids = signature_ids(json_object, server_name)
if not key_ids:
if not verify_request.key_ids:
return defer.fail(
SynapseError(
400, "Not signed by %s" % (server_name,), Codes.UNAUTHORIZED
400,
"Not signed by %s" % (verify_request.server_name,),
Codes.UNAUTHORIZED,
)
)
logger.debug(
"Verifying for %s with key_ids %s, min_validity %i",
server_name,
key_ids,
validity_time,
"Verifying %s for %s with key_ids %s, min_validity %i",
verify_request.request_name,
verify_request.server_name,
verify_request.key_ids,
verify_request.minimum_valid_until_ts,
)
# add the key request to the queue, but don't start it off yet.
verify_request = VerifyKeyRequest(
server_name, key_ids, json_object, validity_time
)
verify_requests.append(verify_request)
key_lookups.append(verify_request)
# now run _handle_key_deferred, which will wait for the key request
# to complete and then do the verification.
@ -190,13 +219,10 @@ class Keyring(object):
# wrap it with preserve_fn (aka run_in_background)
return handle(verify_request)
results = [
process(server_name, json_object, validity_time)
for server_name, json_object, validity_time in server_and_json
]
results = [process(r) for r in verify_requests]
if verify_requests:
run_in_background(self._start_key_lookups, verify_requests)
if key_lookups:
run_in_background(self._start_key_lookups, key_lookups)
return results
@ -204,10 +230,10 @@ class Keyring(object):
def _start_key_lookups(self, verify_requests):
"""Sets off the key fetches for each verify request
Once each fetch completes, verify_request.deferred will be resolved.
Once each fetch completes, verify_request.key_ready will be resolved.
Args:
verify_requests (List[VerifyKeyRequest]):
verify_requests (List[VerifyJsonRequest]):
"""
try:
@ -250,7 +276,7 @@ class Keyring(object):
return res
for verify_request in verify_requests:
verify_request.deferred.addBoth(remove_deferreds, verify_request)
verify_request.key_ready.addBoth(remove_deferreds, verify_request)
except Exception:
logger.exception("Error starting key lookups")
@ -303,16 +329,16 @@ class Keyring(object):
def _get_server_verify_keys(self, verify_requests):
"""Tries to find at least one key for each verify request
For each verify_request, verify_request.deferred is called back with
For each verify_request, verify_request.key_ready is called back with
params (server_name, key_id, VerifyKey) if a key is found, or errbacked
with a SynapseError if none of the keys are found.
Args:
verify_requests (list[VerifyKeyRequest]): list of verify requests
verify_requests (list[VerifyJsonRequest]): list of verify requests
"""
remaining_requests = set(
(rq for rq in verify_requests if not rq.deferred.called)
(rq for rq in verify_requests if not rq.key_ready.called)
)
@defer.inlineCallbacks
@ -326,7 +352,7 @@ class Keyring(object):
# look for any requests which weren't satisfied
with PreserveLoggingContext():
for verify_request in remaining_requests:
verify_request.deferred.errback(
verify_request.key_ready.errback(
SynapseError(
401,
"No key for %s with ids in %s (min_validity %i)"
@ -346,8 +372,8 @@ class Keyring(object):
logger.error("Unexpected error in _get_server_verify_keys: %s", err)
with PreserveLoggingContext():
for verify_request in remaining_requests:
if not verify_request.deferred.called:
verify_request.deferred.errback(err)
if not verify_request.key_ready.called:
verify_request.key_ready.errback(err)
run_in_background(do_iterations).addErrback(on_err)
@ -357,7 +383,7 @@ class Keyring(object):
Args:
fetcher (KeyFetcher): fetcher to use to fetch the keys
remaining_requests (set[VerifyKeyRequest]): outstanding key requests.
remaining_requests (set[VerifyJsonRequest]): outstanding key requests.
Any successfully-completed requests will be removed from the list.
"""
# dict[str, dict[str, int]]: keys to fetch.
@ -366,7 +392,7 @@ class Keyring(object):
for verify_request in remaining_requests:
# any completed requests should already have been removed
assert not verify_request.deferred.called
assert not verify_request.key_ready.called
keys_for_server = missing_keys[verify_request.server_name]
for key_id in verify_request.key_ids:
@ -376,7 +402,7 @@ class Keyring(object):
# the requests.
keys_for_server[key_id] = max(
keys_for_server.get(key_id, -1),
verify_request.minimum_valid_until_ts
verify_request.minimum_valid_until_ts,
)
results = yield fetcher.get_keys(missing_keys)
@ -386,7 +412,7 @@ class Keyring(object):
server_name = verify_request.server_name
# see if any of the keys we got this time are sufficient to
# complete this VerifyKeyRequest.
# complete this VerifyJsonRequest.
result_keys = results.get(server_name, {})
for key_id in verify_request.key_ids:
fetch_key_result = result_keys.get(key_id)
@ -402,7 +428,7 @@ class Keyring(object):
continue
with PreserveLoggingContext():
verify_request.deferred.callback(
verify_request.key_ready.callback(
(server_name, key_id, fetch_key_result.verify_key)
)
completed.append(verify_request)
@ -454,9 +480,7 @@ class BaseV2KeyFetcher(object):
self.config = hs.get_config()
@defer.inlineCallbacks
def process_v2_response(
self, from_server, response_json, time_added_ms
):
def process_v2_response(self, from_server, response_json, time_added_ms):
"""Parse a 'Server Keys' structure from the result of a /key request
This is used to parse either the entirety of the response from
@ -561,25 +585,27 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
super(PerspectivesKeyFetcher, self).__init__(hs)
self.clock = hs.get_clock()
self.client = hs.get_http_client()
self.perspective_servers = self.config.perspectives
self.key_servers = self.config.key_servers
@defer.inlineCallbacks
def get_keys(self, keys_to_fetch):
"""see KeyFetcher.get_keys"""
@defer.inlineCallbacks
def get_key(perspective_name, perspective_keys):
def get_key(key_server):
try:
result = yield self.get_server_verify_key_v2_indirect(
keys_to_fetch, perspective_name, perspective_keys
keys_to_fetch, key_server
)
defer.returnValue(result)
except KeyLookupError as e:
logger.warning("Key lookup failed from %r: %s", perspective_name, e)
logger.warning(
"Key lookup failed from %r: %s", key_server.server_name, e
)
except Exception as e:
logger.exception(
"Unable to get key from %r: %s %s",
perspective_name,
key_server.server_name,
type(e).__name__,
str(e),
)
@ -589,8 +615,8 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
results = yield logcontext.make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(get_key, p_name, p_keys)
for p_name, p_keys in self.perspective_servers.items()
run_in_background(get_key, server)
for server in self.key_servers
],
consumeErrors=True,
).addErrback(unwrapFirstError)
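Stripped of the keyring specifics, a minimal sketch of the fan-out pattern used above (the helper name is invented; logcontext, run_in_background and unwrapFirstError are the utilities this module already imports):
def fetch_from_all_key_servers(key_servers, get_key):
    # Kick off one background task per notary, then wait for all of them.
    # consumeErrors=True stops one failure from swallowing the others;
    # unwrapFirstError re-raises the first real exception.
    return logcontext.make_deferred_yieldable(
        defer.gatherResults(
            [run_in_background(get_key, server) for server in key_servers],
            consumeErrors=True,
        ).addErrback(unwrapFirstError)
    )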
@ -605,17 +631,15 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
@defer.inlineCallbacks
def get_server_verify_key_v2_indirect(
self, keys_to_fetch, perspective_name, perspective_keys
self, keys_to_fetch, key_server
):
"""
Args:
keys_to_fetch (dict[str, dict[str, int]]):
the keys to be fetched. server_name -> key_id -> min_valid_ts
perspective_name (str): name of the notary server to query for the keys
perspective_keys (dict[str, VerifyKey]): map of key_id->key for the
notary server
key_server (synapse.config.key.TrustedKeyServer): notary server to query for
the keys
Returns:
Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map
@ -625,6 +649,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
KeyLookupError if there was an error processing the entire response from
the server
"""
perspective_name = key_server.server_name
logger.info(
"Requesting keys %s from notary server %s",
keys_to_fetch.items(),
@ -665,11 +690,13 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
)
try:
processed_response = yield self._process_perspectives_response(
perspective_name,
perspective_keys,
self._validate_perspectives_response(
key_server,
response,
time_added_ms=time_now_ms,
)
processed_response = yield self.process_v2_response(
perspective_name, response, time_added_ms=time_now_ms
)
except KeyLookupError as e:
logger.warning(
@ -693,28 +720,24 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
defer.returnValue(keys)
def _process_perspectives_response(
self, perspective_name, perspective_keys, response, time_added_ms
def _validate_perspectives_response(
self, key_server, response,
):
"""Parse a 'Server Keys' structure from the result of a /key/query request
Checks that the entry is correctly signed by the perspectives server, and then
passes over to process_v2_response
"""Optionally check the signature on the result of a /key/query request
Args:
perspective_name (str): the name of the notary server that produced this
result
perspective_keys (dict[str, VerifyKey]): map of key_id->key for the
notary server
key_server (synapse.config.key.TrustedKeyServer): the notary server that
produced this result
response (dict): the json-decoded Server Keys response object
time_added_ms (int): the timestamp to record in server_keys_json
Returns:
Deferred[dict[str, FetchKeyResult]]: map from key_id to result object
"""
perspective_name = key_server.server_name
perspective_keys = key_server.verify_keys
if perspective_keys is None:
# signature checking is disabled on this server
return
if (
u"signatures" not in response
or perspective_name not in response[u"signatures"]
@ -736,10 +759,6 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
)
)
return self.process_v2_response(
perspective_name, response, time_added_ms=time_added_ms
)
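The check above boils down to finding a signature from a key we already trust on the notary's response; a rough standalone sketch (function name invented, signedjson API assumed):
from signedjson.sign import SignatureVerifyException, verify_signed_json

def notary_response_is_signed(response, perspective_name, perspective_keys):
    # perspective_keys maps key_id -> VerifyKey for the keys we trust.
    for key_id in response.get("signatures", {}).get(perspective_name, {}):
        if key_id in perspective_keys:
            try:
                verify_signed_json(
                    response, perspective_name, perspective_keys[key_id]
                )
                return True
            except SignatureVerifyException:
                return False
    return False  # no trusted key signed this response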
class ServerKeyFetcher(BaseV2KeyFetcher):
"""KeyFetcher impl which fetches keys from the origin servers"""
@ -852,7 +871,7 @@ def _handle_key_deferred(verify_request):
"""Waits for the key to become available, and then performs a verification
Args:
verify_request (VerifyKeyRequest):
verify_request (VerifyJsonRequest):
Returns:
Deferred[None]
@ -862,14 +881,10 @@ def _handle_key_deferred(verify_request):
"""
server_name = verify_request.server_name
with PreserveLoggingContext():
_, key_id, verify_key = yield verify_request.deferred
_, key_id, verify_key = yield verify_request.key_ready
json_object = verify_request.json_object
logger.debug(
"Got key %s %s:%s for server %s, verifying"
% (key_id, verify_key.alg, verify_key.version, server_name)
)
try:
verify_signed_json(json_object, server_name, verify_key)
except SignatureVerifyException as e:

View File

@ -223,9 +223,6 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
the signatures are valid, or fail (with a SynapseError) if not.
"""
# (currently this is written assuming the v1 room structure; we'll probably want a
# separate function for checking v2 rooms)
# we want to check that the event is signed by:
#
# (a) the sender's server
@ -257,6 +254,10 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
for p in pdus
]
v = KNOWN_ROOM_VERSIONS.get(room_version)
if not v:
raise RuntimeError("Unrecognized room version %s" % (room_version,))
# First we check that the sender event is signed by the sender's domain
# (except if it's a 3pid invite, in which case it may be sent by any server)
pdus_to_check_sender = [
@ -264,10 +265,17 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
if not _is_invite_via_3pid(p.pdu)
]
more_deferreds = keyring.verify_json_objects_for_server([
(p.sender_domain, p.redacted_pdu_json, 0)
for p in pdus_to_check_sender
])
more_deferreds = keyring.verify_json_objects_for_server(
[
(
p.sender_domain,
p.redacted_pdu_json,
p.pdu.origin_server_ts if v.enforce_key_validity else 0,
p.pdu.event_id,
)
for p in pdus_to_check_sender
]
)
def sender_err(e, pdu_to_check):
errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
@ -287,20 +295,23 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
# event id's domain (normally only the case for joins/leaves), and add additional
# checks. Only do this if the room version has a concept of event ID domain
# (ie, the room version uses old-style non-hash event IDs).
v = KNOWN_ROOM_VERSIONS.get(room_version)
if not v:
raise RuntimeError("Unrecognized room version %s" % (room_version,))
if v.event_format == EventFormatVersions.V1:
pdus_to_check_event_id = [
p for p in pdus_to_check
if p.sender_domain != get_domain_from_id(p.pdu.event_id)
]
more_deferreds = keyring.verify_json_objects_for_server([
(get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, 0)
for p in pdus_to_check_event_id
])
more_deferreds = keyring.verify_json_objects_for_server(
[
(
get_domain_from_id(p.pdu.event_id),
p.redacted_pdu_json,
p.pdu.origin_server_ts if v.enforce_key_validity else 0,
p.pdu.event_id,
)
for p in pdus_to_check_event_id
]
)
def event_err(e, pdu_to_check):
errmsg = (

View File

@ -17,7 +17,6 @@
import copy
import itertools
import logging
import random
from six.moves import range
@ -233,7 +232,8 @@ class FederationClient(FederationBase):
moving to the next destination. None indicates no timeout.
Returns:
Deferred: Results in the requested PDU.
Deferred: Results in the requested PDU, or None if we were unable to find
it.
"""
# TODO: Rate limit the number of times we try and get the same event.
@ -258,7 +258,12 @@ class FederationClient(FederationBase):
destination, event_id, timeout=timeout,
)
logger.debug("transaction_data %r", transaction_data)
logger.debug(
"retrieved event id %s from %s: %r",
event_id,
destination,
transaction_data,
)
pdu_list = [
event_from_pdu_json(p, format_ver, outlier=outlier)
@ -280,6 +285,7 @@ class FederationClient(FederationBase):
"Failed to get PDU %s from %s because %s",
event_id, destination, e,
)
continue
except NotRetryingDestination as e:
logger.info(str(e))
continue
@ -326,12 +332,16 @@ class FederationClient(FederationBase):
state_event_ids = result["pdu_ids"]
auth_event_ids = result.get("auth_chain_ids", [])
fetched_events, failed_to_fetch = yield self.get_events(
[destination], room_id, set(state_event_ids + auth_event_ids)
fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
destination, room_id, set(state_event_ids + auth_event_ids)
)
if failed_to_fetch:
logger.warn("Failed to get %r", failed_to_fetch)
logger.warning(
"Failed to fetch missing state/auth events for %s: %s",
room_id,
failed_to_fetch
)
event_map = {
ev.event_id: ev for ev in fetched_events
@ -397,27 +407,20 @@ class FederationClient(FederationBase):
defer.returnValue((signed_pdus, signed_auth))
@defer.inlineCallbacks
def get_events(self, destinations, room_id, event_ids, return_local=True):
"""Fetch events from some remote destinations, checking if we already
have them.
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
"""Fetch events from a remote destination, checking if we already have them.
Args:
destinations (list)
destination (str)
room_id (str)
event_ids (list)
return_local (bool): Whether to include events we already have in
the DB in the returned list of events
Returns:
Deferred: A deferred resolving to a 2-tuple where the first is a list of
events and the second is a list of event ids that we failed to fetch.
"""
if return_local:
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
signed_events = list(seen_events.values())
else:
seen_events = yield self.store.have_seen_events(event_ids)
signed_events = []
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
signed_events = list(seen_events.values())
failed_to_fetch = set()
@ -428,10 +431,11 @@ class FederationClient(FederationBase):
if not missing_events:
defer.returnValue((signed_events, failed_to_fetch))
def random_server_list():
srvs = list(destinations)
random.shuffle(srvs)
return srvs
logger.debug(
"Fetching unknown state/auth events %s for room %s",
missing_events,
room_id,
)
room_version = yield self.store.get_room_version(room_id)
@ -443,7 +447,7 @@ class FederationClient(FederationBase):
deferreds = [
run_in_background(
self.get_pdu,
destinations=random_server_list(),
destinations=[destination],
event_id=e_id,
room_version=room_version,
)
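A hedged usage sketch of the narrowed helper (destination and event ids invented): events already in the local store are returned immediately, and only the remainder are requested from the single destination.
fetched, failed = yield self.get_events_from_store_or_dest(
    "remote.example", room_id, {"$a:remote.example", "$b:remote.example"},
)
# fetched mixes locally-stored and freshly-fetched events; failed holds
# the ids that the destination could not supply either.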

View File

@ -349,9 +349,10 @@ class PerDestinationQueue(object):
@defer.inlineCallbacks
def _get_new_device_messages(self, limit):
last_device_list = self._last_device_list_stream_id
# Will return at most 20 entries
# Retrieve list of new device updates to send to the destination
now_stream_id, results = yield self._store.get_devices_by_remote(
self._destination, last_device_list
self._destination, last_device_list, limit=limit,
)
edus = [
Edu(

View File

@ -140,7 +140,9 @@ class Authenticator(object):
401, "Missing Authorization headers", Codes.UNAUTHORIZED,
)
yield self.keyring.verify_json_for_server(origin, json_request, now)
yield self.keyring.verify_json_for_server(
origin, json_request, now, "Incoming request"
)
logger.info("Request from %s", origin)
request.authenticated_entity = origin

View File

@ -101,7 +101,9 @@ class GroupAttestationSigning(object):
if valid_until_ms < now:
raise SynapseError(400, "Attestation expired")
yield self.keyring.verify_json_for_server(server_name, attestation, now)
yield self.keyring.verify_json_for_server(
server_name, attestation, now, "Group attestation"
)
def create_attestation(self, group_id, user_id):
"""Create an attestation for the group_id and user_id with default

View File

@ -162,7 +162,7 @@ class AuthHandler(BaseHandler):
defer.returnValue(params)
@defer.inlineCallbacks
def check_auth(self, flows, clientdict, clientip):
def check_auth(self, flows, clientdict, clientip, password_servlet=False):
"""
Takes a dictionary sent by the client in the login / registration
protocol and handles the User-Interactive Auth flow.
@ -186,6 +186,16 @@ class AuthHandler(BaseHandler):
clientip (str): The IP address of the client.
password_servlet (bool): Whether the request originated from
PasswordRestServlet.
XXX: This is a temporary hack to distinguish between checking
for threepid validations locally (in the case of password
resets) and using the identity server (in the case of binding
a 3PID during registration). Once we start using the
homeserver for both tasks, this distinction will no longer be
necessary.
Returns:
defer.Deferred[dict, dict, str]: a deferred tuple of
(creds, params, session_id).
@ -241,7 +251,9 @@ class AuthHandler(BaseHandler):
if 'type' in authdict:
login_type = authdict['type']
try:
result = yield self._check_auth_dict(authdict, clientip)
result = yield self._check_auth_dict(
authdict, clientip, password_servlet=password_servlet,
)
if result:
creds[login_type] = result
self._save_session(session)
@ -351,7 +363,7 @@ class AuthHandler(BaseHandler):
return sess.setdefault('serverdict', {}).get(key, default)
@defer.inlineCallbacks
def _check_auth_dict(self, authdict, clientip):
def _check_auth_dict(self, authdict, clientip, password_servlet=False):
"""Attempt to validate the auth dict provided by a client
Args:
@ -369,7 +381,13 @@ class AuthHandler(BaseHandler):
login_type = authdict['type']
checker = self.checkers.get(login_type)
if checker is not None:
res = yield checker(authdict, clientip)
# XXX: Temporary workaround for having Synapse handle password resets
# See AuthHandler.check_auth for further details
res = yield checker(
authdict,
clientip=clientip,
password_servlet=password_servlet,
)
defer.returnValue(res)
# build a v1-login-style dict out of the authdict and fall back to the
@ -383,7 +401,7 @@ class AuthHandler(BaseHandler):
defer.returnValue(canonical_id)
@defer.inlineCallbacks
def _check_recaptcha(self, authdict, clientip):
def _check_recaptcha(self, authdict, clientip, **kwargs):
try:
user_response = authdict["response"]
except KeyError:
@ -429,20 +447,20 @@ class AuthHandler(BaseHandler):
defer.returnValue(True)
raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
def _check_email_identity(self, authdict, _):
return self._check_threepid('email', authdict)
def _check_email_identity(self, authdict, **kwargs):
return self._check_threepid('email', authdict, **kwargs)
def _check_msisdn(self, authdict, _):
def _check_msisdn(self, authdict, **kwargs):
return self._check_threepid('msisdn', authdict)
def _check_dummy_auth(self, authdict, _):
def _check_dummy_auth(self, authdict, **kwargs):
return defer.succeed(True)
def _check_terms_auth(self, authdict, _):
def _check_terms_auth(self, authdict, **kwargs):
return defer.succeed(True)
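Each checker is now invoked with keyword arguments and accepts **kwargs, so new parameters such as password_servlet can be threaded through check_auth without touching every checker; a minimal sketch of the convention (checker name invented):
def _check_example_auth(self, authdict, clientip=None,
                        password_servlet=False, **kwargs):
    # Parameters this checker does not care about are swallowed by
    # **kwargs rather than raising a TypeError.
    return defer.succeed(True)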
@defer.inlineCallbacks
def _check_threepid(self, medium, authdict):
def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs):
if 'threepid_creds' not in authdict:
raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
@ -451,7 +469,29 @@ class AuthHandler(BaseHandler):
identity_handler = self.hs.get_handlers().identity_handler
logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
threepid = yield identity_handler.threepid_from_creds(threepid_creds)
if (
not password_servlet
or self.hs.config.email_password_reset_behaviour == "remote"
):
threepid = yield identity_handler.threepid_from_creds(threepid_creds)
elif self.hs.config.email_password_reset_behaviour == "local":
row = yield self.store.get_threepid_validation_session(
medium,
threepid_creds["client_secret"],
sid=threepid_creds["sid"],
)
threepid = {
"medium": row["medium"],
"address": row["address"],
"validated_at": row["validated_at"],
} if row else None
if row:
# Valid threepid returned, delete from the db
yield self.store.delete_threepid_session(threepid_creds["sid"])
else:
raise SynapseError(400, "Password resets are not enabled on this homeserver")
if not threepid:
raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)

View File

@ -35,6 +35,7 @@ from synapse.api.errors import (
CodeMessageException,
FederationDeniedError,
FederationError,
RequestSendFailed,
StoreError,
SynapseError,
)
@ -2027,9 +2028,21 @@ class FederationHandler(BaseHandler):
"""
room_version = yield self.store.get_room_version(event.room_id)
yield self._update_auth_events_and_context_for_auth(
origin, event, context, auth_events
)
try:
yield self._update_auth_events_and_context_for_auth(
origin, event, context, auth_events
)
except Exception:
# We don't really mind if the above fails, so let's not fail
# processing if it does. However, it really shouldn't fail so
# let's still log as an exception since we'll still want to fix
# any bugs.
logger.exception(
"Failed to double check auth events for %s with remote. "
"Ignoring failure and continuing processing of event.",
event.event_id,
)
try:
self.auth.check(room_version, event, auth_events=auth_events)
except AuthError as e:
@ -2042,6 +2055,15 @@ class FederationHandler(BaseHandler):
):
"""Helper for do_auth. See there for docs.
Checks whether a given event has the expected auth events. If it
doesn't then we talk to the remote server to compare state to see if
we can come to a consensus (e.g. if one server missed some valid
state).
This attempts to resolve any potential divergence of state between
servers, but is not essential and so failures should not block further
processing of the event.
Args:
origin (str):
event (synapse.events.EventBase):
@ -2088,9 +2110,15 @@ class FederationHandler(BaseHandler):
missing_auth,
)
try:
remote_auth_chain = yield self.federation_client.get_event_auth(
origin, event.room_id, event.event_id
)
try:
remote_auth_chain = yield self.federation_client.get_event_auth(
origin, event.room_id, event.event_id
)
except RequestSendFailed as e:
# The other side isn't around or doesn't implement the
# endpoint, so let's just bail out.
logger.info("Failed to get event auth from remote: %s", e)
return
seen_remotes = yield self.store.have_seen_events(
[e.event_id for e in remote_auth_chain]
@ -2236,12 +2264,18 @@ class FederationHandler(BaseHandler):
try:
# 2. Get remote difference.
result = yield self.federation_client.query_auth(
origin,
event.room_id,
event.event_id,
local_auth_chain,
)
try:
result = yield self.federation_client.query_auth(
origin,
event.room_id,
event.event_id,
local_auth_chain,
)
except RequestSendFailed as e:
# The other side isn't around or doesn't implement the
# endpoint, so let's just bail out.
logger.info("Failed to query auth from remote: %s", e)
return
seen_remotes = yield self.store.have_seen_events(
[e.event_id for e in result["auth_chain"]]

View File

@ -247,7 +247,14 @@ class IdentityHandler(BaseHandler):
defer.returnValue(changed)
@defer.inlineCallbacks
def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
def requestEmailToken(
self,
id_server,
email,
client_secret,
send_attempt,
next_link=None,
):
if not self._should_trust_id_server(id_server):
raise SynapseError(
400, "Untrusted ID server '%s'" % id_server,
@ -259,7 +266,9 @@ class IdentityHandler(BaseHandler):
'client_secret': client_secret,
'send_attempt': send_attempt,
}
params.update(kwargs)
if next_link:
params.update({'next_link': next_link})
try:
data = yield self.http_client.post_json_get_json(

View File

@ -158,7 +158,13 @@ class PresenceHandler(object):
# have not yet been persisted
self.unpersisted_users_changes = set()
hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown)
hs.get_reactor().addSystemEventTrigger(
"before",
"shutdown",
run_as_background_process,
"presence.on_shutdown",
self._on_shutdown,
)
self.serial_to_user = {}
self._next_serial = 1
@ -828,14 +834,17 @@ class PresenceHandler(object):
# joins.
continue
event = yield self.store.get_event(event_id)
if event.content.get("membership") != Membership.JOIN:
event = yield self.store.get_event(event_id, allow_none=True)
if not event or event.content.get("membership") != Membership.JOIN:
# We only care about joins
continue
if prev_event_id:
prev_event = yield self.store.get_event(prev_event_id)
if prev_event.content.get("membership") == Membership.JOIN:
prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
if (
prev_event
and prev_event.content.get("membership") == Membership.JOIN
):
# Ignore changes to join events.
continue

View File

@ -115,6 +115,7 @@ class StatsHandler(StateDeltasHandler):
event_id = delta["event_id"]
stream_id = delta["stream_id"]
prev_event_id = delta["prev_event_id"]
stream_pos = delta["stream_id"]
logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
@ -136,10 +137,15 @@ class StatsHandler(StateDeltasHandler):
event_content = {}
if event_id is not None:
event_content = (yield self.store.get_event(event_id)).content or {}
event = yield self.store.get_event(event_id, allow_none=True)
if event:
event_content = event.content or {}
# We use stream_pos here rather than fetch by event_id as event_id
# may be None
now = yield self.store.get_received_ts_by_stream_pos(stream_pos)
# quantise time to the nearest bucket
now = yield self.store.get_received_ts(event_id)
now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size
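# Worked example (values invented): with stats_bucket_size = 3600 and a
# received ts of 1560000123456 ms, 1560000123456 // 1000 = 1560000123,
# then // 3600 = 433333, then * 3600 = 1559998800, so every event falling
# in the same bucket quantises to the same timestamp.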
if typ == EventTypes.Member:
@ -149,9 +155,11 @@ class StatsHandler(StateDeltasHandler):
# compare them.
prev_event_content = {}
if prev_event_id is not None:
prev_event_content = (
yield self.store.get_event(prev_event_id)
).content
prev_event = yield self.store.get_event(
prev_event_id, allow_none=True,
)
if prev_event:
prev_event_content = prev_event.content
membership = event_content.get("membership", Membership.LEAVE)
prev_membership = prev_event_content.get("membership", Membership.LEAVE)

View File

@ -586,30 +586,42 @@ class SyncHandler(object):
)
# if the room has a name or canonical_alias set, we can skip
# calculating heroes. we assume that if the event has contents, it'll
# be a valid name or canonical_alias - i.e. we're checking that they
# haven't been "deleted" by blatting {} over the top.
# calculating heroes. Empty strings are falsey, so we check
# for the "name" value and default to an empty string.
if name_id:
name = yield self.store.get_event(name_id, allow_none=True)
if name and name.content:
if name and name.content.get("name"):
defer.returnValue(summary)
if canonical_alias_id:
canonical_alias = yield self.store.get_event(
canonical_alias_id, allow_none=True,
)
if canonical_alias and canonical_alias.content:
if canonical_alias and canonical_alias.content.get("alias"):
defer.returnValue(summary)
me = sync_config.user.to_string()
joined_user_ids = [
r[0] for r in details.get(Membership.JOIN, empty_ms).members
r[0]
for r in details.get(Membership.JOIN, empty_ms).members
if r[0] != me
]
invited_user_ids = [
r[0] for r in details.get(Membership.INVITE, empty_ms).members
r[0]
for r in details.get(Membership.INVITE, empty_ms).members
if r[0] != me
]
gone_user_ids = (
[r[0] for r in details.get(Membership.LEAVE, empty_ms).members] +
[r[0] for r in details.get(Membership.BAN, empty_ms).members]
[
r[0]
for r in details.get(Membership.LEAVE, empty_ms).members
if r[0] != me
] + [
r[0]
for r in details.get(Membership.BAN, empty_ms).members
if r[0] != me
]
)
# FIXME: only build up a member_ids list for our heroes
@ -624,22 +636,13 @@ class SyncHandler(object):
member_ids[user_id] = event_id
# FIXME: order by stream ordering rather than as returned by SQL
me = sync_config.user.to_string()
if (joined_user_ids or invited_user_ids):
summary['m.heroes'] = sorted(
[
user_id
for user_id in (joined_user_ids + invited_user_ids)
if user_id != me
]
[user_id for user_id in (joined_user_ids + invited_user_ids)]
)[0:5]
else:
summary['m.heroes'] = sorted(
[
user_id
for user_id in gone_user_ids
if user_id != me
]
[user_id for user_id in gone_user_ids]
)[0:5]
if not sync_config.filter_collection.lazy_load_members():

View File

@ -80,10 +80,10 @@ ALLOWED_ATTRS = {
class Mailer(object):
def __init__(self, hs, app_name, notif_template_html, notif_template_text):
def __init__(self, hs, app_name, template_html, template_text):
self.hs = hs
self.notif_template_html = notif_template_html
self.notif_template_text = notif_template_text
self.template_html = template_html
self.template_text = template_text
self.sendmail = self.hs.get_sendmail()
self.store = self.hs.get_datastore()
@ -93,22 +93,49 @@ class Mailer(object):
logger.info("Created Mailer for app_name %s" % app_name)
@defer.inlineCallbacks
def send_password_reset_mail(
self,
email_address,
token,
client_secret,
sid,
):
"""Send an email with a password reset link to a user
Args:
email_address (str): Email address we're sending the password
reset to
token (str): Unique token generated by the server to verify
the password reset email was received
client_secret (str): Unique token generated by the client to
group together multiple email sending attempts
sid (str): The generated session ID
"""
if email.utils.parseaddr(email_address)[1] == '':
raise RuntimeError("Invalid 'to' email address")
link = (
self.hs.config.public_baseurl +
"_synapse/password_reset/email/submit_token"
"?token=%s&client_secret=%s&sid=%s" %
(token, client_secret, sid)
)
template_vars = {
"link": link,
}
yield self.send_email(
email_address,
"[%s] Password Reset Email" % self.hs.config.server_name,
template_vars,
)
@defer.inlineCallbacks
def send_notification_mail(self, app_id, user_id, email_address,
push_actions, reason):
try:
from_string = self.hs.config.email_notif_from % {
"app": self.app_name
}
except TypeError:
from_string = self.hs.config.email_notif_from
raw_from = email.utils.parseaddr(from_string)[1]
raw_to = email.utils.parseaddr(email_address)[1]
if raw_to == '':
raise RuntimeError("Invalid 'to' address")
"""Send email regarding a user's room notifications"""
rooms_in_order = deduped_ordered_list(
[pa['room_id'] for pa in push_actions]
)
@ -176,14 +203,36 @@ class Mailer(object):
"reason": reason,
}
html_text = self.notif_template_html.render(**template_vars)
yield self.send_email(
email_address,
"[%s] %s" % (self.app_name, summary_text),
template_vars,
)
@defer.inlineCallbacks
def send_email(self, email_address, subject, template_vars):
"""Send an email with the given information and template text"""
try:
from_string = self.hs.config.email_notif_from % {
"app": self.app_name
}
except TypeError:
from_string = self.hs.config.email_notif_from
raw_from = email.utils.parseaddr(from_string)[1]
raw_to = email.utils.parseaddr(email_address)[1]
if raw_to == '':
raise RuntimeError("Invalid 'to' address")
html_text = self.template_html.render(**template_vars)
html_part = MIMEText(html_text, "html", "utf8")
plain_text = self.notif_template_text.render(**template_vars)
plain_text = self.template_text.render(**template_vars)
text_part = MIMEText(plain_text, "plain", "utf8")
multipart_msg = MIMEMultipart('alternative')
multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
multipart_msg['Subject'] = subject
multipart_msg['From'] = from_string
multipart_msg['To'] = email_address
multipart_msg['Date'] = email.utils.formatdate()
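A hedged sketch of how the refactored pieces compose (template objects and addresses invented): the password reset path reuses the generic send_email, differing only in subject and template variables.
mailer = Mailer(
    hs=hs,
    app_name="Matrix",
    template_html=reset_template_html,  # pre-loaded jinja2 templates
    template_text=reset_template_text,
)
yield mailer.send_password_reset_mail(
    "user@example.com", token, client_secret, session_id,
)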

View File

@ -70,8 +70,8 @@ class PusherFactory(object):
mailer = Mailer(
hs=self.hs,
app_name=app_name,
notif_template_html=self.notif_template_html,
notif_template_text=self.notif_template_text,
template_html=self.notif_template_html,
template_text=self.notif_template_text,
)
self.mailers[app_name] = mailer
return EmailPusher(self.hs, pusherdict, mailer)

View File

@ -77,7 +77,7 @@ REQUIREMENTS = [
]
CONDITIONAL_REQUIREMENTS = {
"email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"],
"email": ["Jinja2>=2.9", "bleach>=1.4.2"],
"matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
# we use execute_batch, which arrived in psycopg 2.7.

View File

@ -0,0 +1,9 @@
<html>
<body>
<p>A password reset request has been received for your Matrix account. If this was you, please click the link below to confirm resetting your password:</p>
<a href="{{ link }}">{{ link }}</a>
<p>If this was not you, please disregard this email and contact your server administrator. Thank you.</p>
</body>
</html>

View File

@ -0,0 +1,7 @@
A password reset request has been received for your Matrix account. If this
was you, please click the link below to confirm resetting your password:
{{ link }}
If this was not you, please disregard this email and contact your server
administrator. Thank you.

View File

@ -0,0 +1,6 @@
<html>
<head></head>
<body>
<p>{{ failure_reason }}. Your password has not been reset.</p>
</body>
</html>

View File

@ -0,0 +1,6 @@
<html>
<head></head>
<body>
<p>Your password was successfully reset. You may now close this window.</p>
</body>
</html>

View File

@ -15,19 +15,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from six.moves import http_client
import jinja2
from twisted.internet import defer
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, SynapseError
from synapse.api.errors import Codes, SynapseError, ThreepidValidationError
from synapse.http.server import finish_request
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
parse_string,
)
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import random_string
from synapse.util.threepids import check_3pid_allowed
from ._base import client_patterns, interactive_auth_handler
@ -41,17 +47,42 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
def __init__(self, hs):
super(EmailPasswordRequestTokenRestServlet, self).__init__()
self.hs = hs
self.datastore = hs.get_datastore()
self.config = hs.config
self.identity_handler = hs.get_handlers().identity_handler
if self.config.email_password_reset_behaviour == "local":
from synapse.push.mailer import Mailer, load_jinja2_templates
templates = load_jinja2_templates(
config=hs.config,
template_html_name=hs.config.email_password_reset_template_html,
template_text_name=hs.config.email_password_reset_template_text,
)
self.mailer = Mailer(
hs=self.hs,
app_name=self.config.email_app_name,
template_html=templates[0],
template_text=templates[1],
)
@defer.inlineCallbacks
def on_POST(self, request):
if self.config.email_password_reset_behaviour == "off":
raise SynapseError(400, "Password resets have been disabled on this server")
body = parse_json_object_from_request(request)
assert_params_in_dict(body, [
'id_server', 'client_secret', 'email', 'send_attempt'
'client_secret', 'email', 'send_attempt'
])
if not check_3pid_allowed(self.hs, "email", body['email']):
# Extract params from body
client_secret = body["client_secret"]
email = body["email"]
send_attempt = body["send_attempt"]
next_link = body.get("next_link") # Optional param
if not check_3pid_allowed(self.hs, "email", email):
raise SynapseError(
403,
"Your email domain is not authorized on this server",
@ -59,15 +90,100 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
)
existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
'email', body['email']
'email', email,
)
if existingUid is None:
raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
ret = yield self.identity_handler.requestEmailToken(**body)
if self.config.email_password_reset_behaviour == "remote":
if 'id_server' not in body:
raise SynapseError(400, "Missing 'id_server' param in body")
# Have the identity server handle the password reset flow
ret = yield self.identity_handler.requestEmailToken(
body["id_server"], email, client_secret, send_attempt, next_link,
)
else:
# Send password reset emails from Synapse
sid = yield self.send_password_reset(
email, client_secret, send_attempt, next_link,
)
# Wrap the session id in a JSON object
ret = {"sid": sid}
defer.returnValue((200, ret))
@defer.inlineCallbacks
def send_password_reset(
self,
email,
client_secret,
send_attempt,
next_link=None,
):
"""Send a password reset email
Args:
email (str): The user's email address
client_secret (str): The provided client secret
send_attempt (int): Which send attempt this is
next_link (str|None): Optional link to redirect the user to after
validation
Returns:
The new session_id upon success
Raises:
SynapseError if an error occurred when sending the email
"""
# Check that this email/client_secret/send_attempt combo is new or
# greater than what we've seen previously
session = yield self.datastore.get_threepid_validation_session(
"email", client_secret, address=email, validated=False,
)
# Check to see if a session already exists and that it is not yet
# marked as validated
if session and session.get("validated_at") is None:
session_id = session['session_id']
last_send_attempt = session['last_send_attempt']
# Check that the send_attempt is higher than previous attempts
if send_attempt <= last_send_attempt:
# If not, just return a success without sending an email
defer.returnValue(session_id)
else:
# A non-validated session does not exist yet.
# Generate a session id
session_id = random_string(16)
# Generate a new validation token
token = random_string(32)
# Send the mail with the link containing the token, client_secret
# and session_id
try:
yield self.mailer.send_password_reset_mail(
email, token, client_secret, session_id,
)
except Exception:
logger.exception(
"Error sending a password reset email to %s", email,
)
raise SynapseError(
500, "An error was encountered when sending the password reset email"
)
token_expires = (self.hs.clock.time_msec() +
self.config.email_validation_token_lifetime)
yield self.datastore.start_or_continue_validation_session(
"email", email, session_id, client_secret, send_attempt,
next_link, token, token_expires,
)
defer.returnValue(session_id)
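An illustration of the send_attempt deduplication above (values invented): retrying with the same or a lower send_attempt returns the existing session without sending another email, while a higher send_attempt resends with a fresh token.
sid1 = yield self.send_password_reset("a@example.com", secret, send_attempt=1)
sid2 = yield self.send_password_reset("a@example.com", secret, send_attempt=1)
# sid1 == sid2: the second call short-circuits on the stored session.
sid3 = yield self.send_password_reset("a@example.com", secret, send_attempt=2)
# sid3 == sid1 as well, but a new token is generated and a mail is sent.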
class MsisdnPasswordRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/account/password/msisdn/requestToken$")
@ -80,6 +196,9 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
@defer.inlineCallbacks
def on_POST(self, request):
if self.config.email_password_reset_behaviour == "off":
raise SynapseError(400, "Password resets have been disabled on this server")
body = parse_json_object_from_request(request)
assert_params_in_dict(body, [
@ -107,6 +226,118 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
defer.returnValue((200, ret))
class PasswordResetSubmitTokenServlet(RestServlet):
"""Handles 3PID validation token submission"""
PATTERNS = [
re.compile("^/_synapse/password_reset/(?P<medium>[^/]*)/submit_token/*$"),
]
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
super(PasswordResetSubmitTokenServlet, self).__init__()
self.hs = hs
self.auth = hs.get_auth()
self.config = hs.config
self.clock = hs.get_clock()
self.datastore = hs.get_datastore()
@defer.inlineCallbacks
def on_GET(self, request, medium):
if medium != "email":
raise SynapseError(
400,
"This medium is currently not supported for password resets",
)
sid = parse_string(request, "sid")
client_secret = parse_string(request, "client_secret")
token = parse_string(request, "token")
# Attempt to validate a 3PID session
try:
# Mark the session as valid
next_link = yield self.datastore.validate_threepid_session(
sid,
client_secret,
token,
self.clock.time_msec(),
)
# Perform a 302 redirect if next_link is set
if next_link:
if next_link.startswith("file:///"):
logger.warn(
"Not redirecting to next_link as it is a local file: address"
)
else:
request.setResponseCode(302)
request.setHeader("Location", next_link)
finish_request(request)
defer.returnValue(None)
# Otherwise show the success template
html = self.config.email_password_reset_success_html_content
request.setResponseCode(200)
except ThreepidValidationError as e:
# Show a failure page with a reason
html = self.load_jinja2_template(
self.config.email_template_dir,
self.config.email_password_reset_failure_template,
template_vars={
"failure_reason": e.msg,
}
)
request.setResponseCode(e.code)
request.write(html.encode('utf-8'))
finish_request(request)
defer.returnValue(None)
def load_jinja2_template(self, template_dir, template_filename, template_vars):
"""Loads a jinja2 template with variables to insert
Args:
template_dir (str): The directory where templates are stored
template_filename (str): The name of the template in the template_dir
template_vars (Dict): Dictionary mapping variables in the template
to the values to insert for them
Returns:
str containing the contents of the rendered template
"""
loader = jinja2.FileSystemLoader(template_dir)
env = jinja2.Environment(loader=loader)
template = env.get_template(template_filename)
return template.render(**template_vars)
@defer.inlineCallbacks
def on_POST(self, request, medium):
if medium != "email":
raise SynapseError(
400,
"This medium is currently not supported for password resets",
)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, [
'sid', 'client_secret', 'token',
])
valid, _ = yield self.datastore.validate_threepid_validation_token(
body['sid'],
body['client_secret'],
body['token'],
self.clock.time_msec(),
)
response_code = 200 if valid else 400
defer.returnValue((response_code, {"success": valid}))
class PasswordRestServlet(RestServlet):
PATTERNS = client_patterns("/account/password$")
@ -144,6 +375,7 @@ class PasswordRestServlet(RestServlet):
result, params, _ = yield self.auth_handler.check_auth(
[[LoginType.EMAIL_IDENTITY], [LoginType.MSISDN]],
body, self.hs.get_ip_from_request(request),
password_servlet=True,
)
if LoginType.EMAIL_IDENTITY in result:
@ -417,6 +649,7 @@ class WhoamiRestServlet(RestServlet):
def register_servlets(hs, http_server):
EmailPasswordRequestTokenRestServlet(hs).register(http_server)
MsisdnPasswordRequestTokenRestServlet(hs).register(http_server)
PasswordResetSubmitTokenServlet(hs).register(http_server)
PasswordRestServlet(hs).register(http_server)
DeactivateAccountRestServlet(hs).register(http_server)
EmailThreepidRequestTokenRestServlet(hs).register(http_server)

View File

@ -39,6 +39,7 @@ class VersionsRestServlet(RestServlet):
"r0.2.0",
"r0.3.0",
"r0.4.0",
"r0.5.0",
],
# as per MSC1497:
"unstable_features": {

View File

@ -588,6 +588,10 @@ class SQLBaseStore(object):
Args:
table : string giving the table name
values : dict of new column names and values for them
or_ignore : bool; if True, a row that conflicts with an existing
one is ignored rather than raising an exception, and the
function returns False instead
desc : string giving a description of the transaction
Returns:
bool: Whether the row was inserted or not. Only useful when
@ -1228,8 +1232,8 @@ class SQLBaseStore(object):
)
txn.execute(select_sql, list(keyvalues.values()))
row = txn.fetchone()
if not row:
if allow_none:
return None

View File

@ -14,7 +14,7 @@
# limitations under the License.
import logging
from six import iteritems, itervalues
from six import iteritems
from canonicaljson import json
@ -72,11 +72,14 @@ class DeviceWorkerStore(SQLBaseStore):
defer.returnValue({d["device_id"]: d for d in devices})
def get_devices_by_remote(self, destination, from_stream_id):
@defer.inlineCallbacks
def get_devices_by_remote(self, destination, from_stream_id, limit):
"""Get stream of updates to send to remote servers
Returns:
(int, list[dict]): current stream id and list of updates
Deferred[tuple[int, list[dict]]]:
current stream id (ie, the stream id of the last update included in the
response), and the list of updates
"""
now_stream_id = self._device_list_id_gen.get_current_token()
@ -84,55 +87,131 @@ class DeviceWorkerStore(SQLBaseStore):
destination, int(from_stream_id)
)
if not has_changed:
return (now_stream_id, [])
defer.returnValue((now_stream_id, []))
return self.runInteraction(
# We retrieve n+1 devices from the list of outbound pokes where n is
# our outbound device update limit. We then check if the very last
# device has the same stream_id as the second-to-last device. If so,
# then we ignore all devices with that stream_id and only send the
# devices with a lower stream_id.
#
# If culling the list leaves us with no devices afterwards, we
# consider the device update to be too large and simply skip the
# stream_id; the rationale being that such a large device list update
# is likely an error.
updates = yield self.runInteraction(
"get_devices_by_remote",
self._get_devices_by_remote_txn,
destination,
from_stream_id,
now_stream_id,
limit + 1,
)
def _get_devices_by_remote_txn(
self, txn, destination, from_stream_id, now_stream_id
):
sql = """
SELECT user_id, device_id, max(stream_id) FROM device_lists_outbound_pokes
WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
GROUP BY user_id, device_id
LIMIT 20
"""
txn.execute(sql, (destination, from_stream_id, now_stream_id, False))
# Return an empty list if there are no updates
if not updates:
defer.returnValue((now_stream_id, []))
# if we have exceeded the limit, we need to exclude any results with the
# same stream_id as the last row.
if len(updates) > limit:
stream_id_cutoff = updates[-1][2]
now_stream_id = stream_id_cutoff - 1
else:
stream_id_cutoff = None
# Perform the equivalent of a GROUP BY
#
# Iterate through the updates list and copy non-duplicate
# (user_id, device_id) entries into a map, with the value being
# the max stream_id across each set of duplicate entries
#
# maps (user_id, device_id) -> stream_id
query_map = {(r[0], r[1]): r[2] for r in txn}
# as long as their stream_id does not match that of the last row
query_map = {}
for update in updates:
if stream_id_cutoff is not None and update[2] >= stream_id_cutoff:
# Stop processing updates
break
key = (update[0], update[1])
query_map[key] = max(query_map.get(key, 0), update[2])
# If we didn't find any updates with a stream_id lower than the cutoff, it
# means that there are more than limit updates all of which have the same
# stream_id.
# That should only happen if a client is spamming the server with new
# devices, in which case E2E isn't going to work well anyway. We'll just
# skip that stream_id and return an empty list, and continue with the next
# stream_id next time.
if not query_map:
return (now_stream_id, [])
defer.returnValue((stream_id_cutoff, []))
if len(query_map) >= 20:
now_stream_id = max(stream_id for stream_id in itervalues(query_map))
results = yield self._get_device_update_edus_by_remote(
destination,
from_stream_id,
query_map,
)
devices = self._get_e2e_device_keys_txn(
txn,
defer.returnValue((now_stream_id, results))
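A worked illustration of the cutoff logic above (rows invented):
# With limit = 3 the query fetches limit + 1 = 4 rows, e.g.
#   [("@u:a", "D1", 10), ("@u:a", "D2", 11),
#    ("@v:a", "D1", 12), ("@v:a", "D2", 12)]
# len(updates) > limit, so stream_id_cutoff = 12 and now_stream_id = 11:
# both stream_id-12 rows are dropped and will be resent on a later call,
# keeping all of a user's changes at a given stream_id together.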
def _get_devices_by_remote_txn(
self, txn, destination, from_stream_id, now_stream_id, limit
):
"""Return device update information for a given remote destination
Args:
txn (LoggingTransaction): The transaction to execute
destination (str): The host the device updates are intended for
from_stream_id (int): The minimum stream_id to filter updates by, exclusive
now_stream_id (int): The maximum stream_id to filter updates by, inclusive
limit (int): Maximum number of device updates to return
Returns:
List: List of device updates
"""
sql = """
SELECT user_id, device_id, stream_id FROM device_lists_outbound_pokes
WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
ORDER BY stream_id
LIMIT ?
"""
txn.execute(sql, (destination, from_stream_id, now_stream_id, False, limit))
return list(txn)
@defer.inlineCallbacks
def _get_device_update_edus_by_remote(
self, destination, from_stream_id, query_map,
):
"""Returns a list of device update EDUs as well as E2EE keys
Args:
destination (str): The host the device updates are intended for
from_stream_id (int): The minimum stream_id to filter updates by, exclusive
query_map (Dict[(str, str): int]): Dictionary mapping
user_id/device_id to update stream_id
Returns:
List[Dict]: List of objects representing a device update EDU
"""
devices = yield self.runInteraction(
"_get_e2e_device_keys_txn",
self._get_e2e_device_keys_txn,
query_map.keys(),
include_all_devices=True,
include_deleted_devices=True,
)
prev_sent_id_sql = """
SELECT coalesce(max(stream_id), 0) as stream_id
FROM device_lists_outbound_last_success
WHERE destination = ? AND user_id = ? AND stream_id <= ?
"""
results = []
for user_id, user_devices in iteritems(devices):
# The prev_id for the first row is always the last row before
# `from_stream_id`
txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id))
rows = txn.fetchall()
prev_id = rows[0][0]
prev_id = yield self._get_last_device_update_for_remote_user(
destination, user_id, from_stream_id,
)
for device_id, device in iteritems(user_devices):
stream_id = query_map[(user_id, device_id)]
result = {
@ -156,7 +235,22 @@ class DeviceWorkerStore(SQLBaseStore):
results.append(result)
return (now_stream_id, results)
defer.returnValue(results)
def _get_last_device_update_for_remote_user(
self, destination, user_id, from_stream_id,
):
def f(txn):
prev_sent_id_sql = """
SELECT coalesce(max(stream_id), 0) as stream_id
FROM device_lists_outbound_last_success
WHERE destination = ? AND user_id = ? AND stream_id <= ?
"""
txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id))
rows = txn.fetchall()
return rows[0][0]
return self.runInteraction("get_last_device_update_for_remote_user", f)
def mark_as_sent_devices_by_remote(self, destination, stream_id):
"""Mark that updates have successfully been sent to the destination.

View File

@ -78,6 +78,43 @@ class EventsWorkerStore(SQLBaseStore):
desc="get_received_ts",
)
def get_received_ts_by_stream_pos(self, stream_ordering):
"""Given a stream ordering get an approximate timestamp of when it
happened.
This is done by simply taking the received ts of the first event that
has a stream ordering greater than or equal to the given stream pos.
If none exists, returns the current time, on the assumption that it must
have happened recently.
Args:
stream_ordering (int)
Returns:
Deferred[int]
"""
def _get_approximate_received_ts_txn(txn):
sql = """
SELECT received_ts FROM events
WHERE stream_ordering >= ?
LIMIT 1
"""
txn.execute(sql, (stream_ordering,))
row = txn.fetchone()
if row and row[0]:
ts = row[0]
else:
ts = self.clock.time_msec()
return ts
return self.runInteraction(
"get_approximate_received_ts",
_get_approximate_received_ts_txn,
)
@defer.inlineCallbacks
def get_event(
self,

View File

@ -80,6 +80,14 @@ class KeyStore(SQLBaseStore):
for row in txn:
server_name, key_id, key_bytes, ts_valid_until_ms = row
if ts_valid_until_ms is None:
# Old keys may be stored with a ts_valid_until_ms of null,
# in which case we treat this as if it was set to `0`, i.e.
# it won't match key requests that define a minimum
# `ts_valid_until_ms`.
ts_valid_until_ms = 0
res = FetchKeyResult(
verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)),
valid_until_ts=ts_valid_until_ms,

View File

@ -20,12 +20,14 @@ import logging
import os
import re
from synapse.storage.engines.postgres import PostgresEngine
logger = logging.getLogger(__name__)
# Remember to update this number every time a change is made to database
# schema files, so the users will be informed on server restarts.
SCHEMA_VERSION = 54
SCHEMA_VERSION = 55
dir_path = os.path.abspath(os.path.dirname(__file__))
@ -115,8 +117,16 @@ def _setup_new_database(cur, database_engine):
valid_dirs = []
pattern = re.compile(r"^\d+(\.sql)?$")
if isinstance(database_engine, PostgresEngine):
specific = "postgres"
else:
specific = "sqlite"
specific_pattern = re.compile(r"^\d+(\.sql." + specific + r")?$")
for filename in directory_entries:
match = pattern.match(filename)
match = pattern.match(filename) or specific_pattern.match(filename)
abs_path = os.path.join(current_dir, filename)
if match and os.path.isdir(abs_path):
ver = int(match.group(0))
@ -136,7 +146,9 @@ def _setup_new_database(cur, database_engine):
directory_entries = os.listdir(sql_dir)
for filename in fnmatch.filter(directory_entries, "*.sql"):
for filename in sorted(fnmatch.filter(directory_entries, "*.sql") + fnmatch.filter(
directory_entries, "*.sql." + specific
)):
sql_loc = os.path.join(sql_dir, filename)
logger.debug("Applying schema %s", sql_loc)
executescript(cur, sql_loc)
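An illustration (filenames invented) of which delta files the combined patterns now select on a PostgreSQL install:
# 55/foo.sql           -> matched by "*.sql", applied on both engines
# 55/bar.sql.postgres  -> matched by "*.sql.postgres", applied here
# 55/bar.sql.sqlite    -> not matched, skipped on PostgreSQL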

View File

@ -17,17 +17,20 @@
import re
from six import iterkeys
from six.moves import range
from twisted.internet import defer
from synapse.api.constants import UserTypes
from synapse.api.errors import Codes, StoreError
from synapse.api.errors import Codes, StoreError, ThreepidValidationError
from synapse.storage import background_updates
from synapse.storage._base import SQLBaseStore
from synapse.types import UserID
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
THIRTY_MINUTES_IN_MS = 30 * 60 * 1000
class RegistrationWorkerStore(SQLBaseStore):
def __init__(self, db_conn, hs):
@ -422,7 +425,7 @@ class RegistrationWorkerStore(SQLBaseStore):
defer.returnValue(None)
@defer.inlineCallbacks
def get_user_id_by_threepid(self, medium, address):
def get_user_id_by_threepid(self, medium, address, require_verified=False):
"""Returns user id from threepid
Args:
@ -595,6 +598,11 @@ class RegistrationStore(
"user_threepids_grandfather", self._bg_user_threepids_grandfather,
)
# Create a background job for culling expired 3PID validity tokens
hs.get_clock().looping_call(
self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS,
)
@defer.inlineCallbacks
def add_access_token_to_user(self, user_id, token, device_id=None):
"""Adds an access token for the given user.
@ -963,7 +971,6 @@ class RegistrationStore(
We do this by grandfathering in existing user threepids assuming that
they used one of the server configured trusted identity servers.
"""
id_servers = set(self.config.trusted_third_party_id_servers)
def _bg_user_threepids_grandfather_txn(txn):
@ -984,3 +991,280 @@ class RegistrationStore(
yield self._end_background_update("user_threepids_grandfather")
defer.returnValue(1)
def get_threepid_validation_session(
self,
medium,
client_secret,
address=None,
sid=None,
validated=None,
):
"""Gets a session_id and last_send_attempt (if available) for a
client_secret/medium/(address|session_id) combo
Args:
medium (str|None): The medium of the 3PID
address (str|None): The address of the 3PID
sid (str|None): The ID of the validation session
client_secret (str|None): A unique string provided by the client to
help identify this validation attempt
validated (bool|None): Whether sessions should be filtered by
whether they have been validated already or not. None to
perform no filtering
Returns:
deferred {str, int}|None: A dict containing the
latest session_id and send_attempt count for this 3PID.
Otherwise None if there hasn't been a previous attempt
"""
keyvalues = {
"medium": medium,
"client_secret": client_secret,
}
if address:
keyvalues["address"] = address
if sid:
keyvalues["session_id"] = sid
assert(address or sid)
def get_threepid_validation_session_txn(txn):
sql = """
SELECT address, session_id, medium, client_secret,
last_send_attempt, validated_at
FROM threepid_validation_session WHERE %s
""" % (" AND ".join("%s = ?" % k for k in iterkeys(keyvalues)),)
if validated is not None:
sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL")
sql += " LIMIT 1"
txn.execute(sql, list(keyvalues.values()))
rows = self.cursor_to_dict(txn)
if not rows:
return None
return rows[0]
return self.runInteraction(
"get_threepid_validation_session",
get_threepid_validation_session_txn,
)
def validate_threepid_session(
self,
session_id,
client_secret,
token,
current_ts,
):
"""Attempt to validate a threepid session using a token
Args:
session_id (str): The id of a validation session
client_secret (str): A unique string provided by the client to
help identify this validation attempt
token (str): A validation token
current_ts (int): The current unix time in milliseconds. Used for
checking token expiry status
Returns:
deferred str|None: A str representing a link to redirect the user
to if there is one.
"""
# Insert everything into a transaction in order to run atomically
def validate_threepid_session_txn(txn):
row = self._simple_select_one_txn(
txn,
table="threepid_validation_session",
keyvalues={"session_id": session_id},
retcols=["client_secret", "validated_at"],
allow_none=True,
)
if not row:
raise ThreepidValidationError(400, "Unknown session_id")
retrieved_client_secret = row["client_secret"]
validated_at = row["validated_at"]
if retrieved_client_secret != client_secret:
raise ThreepidValidationError(
400, "This client_secret does not match the provided session_id",
)
row = self._simple_select_one_txn(
txn,
table="threepid_validation_token",
keyvalues={"session_id": session_id, "token": token},
retcols=["expires", "next_link"],
allow_none=True,
)
if not row:
raise ThreepidValidationError(
400, "Validation token not found or has expired",
)
expires = row["expires"]
next_link = row["next_link"]
# If the session is already validated, no need to revalidate
if validated_at:
return next_link
if expires <= current_ts:
raise ThreepidValidationError(
400, "This token has expired. Please request a new one",
)
# Looks good. Validate the session
self._simple_update_txn(
txn,
table="threepid_validation_session",
keyvalues={"session_id": session_id},
updatevalues={"validated_at": self.clock.time_msec()},
)
return next_link
# Run the transaction; its result is the next_link to redirect to, if any
return self.runInteraction(
"validate_threepid_session_txn",
validate_threepid_session_txn,
)
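# Illustrative usage (not part of this diff): a hedged sketch of handling a
# submitted token. It assumes ThreepidValidationError is importable from
# synapse.api.errors (as raised above) and that `clock` is the homeserver
# clock; the helper name is hypothetical.
from twisted.internet import defer

from synapse.api.errors import ThreepidValidationError


@defer.inlineCallbacks
def _submit_token(store, clock, session_id, client_secret, token):
    try:
        next_link = yield store.validate_threepid_session(
            session_id, client_secret, token, clock.time_msec(),
        )
    except ThreepidValidationError as e:
        # Unknown session, mismatched secret, or missing/expired token
        defer.returnValue((False, str(e)))
    defer.returnValue((True, next_link))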
def upsert_threepid_validation_session(
self,
medium,
address,
client_secret,
send_attempt,
session_id,
validated_at=None,
):
"""Upsert a threepid validation session
Args:
medium (str): The medium of the 3PID
address (str): The address of the 3PID
client_secret (str): A unique string provided by the client to
help identify this validation attempt
send_attempt (int): The latest send_attempt on this session
session_id (str): The id of this validation session
validated_at (int|None): The unix timestamp in milliseconds of
when the session was marked as valid
"""
insertion_values = {
"medium": medium,
"address": address,
"client_secret": client_secret,
}
if validated_at:
insertion_values["validated_at"] = validated_at
return self._simple_upsert(
table="threepid_validation_session",
keyvalues={"session_id": session_id},
values={"last_send_attempt": send_attempt},
insertion_values=insertion_values,
desc="upsert_threepid_validation_session",
)
def start_or_continue_validation_session(
self,
medium,
address,
session_id,
client_secret,
send_attempt,
next_link,
token,
token_expires,
):
"""Creates a new threepid validation session if it does not already
exist and associates a new validation token with it
Args:
medium (str): The medium of the 3PID
address (str): The address of the 3PID
session_id (str): The id of this validation session
client_secret (str): A unique string provided by the client to
help identify this validation attempt
send_attempt (int): The latest send_attempt on this session
next_link (str|None): The link to redirect the user to upon
successful validation
token (str): The validation token
token_expires (int): The timestamp (in ms) after which the token
will no longer be valid
"""
def start_or_continue_validation_session_txn(txn):
# Create or update a validation session
self._simple_upsert_txn(
txn,
table="threepid_validation_session",
keyvalues={"session_id": session_id},
values={"last_send_attempt": send_attempt},
insertion_values={
"medium": medium,
"address": address,
"client_secret": client_secret,
},
)
# Create a new validation token with this session ID
self._simple_insert_txn(
txn,
table="threepid_validation_token",
values={
"session_id": session_id,
"token": token,
"next_link": next_link,
"expires": token_expires,
},
)
return self.runInteraction(
"start_or_continue_validation_session",
start_or_continue_validation_session_txn,
)
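# Illustrative usage (not part of this diff): a hedged sketch of minting and
# recording a fresh validation token. The token format and 15-minute
# lifetime are assumptions for illustration; the helper name is hypothetical.
import binascii
import os

from twisted.internet import defer


@defer.inlineCallbacks
def _send_validation_token(store, clock, medium, address, client_secret,
                           session_id, send_attempt):
    token = binascii.hexlify(os.urandom(16)).decode("ascii")
    expires = clock.time_msec() + 15 * 60 * 1000
    yield store.start_or_continue_validation_session(
        medium, address, session_id, client_secret,
        send_attempt, None, token, expires,
    )
    # The caller would now email/SMS the token to `address`
    defer.returnValue(token)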
def cull_expired_threepid_validation_tokens(self):
"""Remove threepid validation tokens with expiry dates that have passed"""
def cull_expired_threepid_validation_tokens_txn(txn, ts):
sql = """
DELETE FROM threepid_validation_token WHERE
expires < ?
"""
return txn.execute(sql, (ts,))
return self.runInteraction(
"cull_expired_threepid_validation_tokens",
cull_expired_threepid_validation_tokens_txn,
self.clock.time_msec(),
)
def delete_threepid_session(self, session_id):
"""Removes a threepid validation session from the database. This can
be done after validation has been performed and whatever action was
waiting on it has been carried out
Args:
session_id (str): The ID of the session to delete
"""
def delete_threepid_session_txn(txn):
self._simple_delete_txn(
txn,
table="threepid_validation_token",
keyvalues={"session_id": session_id},
)
self._simple_delete_txn(
txn,
table="threepid_validation_session",
keyvalues={"session_id": session_id},
)
return self.runInteraction(
"delete_threepid_session",
delete_threepid_session_txn,
)
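# Illustrative usage (not part of this diff): once a validated session has
# been consumed, the session and its tokens can be removed. This sketch
# assumes the store's existing user_add_threepid helper; the wrapper name
# is hypothetical.
from twisted.internet import defer

@defer.inlineCallbacks
def _bind_and_cleanup(store, user_id, session, added_at):
    yield store.user_add_threepid(
        user_id, session["medium"], session["address"],
        session["validated_at"], added_at,
    )
    yield store.delete_threepid_session(session["session_id"])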


@@ -0,0 +1,31 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS threepid_validation_session (
session_id TEXT PRIMARY KEY,
medium TEXT NOT NULL,
address TEXT NOT NULL,
client_secret TEXT NOT NULL,
last_send_attempt BIGINT NOT NULL,
validated_at BIGINT
);
CREATE TABLE IF NOT EXISTS threepid_validation_token (
token TEXT PRIMARY KEY,
session_id TEXT NOT NULL,
next_link TEXT,
expires BIGINT NOT NULL
);
CREATE INDEX threepid_validation_token_session_id ON threepid_validation_token(session_id);

File diff suppressed because it is too large


@@ -0,0 +1,260 @@
CREATE TABLE application_services_state( as_id TEXT PRIMARY KEY, state VARCHAR(5), last_txn INTEGER );
CREATE TABLE application_services_txns( as_id TEXT NOT NULL, txn_id INTEGER NOT NULL, event_ids TEXT NOT NULL, UNIQUE(as_id, txn_id) );
CREATE INDEX application_services_txns_id ON application_services_txns ( as_id );
CREATE TABLE presence( user_id TEXT NOT NULL, state VARCHAR(20), status_msg TEXT, mtime BIGINT, UNIQUE (user_id) );
CREATE TABLE presence_allow_inbound( observed_user_id TEXT NOT NULL, observer_user_id TEXT NOT NULL, UNIQUE (observed_user_id, observer_user_id) );
CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, UNIQUE(name) );
CREATE TABLE access_tokens( id BIGINT PRIMARY KEY, user_id TEXT NOT NULL, device_id TEXT, token TEXT NOT NULL, last_used BIGINT, UNIQUE(token) );
CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL );
CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) );
CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) );
CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER );
CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, UNIQUE (event_id) );
CREATE INDEX events_order_room ON events ( room_id, topological_ordering, stream_ordering );
CREATE TABLE event_json( event_id TEXT NOT NULL, room_id TEXT NOT NULL, internal_metadata TEXT NOT NULL, json TEXT NOT NULL, format_version INTEGER, UNIQUE (event_id) );
CREATE INDEX event_json_room_id ON event_json(room_id);
CREATE TABLE state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, prev_state TEXT, UNIQUE (event_id) );
CREATE TABLE current_state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, UNIQUE (event_id), UNIQUE (room_id, type, state_key) );
CREATE TABLE room_memberships( event_id TEXT NOT NULL, user_id TEXT NOT NULL, sender TEXT NOT NULL, room_id TEXT NOT NULL, membership TEXT NOT NULL, forgotten INTEGER DEFAULT 0, display_name TEXT, avatar_url TEXT, UNIQUE (event_id) );
CREATE INDEX room_memberships_room_id ON room_memberships (room_id);
CREATE INDEX room_memberships_user_id ON room_memberships (user_id);
CREATE TABLE topics( event_id TEXT NOT NULL, room_id TEXT NOT NULL, topic TEXT NOT NULL, UNIQUE (event_id) );
CREATE INDEX topics_room_id ON topics(room_id);
CREATE TABLE room_names( event_id TEXT NOT NULL, room_id TEXT NOT NULL, name TEXT NOT NULL, UNIQUE (event_id) );
CREATE INDEX room_names_room_id ON room_names(room_id);
CREATE TABLE rooms( room_id TEXT PRIMARY KEY NOT NULL, is_public BOOL, creator TEXT );
CREATE TABLE server_signature_keys( server_name TEXT, key_id TEXT, from_server TEXT, ts_added_ms BIGINT, verify_key bytea, ts_valid_until_ms BIGINT, UNIQUE (server_name, key_id) );
CREATE TABLE rejections( event_id TEXT NOT NULL, reason TEXT NOT NULL, last_check TEXT NOT NULL, UNIQUE (event_id) );
CREATE TABLE push_rules ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, priority_class SMALLINT NOT NULL, priority INTEGER NOT NULL DEFAULT 0, conditions TEXT NOT NULL, actions TEXT NOT NULL, UNIQUE(user_name, rule_id) );
CREATE INDEX push_rules_user_name on push_rules (user_name);
CREATE TABLE user_filters( user_id TEXT, filter_id BIGINT, filter_json bytea );
CREATE INDEX user_filters_by_user_id_filter_id ON user_filters( user_id, filter_id );
CREATE TABLE push_rules_enable ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, enabled SMALLINT, UNIQUE(user_name, rule_id) );
CREATE INDEX push_rules_enable_user_name on push_rules_enable (user_name);
CREATE TABLE event_forward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) );
CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id);
CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id);
CREATE TABLE event_backward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) );
CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id);
CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id);
CREATE TABLE event_edges( event_id TEXT NOT NULL, prev_event_id TEXT NOT NULL, room_id TEXT NOT NULL, is_state BOOL NOT NULL, UNIQUE (event_id, prev_event_id, room_id, is_state) );
CREATE INDEX ev_edges_id ON event_edges(event_id);
CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id);
CREATE TABLE room_depth( room_id TEXT NOT NULL, min_depth INTEGER NOT NULL, UNIQUE (room_id) );
CREATE INDEX room_depth_room ON room_depth(room_id);
CREATE TABLE state_groups( id BIGINT PRIMARY KEY, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
CREATE TABLE state_groups_state( state_group BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT NOT NULL );
CREATE TABLE event_to_state_groups( event_id TEXT NOT NULL, state_group BIGINT NOT NULL, UNIQUE (event_id) );
CREATE TABLE local_media_repository ( media_id TEXT, media_type TEXT, media_length INTEGER, created_ts BIGINT, upload_name TEXT, user_id TEXT, quarantined_by TEXT, url_cache TEXT, last_access_ts BIGINT, UNIQUE (media_id) );
CREATE TABLE local_media_repository_thumbnails ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type ) );
CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails (media_id);
CREATE TABLE remote_media_cache ( media_origin TEXT, media_id TEXT, media_type TEXT, created_ts BIGINT, upload_name TEXT, media_length INTEGER, filesystem_id TEXT, last_access_ts BIGINT, quarantined_by TEXT, UNIQUE (media_origin, media_id) );
CREATE TABLE remote_media_cache_thumbnails ( media_origin TEXT, media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_method TEXT, thumbnail_type TEXT, thumbnail_length INTEGER, filesystem_id TEXT, UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type ) );
CREATE TABLE redactions ( event_id TEXT NOT NULL, redacts TEXT NOT NULL, UNIQUE (event_id) );
CREATE INDEX redactions_redacts ON redactions (redacts);
CREATE TABLE room_aliases( room_alias TEXT NOT NULL, room_id TEXT NOT NULL, creator TEXT, UNIQUE (room_alias) );
CREATE INDEX room_aliases_id ON room_aliases(room_id);
CREATE TABLE room_alias_servers( room_alias TEXT NOT NULL, server TEXT NOT NULL );
CREATE INDEX room_alias_servers_alias ON room_alias_servers(room_alias);
CREATE TABLE event_reference_hashes ( event_id TEXT, algorithm TEXT, hash bytea, UNIQUE (event_id, algorithm) );
CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id);
CREATE TABLE IF NOT EXISTS "server_keys_json" ( server_name TEXT NOT NULL, key_id TEXT NOT NULL, from_server TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, ts_valid_until_ms BIGINT NOT NULL, key_json bytea NOT NULL, CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server) );
CREATE TABLE e2e_device_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id) );
CREATE TABLE e2e_one_time_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, algorithm TEXT NOT NULL, key_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id) );
CREATE TABLE receipts_graph( room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_ids TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id) );
CREATE TABLE receipts_linearized ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_id TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id) );
CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id );
CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id );
CREATE TABLE IF NOT EXISTS "user_threepids" ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, validated_at BIGINT NOT NULL, added_at BIGINT NOT NULL, CONSTRAINT medium_address UNIQUE (medium, address) );
CREATE INDEX user_threepids_user_id ON user_threepids(user_id);
CREATE TABLE background_updates( update_name TEXT NOT NULL, progress_json TEXT NOT NULL, depends_on TEXT, CONSTRAINT background_updates_uniqueness UNIQUE (update_name) );
CREATE VIRTUAL TABLE event_search USING fts4 ( event_id, room_id, sender, key, value )
/* event_search(event_id,room_id,sender,"key",value) */;
CREATE TABLE IF NOT EXISTS 'event_search_content'(docid INTEGER PRIMARY KEY, 'c0event_id', 'c1room_id', 'c2sender', 'c3key', 'c4value');
CREATE TABLE IF NOT EXISTS 'event_search_segments'(blockid INTEGER PRIMARY KEY, block BLOB);
CREATE TABLE IF NOT EXISTS 'event_search_segdir'(level INTEGER,idx INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block INTEGER,root BLOB,PRIMARY KEY(level, idx));
CREATE TABLE IF NOT EXISTS 'event_search_docsize'(docid INTEGER PRIMARY KEY, size BLOB);
CREATE TABLE IF NOT EXISTS 'event_search_stat'(id INTEGER PRIMARY KEY, value BLOB);
CREATE TABLE guest_access( event_id TEXT NOT NULL, room_id TEXT NOT NULL, guest_access TEXT NOT NULL, UNIQUE (event_id) );
CREATE TABLE history_visibility( event_id TEXT NOT NULL, room_id TEXT NOT NULL, history_visibility TEXT NOT NULL, UNIQUE (event_id) );
CREATE TABLE room_tags( user_id TEXT NOT NULL, room_id TEXT NOT NULL, tag TEXT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag) );
CREATE TABLE room_tags_revisions ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, stream_id BIGINT NOT NULL, CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id) );
CREATE TABLE IF NOT EXISTS "account_data_max_stream_id"( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT NOT NULL, CHECK (Lock='X') );
CREATE TABLE account_data( user_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type) );
CREATE TABLE room_account_data( user_id TEXT NOT NULL, room_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type) );
CREATE INDEX account_data_stream_id on account_data(user_id, stream_id);
CREATE INDEX room_account_data_stream_id on room_account_data(user_id, stream_id);
CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);
CREATE TABLE event_push_actions( room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, profile_tag VARCHAR(32), actions TEXT NOT NULL, topological_ordering BIGINT, stream_ordering BIGINT, notif SMALLINT, highlight SMALLINT, CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) );
CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id);
CREATE INDEX events_room_stream on events(room_id, stream_ordering);
CREATE INDEX public_room_index on rooms(is_public);
CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id );
CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering );
CREATE TABLE presence_stream( stream_id BIGINT, user_id TEXT, state TEXT, last_active_ts BIGINT, last_federation_update_ts BIGINT, last_user_sync_ts BIGINT, status_msg TEXT, currently_active BOOLEAN );
CREATE INDEX presence_stream_id ON presence_stream(stream_id, user_id);
CREATE INDEX presence_stream_user_id ON presence_stream(user_id);
CREATE TABLE push_rules_stream( stream_id BIGINT NOT NULL, event_stream_ordering BIGINT NOT NULL, user_id TEXT NOT NULL, rule_id TEXT NOT NULL, op TEXT NOT NULL, priority_class SMALLINT, priority INTEGER, conditions TEXT, actions TEXT );
CREATE INDEX push_rules_stream_id ON push_rules_stream(stream_id);
CREATE INDEX push_rules_stream_user_stream_id on push_rules_stream(user_id, stream_id);
CREATE TABLE ex_outlier_stream( event_stream_ordering BIGINT PRIMARY KEY NOT NULL, event_id TEXT NOT NULL, state_group BIGINT NOT NULL );
CREATE TABLE threepid_guest_access_tokens( medium TEXT, address TEXT, guest_access_token TEXT, first_inviter TEXT );
CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens(medium, address);
CREATE TABLE local_invites( stream_id BIGINT NOT NULL, inviter TEXT NOT NULL, invitee TEXT NOT NULL, event_id TEXT NOT NULL, room_id TEXT NOT NULL, locally_rejected TEXT, replaced_by TEXT );
CREATE INDEX local_invites_id ON local_invites(stream_id);
CREATE INDEX local_invites_for_user_idx ON local_invites(invitee, locally_rejected, replaced_by, room_id);
CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id );
CREATE TABLE open_id_tokens ( token TEXT NOT NULL PRIMARY KEY, ts_valid_until_ms bigint NOT NULL, user_id TEXT NOT NULL, UNIQUE (token) );
CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens(ts_valid_until_ms);
CREATE TABLE pusher_throttle( pusher BIGINT NOT NULL, room_id TEXT NOT NULL, last_sent_ts BIGINT, throttle_ms BIGINT, PRIMARY KEY (pusher, room_id) );
CREATE TABLE event_reports( id BIGINT NOT NULL PRIMARY KEY, received_ts BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, reason TEXT, content TEXT );
CREATE TABLE devices ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, display_name TEXT, CONSTRAINT device_uniqueness UNIQUE (user_id, device_id) );
CREATE TABLE appservice_stream_position( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT, CHECK (Lock='X') );
CREATE TABLE device_inbox ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, stream_id BIGINT NOT NULL, message_json TEXT NOT NULL );
CREATE INDEX device_inbox_user_stream_id ON device_inbox(user_id, device_id, stream_id);
CREATE INDEX received_transactions_ts ON received_transactions(ts);
CREATE TABLE device_federation_outbox ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, queued_ts BIGINT NOT NULL, messages_json TEXT NOT NULL );
CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox(destination, stream_id);
CREATE TABLE device_federation_inbox ( origin TEXT NOT NULL, message_id TEXT NOT NULL, received_ts BIGINT NOT NULL );
CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox(origin, message_id);
CREATE TABLE device_max_stream_id ( stream_id BIGINT NOT NULL );
CREATE TABLE public_room_list_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, visibility BOOLEAN NOT NULL , appservice_id TEXT, network_id TEXT);
CREATE INDEX public_room_list_stream_idx on public_room_list_stream( stream_id );
CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream( room_id, stream_id );
CREATE TABLE state_group_edges( state_group BIGINT NOT NULL, prev_state_group BIGINT NOT NULL );
CREATE INDEX state_group_edges_idx ON state_group_edges(state_group);
CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group);
CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering );
CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering );
CREATE TABLE IF NOT EXISTS "event_auth"( event_id TEXT NOT NULL, auth_id TEXT NOT NULL, room_id TEXT NOT NULL );
CREATE INDEX evauth_edges_id ON event_auth(event_id);
CREATE INDEX user_threepids_medium_address on user_threepids (medium, address);
CREATE TABLE appservice_room_list( appservice_id TEXT NOT NULL, network_id TEXT NOT NULL, room_id TEXT NOT NULL );
CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list( appservice_id, network_id, room_id );
CREATE INDEX device_federation_outbox_id ON device_federation_outbox(stream_id);
CREATE TABLE federation_stream_position( type TEXT NOT NULL, stream_id INTEGER NOT NULL );
CREATE TABLE device_lists_remote_cache ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, content TEXT NOT NULL );
CREATE TABLE device_lists_remote_extremeties ( user_id TEXT NOT NULL, stream_id TEXT NOT NULL );
CREATE TABLE device_lists_stream ( stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL );
CREATE INDEX device_lists_stream_id ON device_lists_stream(stream_id, user_id);
CREATE TABLE device_lists_outbound_pokes ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL, sent BOOLEAN NOT NULL, ts BIGINT NOT NULL );
CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes(destination, stream_id);
CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes(destination, user_id);
CREATE TABLE event_push_summary ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, notif_count BIGINT NOT NULL, stream_ordering BIGINT NOT NULL );
CREATE INDEX event_push_summary_user_rm ON event_push_summary(user_id, room_id);
CREATE TABLE event_push_summary_stream_ordering ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT NOT NULL, CHECK (Lock='X') );
CREATE TABLE IF NOT EXISTS "pushers" ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, access_token BIGINT DEFAULT NULL, profile_tag TEXT NOT NULL, kind TEXT NOT NULL, app_id TEXT NOT NULL, app_display_name TEXT NOT NULL, device_display_name TEXT NOT NULL, pushkey TEXT NOT NULL, ts BIGINT NOT NULL, lang TEXT, data TEXT, last_stream_ordering INTEGER, last_success BIGINT, failing_since BIGINT, UNIQUE (app_id, pushkey, user_name) );
CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes(stream_id);
CREATE TABLE ratelimit_override ( user_id TEXT NOT NULL, messages_per_second BIGINT, burst_count BIGINT );
CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override(user_id);
CREATE TABLE current_state_delta_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT, prev_event_id TEXT );
CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream(stream_id);
CREATE TABLE device_lists_outbound_last_success ( destination TEXT NOT NULL, user_id TEXT NOT NULL, stream_id BIGINT NOT NULL );
CREATE INDEX device_lists_outbound_last_success_idx ON device_lists_outbound_last_success( destination, user_id, stream_id );
CREATE TABLE user_directory_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') );
CREATE VIRTUAL TABLE user_directory_search USING fts4 ( user_id, value )
/* user_directory_search(user_id,value) */;
CREATE TABLE IF NOT EXISTS 'user_directory_search_content'(docid INTEGER PRIMARY KEY, 'c0user_id', 'c1value');
CREATE TABLE IF NOT EXISTS 'user_directory_search_segments'(blockid INTEGER PRIMARY KEY, block BLOB);
CREATE TABLE IF NOT EXISTS 'user_directory_search_segdir'(level INTEGER,idx INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block INTEGER,root BLOB,PRIMARY KEY(level, idx));
CREATE TABLE IF NOT EXISTS 'user_directory_search_docsize'(docid INTEGER PRIMARY KEY, size BLOB);
CREATE TABLE IF NOT EXISTS 'user_directory_search_stat'(id INTEGER PRIMARY KEY, value BLOB);
CREATE TABLE blocked_rooms ( room_id TEXT NOT NULL, user_id TEXT NOT NULL );
CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms(room_id);
CREATE TABLE IF NOT EXISTS "local_media_repository_url_cache"( url TEXT, response_code INTEGER, etag TEXT, expires_ts BIGINT, og TEXT, media_id TEXT, download_ts BIGINT );
CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(expires_ts);
CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache(url, download_ts);
CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache(media_id);
CREATE TABLE group_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, is_public BOOLEAN NOT NULL );
CREATE TABLE group_invites ( group_id TEXT NOT NULL, user_id TEXT NOT NULL );
CREATE TABLE group_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, is_public BOOLEAN NOT NULL );
CREATE TABLE group_summary_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, category_id TEXT NOT NULL, room_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id, room_id, room_order), CHECK (room_order > 0) );
CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms(group_id, room_id, category_id);
CREATE TABLE group_summary_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, cat_order BIGINT NOT NULL, UNIQUE (group_id, category_id, cat_order), CHECK (cat_order > 0) );
CREATE TABLE group_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id) );
CREATE TABLE group_summary_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, role_id TEXT NOT NULL, user_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL );
CREATE INDEX group_summary_users_g_idx ON group_summary_users(group_id);
CREATE TABLE group_summary_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, role_order BIGINT NOT NULL, UNIQUE (group_id, role_id, role_order), CHECK (role_order > 0) );
CREATE TABLE group_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, role_id) );
CREATE TABLE group_attestations_renewals ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL );
CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals(group_id, user_id);
CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals(user_id);
CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals(valid_until_ms);
CREATE TABLE group_attestations_remote ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL, attestation_json TEXT NOT NULL );
CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote(group_id, user_id);
CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote(user_id);
CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote(valid_until_ms);
CREATE TABLE local_group_membership ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, membership TEXT NOT NULL, is_publicised BOOLEAN NOT NULL, content TEXT NOT NULL );
CREATE INDEX local_group_membership_u_idx ON local_group_membership(user_id, group_id);
CREATE INDEX local_group_membership_g_idx ON local_group_membership(group_id);
CREATE TABLE local_group_updates ( stream_id BIGINT NOT NULL, group_id TEXT NOT NULL, user_id TEXT NOT NULL, type TEXT NOT NULL, content TEXT NOT NULL );
CREATE TABLE remote_profile_cache ( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, last_check BIGINT NOT NULL );
CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache(user_id);
CREATE INDEX remote_profile_cache_time ON remote_profile_cache(last_check);
CREATE TABLE IF NOT EXISTS "deleted_pushers" ( stream_id BIGINT NOT NULL, app_id TEXT NOT NULL, pushkey TEXT NOT NULL, user_id TEXT NOT NULL );
CREATE INDEX deleted_pushers_stream_id ON deleted_pushers (stream_id);
CREATE TABLE IF NOT EXISTS "groups" ( group_id TEXT NOT NULL, name TEXT, avatar_url TEXT, short_description TEXT, long_description TEXT, is_public BOOL NOT NULL , join_policy TEXT NOT NULL DEFAULT 'invite');
CREATE UNIQUE INDEX groups_idx ON groups(group_id);
CREATE TABLE IF NOT EXISTS "user_directory" ( user_id TEXT NOT NULL, room_id TEXT, display_name TEXT, avatar_url TEXT );
CREATE INDEX user_directory_room_idx ON user_directory(room_id);
CREATE UNIQUE INDEX user_directory_user_idx ON user_directory(user_id);
CREATE TABLE event_push_actions_staging ( event_id TEXT NOT NULL, user_id TEXT NOT NULL, actions TEXT NOT NULL, notif SMALLINT NOT NULL, highlight SMALLINT NOT NULL );
CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id);
CREATE TABLE users_pending_deactivation ( user_id TEXT NOT NULL );
CREATE UNIQUE INDEX group_invites_g_idx ON group_invites(group_id, user_id);
CREATE UNIQUE INDEX group_users_g_idx ON group_users(group_id, user_id);
CREATE INDEX group_users_u_idx ON group_users(user_id);
CREATE INDEX group_invites_u_idx ON group_invites(user_id);
CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id);
CREATE INDEX group_rooms_r_idx ON group_rooms(room_id);
CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL );
CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp);
CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp);
CREATE TABLE erased_users ( user_id TEXT NOT NULL );
CREATE UNIQUE INDEX erased_users_user ON erased_users(user_id);
CREATE TABLE monthly_active_users ( user_id TEXT NOT NULL, timestamp BIGINT NOT NULL );
CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users(user_id);
CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users(timestamp);
CREATE TABLE IF NOT EXISTS "e2e_room_keys_versions" ( user_id TEXT NOT NULL, version BIGINT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL, deleted SMALLINT DEFAULT 0 NOT NULL );
CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version);
CREATE TABLE IF NOT EXISTS "e2e_room_keys" ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, session_id TEXT NOT NULL, version BIGINT NOT NULL, first_message_index INT, forwarded_count INT, is_verified BOOLEAN, session_data TEXT NOT NULL );
CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id);
CREATE TABLE users_who_share_private_rooms ( user_id TEXT NOT NULL, other_user_id TEXT NOT NULL, room_id TEXT NOT NULL );
CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms(user_id, other_user_id, room_id);
CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms(room_id);
CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms(other_user_id);
CREATE TABLE user_threepid_id_server ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, id_server TEXT NOT NULL );
CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server( user_id, medium, address, id_server );
CREATE TABLE users_in_public_rooms ( user_id TEXT NOT NULL, room_id TEXT NOT NULL );
CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms(user_id, room_id);
CREATE TABLE account_validity ( user_id TEXT PRIMARY KEY, expiration_ts_ms BIGINT NOT NULL, email_sent BOOLEAN NOT NULL, renewal_token TEXT );
CREATE TABLE event_relations ( event_id TEXT NOT NULL, relates_to_id TEXT NOT NULL, relation_type TEXT NOT NULL, aggregation_key TEXT );
CREATE UNIQUE INDEX event_relations_id ON event_relations(event_id);
CREATE INDEX event_relations_relates ON event_relations(relates_to_id, relation_type, aggregation_key);
CREATE TABLE stats_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') );
CREATE TABLE user_stats ( user_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, public_rooms INT NOT NULL, private_rooms INT NOT NULL );
CREATE UNIQUE INDEX user_stats_user_ts ON user_stats(user_id, ts);
CREATE TABLE room_stats ( room_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, current_state_events INT NOT NULL, joined_members INT NOT NULL, invited_members INT NOT NULL, left_members INT NOT NULL, banned_members INT NOT NULL, state_events INT NOT NULL );
CREATE UNIQUE INDEX room_stats_room_ts ON room_stats(room_id, ts);
CREATE TABLE room_state ( room_id TEXT NOT NULL, join_rules TEXT, history_visibility TEXT, encryption TEXT, name TEXT, topic TEXT, avatar TEXT, canonical_alias TEXT );
CREATE UNIQUE INDEX room_state_room ON room_state(room_id);
CREATE TABLE room_stats_earliest_token ( room_id TEXT NOT NULL, token BIGINT NOT NULL );
CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token(room_id);
CREATE INDEX access_tokens_device_id ON access_tokens (user_id, device_id);
CREATE INDEX user_ips_device_id ON user_ips (user_id, device_id, last_seen);
CREATE INDEX event_contains_url_index ON events (room_id, topological_ordering, stream_ordering);
CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering);
CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering);
CREATE INDEX current_state_events_member_index ON current_state_events (state_key);
CREATE INDEX device_inbox_stream_id_user_id ON device_inbox (stream_id, user_id);
CREATE INDEX device_lists_stream_user_id ON device_lists_stream (user_id, device_id);
CREATE INDEX local_media_repository_url_idx ON local_media_repository (created_ts);
CREATE INDEX user_ips_last_seen ON user_ips (user_id, last_seen);
CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen);
CREATE INDEX users_creation_ts ON users (creation_ts);
CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group);
CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id);
CREATE INDEX state_groups_state_type_idx ON state_groups_state(state_group, type, state_key);
CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id);
CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip);


@@ -0,0 +1,7 @@
INSERT INTO appservice_stream_position (stream_ordering) SELECT COALESCE(MAX(stream_ordering), 0) FROM events;
INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', -1);
INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events;
INSERT INTO user_directory_stream_pos (stream_id) VALUES (0);
INSERT INTO stats_stream_pos (stream_id) VALUES (0);
INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0);

Some files were not shown because too many files have changed in this diff