Merge remote-tracking branch 'origin/release-v1.79' into matrix-org-hotfixes

David Robertson 2023-03-07 12:17:08 +00:00
commit a95e906227
228 changed files with 4149 additions and 2939 deletions


@@ -109,11 +109,26 @@ sytest_tests = [
"postgres": "multi-postgres",
"workers": "workers",
},
{
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
"reactor": "asyncio",
},
]
if not IS_PR:
sytest_tests.extend(
[
{
"sytest-tag": "focal",
"reactor": "asyncio",
},
{
"sytest-tag": "focal",
"postgres": "postgres",
"reactor": "asyncio",
},
{
"sytest-tag": "testing",
"postgres": "postgres",


@@ -21,4 +21,8 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
0a00b7ff14890987f09112a2ae696c61001e6cf1
# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7
# Update black to 23.1.0 (#15103)
9bb2eac71962970d02842bca441f4bcdbbf93a11


@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@b59d8c6a6c5c6c6437954f470d963c0b20ea7415 # v2.25.0
uses: dawidd6/action-download-artifact@5e780fc7bbd0cac69fc73271ed86edf5dcb72d67 # v2.26.0
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}


@@ -12,7 +12,7 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
@@ -39,7 +39,7 @@ jobs:
name: Check links in documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0


@@ -48,7 +48,7 @@ jobs:
with:
ref: master
- name: Login to registry
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}


@@ -156,7 +156,8 @@ jobs:
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
# We use nightly so that it correctly groups together imports
toolchain: nightly-2022-12-01
components: rustfmt
- uses: Swatinem/rust-cache@v2
@@ -368,6 +369,7 @@ jobs:
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.job.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') && 1 }}
WORKERS: ${{ matrix.job.workers && 1 }}
BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}


@@ -6,7 +6,7 @@ on:
jobs:
triage:
uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v2
with:
project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
content_id: ${{ github.event.issue.node_id }}


@@ -1,3 +1,107 @@
Synapse 1.79.0rc1 (2023-03-07)
==============================
Features
--------
- Add two new Third Party Rules module API callbacks: [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier) and [`on_remove_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_remove_user_third_party_identifier). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
- Experimental support for [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967) to not require UIA for setting up cross-signing on first use. ([\#15077](https://github.com/matrix-org/synapse/issues/15077))
- Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#15107](https://github.com/matrix-org/synapse/issues/15107))
- Add an [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) to delete a [specific event report](https://spec.matrix.org/v1.6/client-server-api/#reporting-content). ([\#15116](https://github.com/matrix-org/synapse/issues/15116))
- Add support for knocking to workers. ([\#15133](https://github.com/matrix-org/synapse/issues/15133))
- Allow use of the `/filter` Client-Server APIs on workers. ([\#15134](https://github.com/matrix-org/synapse/issues/15134))
- Remove support for server-side aggregation of reactions. ([\#15172](https://github.com/matrix-org/synapse/issues/15172))
- Stabilise support for [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758): `event_property_is` push condition. ([\#15185](https://github.com/matrix-org/synapse/issues/15185))
Bugfixes
--------
- Fix a bug introduced in Synapse 1.75 that caused experimental support for deleting account data to raise an internal server error while using an account data writer worker. ([\#14869](https://github.com/matrix-org/synapse/issues/14869))
- Fix a long-standing bug where Synapse handled an unspecced field on push rules. ([\#15088](https://github.com/matrix-org/synapse/issues/15088))
- Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. ([\#15092](https://github.com/matrix-org/synapse/issues/15092))
- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
- Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. ([\#15163](https://github.com/matrix-org/synapse/issues/15163))
- Add the `transaction_id` in the events included in many endpoints' responses. ([\#15174](https://github.com/matrix-org/synapse/issues/15174))
- Fix `test_icu_word_boundary_punctuation` for Alpine / macOS installed ICU versions. ([\#15177](https://github.com/matrix-org/synapse/issues/15177))
- Fix a bug introduced in Synapse 1.78.0 where requests to claim dehydrated devices would fail with a `405` error. ([\#15180](https://github.com/matrix-org/synapse/issues/15180))
- Stop applying edits when bundling aggregations, per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925). ([\#15193](https://github.com/matrix-org/synapse/issues/15193))
- Fix a long-standing bug where the user directory search was not case-insensitive for accented characters. ([\#15143](https://github.com/matrix-org/synapse/issues/15143))
Updates to the Docker image
---------------------------
- Improve startup logging in the with-workers Docker image. ([\#15186](https://github.com/matrix-org/synapse/issues/15186))
Improved Documentation
----------------------
- Document how to use caches in a module. ([\#14026](https://github.com/matrix-org/synapse/issues/14026))
- Clarify which worker processes the ThirdPartyRules' [`on_new_event`](https://matrix-org.github.io/synapse/v1.78/modules/third_party_rules_callbacks.html#on_new_event) module API callback runs on. ([\#15071](https://github.com/matrix-org/synapse/issues/15071))
- Document using [Shibboleth](https://www.shibboleth.net/) as an OpenID Provider. ([\#15112](https://github.com/matrix-org/synapse/issues/15112))
- Correct reference to `federation_verify_certificates` in configuration documentation. ([\#15139](https://github.com/matrix-org/synapse/issues/15139))
- Correct small documentation errors in some `MatrixFederationHttpClient` methods. ([\#15148](https://github.com/matrix-org/synapse/issues/15148))
- Correct the description of the behavior of `registration_shared_secret_path` on startup. ([\#15168](https://github.com/matrix-org/synapse/issues/15168))
Deprecations and Removals
-------------------------
- Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))
Internal Changes
----------------
- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14101](https://github.com/matrix-org/synapse/issues/14101))
- Batch up storing state groups when creating a new room. ([\#14918](https://github.com/matrix-org/synapse/issues/14918))
- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15051](https://github.com/matrix-org/synapse/issues/15051))
- Refactor writing json data in `FileExfiltrationWriter`. ([\#15095](https://github.com/matrix-org/synapse/issues/15095))
- Tighten the login ratelimit defaults. ([\#15135](https://github.com/matrix-org/synapse/issues/15135))
- Fix a typo in an experimental config setting. ([\#15138](https://github.com/matrix-org/synapse/issues/15138))
- Refactor the media modules. ([\#15146](https://github.com/matrix-org/synapse/issues/15146), [\#15175](https://github.com/matrix-org/synapse/issues/15175))
- Improve type hints. ([\#15164](https://github.com/matrix-org/synapse/issues/15164))
- Move `get_event_report` and `get_event_reports_paginate` from `RoomStore` to `RoomWorkerStore`. ([\#15165](https://github.com/matrix-org/synapse/issues/15165))
- Remove dangling reference to being a reference implementation in docstring. ([\#15167](https://github.com/matrix-org/synapse/issues/15167))
- Add an option to force a rebuild of the "editable" complement image. ([\#15184](https://github.com/matrix-org/synapse/issues/15184))
- Use nightly rustfmt in CI. ([\#15188](https://github.com/matrix-org/synapse/issues/15188))
- Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator`. ([\#15191](https://github.com/matrix-org/synapse/issues/15191))
- Combine `AbstractStreamIdTracker` and `AbstractStreamIdGenerator`. ([\#15192](https://github.com/matrix-org/synapse/issues/15192))
- Automatically fix errors with `ruff`. ([\#15194](https://github.com/matrix-org/synapse/issues/15194))
- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
- Remove unspecced and buggy `PUT` method on the unstable `/rooms/<room_id>/batch_send` endpoint. ([\#15199](https://github.com/matrix-org/synapse/issues/15199))
- Refactor database transaction for query users' devices to reduce database pool contention. ([\#15215](https://github.com/matrix-org/synapse/issues/15215))
- Correct `test_icu_word_boundary_punctuation` so that it passes with the ICU versions available in Alpine and macOS. ([\#15177](https://github.com/matrix-org/synapse/issues/15177))
- <details><summary>Locked dependency updates</summary>
- Bump actions/checkout from 2 to 3. ([\#15155](https://github.com/matrix-org/synapse/issues/15155))
- Bump black from 22.12.0 to 23.1.0. ([\#15103](https://github.com/matrix-org/synapse/issues/15103))
- Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. ([\#15152](https://github.com/matrix-org/synapse/issues/15152))
- Bump docker/login-action from 1 to 2. ([\#15154](https://github.com/matrix-org/synapse/issues/15154))
- Bump matrix-org/backend-meta from 1 to 2. ([\#15156](https://github.com/matrix-org/synapse/issues/15156))
- Bump ruff from 0.0.237 to 0.0.252. ([\#15159](https://github.com/matrix-org/synapse/issues/15159))
- Bump serde_json from 1.0.93 to 1.0.94. ([\#15214](https://github.com/matrix-org/synapse/issues/15214))
- Bump types-commonmark from 0.9.2.1 to 0.9.2.2. ([\#15209](https://github.com/matrix-org/synapse/issues/15209))
- Bump types-opentracing from 2.4.10.1 to 2.4.10.3. ([\#15158](https://github.com/matrix-org/synapse/issues/15158))
- Bump types-pillow from 9.4.0.13 to 9.4.0.17. ([\#15211](https://github.com/matrix-org/synapse/issues/15211))
- Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. ([\#15210](https://github.com/matrix-org/synapse/issues/15210))
- Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. ([\#15213](https://github.com/matrix-org/synapse/issues/15213))
- Bump types-setuptools from 67.3.0.1 to 67.4.0.3. ([\#15160](https://github.com/matrix-org/synapse/issues/15160))
- Bump types-setuptools from 67.4.0.3 to 67.5.0.0. ([\#15212](https://github.com/matrix-org/synapse/issues/15212))
- Bump typing-extensions from 4.4.0 to 4.5.0. ([\#15157](https://github.com/matrix-org/synapse/issues/15157))
</details>
Synapse 1.78.0 (2023-02-28)
===========================
Bugfixes
--------
- Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers. ([\#15150](https://github.com/matrix-org/synapse/issues/15150))
Synapse 1.78.0rc1 (2023-02-21)
==============================

Cargo.lock

@@ -343,9 +343,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.93"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76"
checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea"
dependencies = [
"itoa",
"ryu",


@@ -1 +0,0 @@
Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers.

debian/changelog

@@ -1,3 +1,15 @@
matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium
* New Synapse release 1.79.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Mar 2023 12:03:49 +0000
matrix-synapse-py3 (1.78.0) stable; urgency=medium
* New Synapse release 1.78.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Feb 2023 08:56:03 -0800
matrix-synapse-py3 (1.78.0~rc1) stable; urgency=medium
* Add `matrix-org-archive-keyring` package as recommended.


@@ -142,6 +142,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
"^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -204,6 +205,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
"^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
],
@@ -674,17 +676,21 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
if not os.path.exists(config_path):
log("Generating base homeserver config")
generate_base_homeserver_config()
else:
log("Base homeserver config exists—not regenerating")
# This script may be run multiple times (mostly by Complement, see note at top of file).
# Don't re-configure workers in this instance.
mark_filepath = "/conf/workers_have_been_configured"
if not os.path.exists(mark_filepath):
# Always regenerate all other config files
log("Generating worker config files")
generate_worker_files(environ, config_path, data_dir)
# Mark workers as being configured
with open(mark_filepath, "w") as f:
f.write("")
else:
log("Worker config exists—not regenerating")
# Lifted right out of start.py
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)


@@ -169,3 +169,17 @@ The following fields are returned in the JSON response body:
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
have a canonical alias set.
* `event_json`: object - Details of the original event that was reported.
# Delete a specific event report
This API deletes a specific event report. If the request is successful, the response body
will be an empty JSON object.
The API is:
```
DELETE /_synapse/admin/v1/event_reports/<report_id>
```
**URL parameters:**
* `report_id`: string - The ID of the event report.
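For example, assuming an admin access token and a hypothetical report ID of `1` (with `localhost:8008` standing in for the homeserver's address), the report could be deleted with:
```
curl --header "Authorization: Bearer <admin_access_token>" \
    --request DELETE \
    "http://localhost:8008/_synapse/admin/v1/event_reports/1"
```
If the deletion succeeds, the server responds with `200` and the empty JSON object described above.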


@@ -307,8 +307,8 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a
```python
async def check_media_file_for_spam(
file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
file_info: "synapse.rest.media.v1._base.FileInfo",
file_wrapper: "synapse.media.media_storage.ReadableFileWrapper",
file_info: "synapse.media._base.FileInfo",
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
```


@@ -146,6 +146,9 @@ Note that this callback is called when the event has already been processed and
into the room, which means this callback cannot be used to deny persisting the event. To
deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead.
For any given event, this callback will be called on every worker process, even if that worker will not end up
acting on that event. This callback will not be called for events that are marked as rejected.
If multiple modules implement this callback, Synapse runs them all in order.
### `check_can_shutdown_room`
@@ -251,6 +254,11 @@ If multiple modules implement this callback, Synapse runs them all in order.
_First introduced in Synapse v1.56.0_
**<span style="color:red">
This callback is deprecated in favour of the `on_add_user_third_party_identifier` callback, which
features the same functionality. The only difference is in name.
</span>**
```python
async def on_threepid_bind(user_id: str, medium: str, address: str) -> None:
```
@@ -265,6 +273,44 @@ server_.
If multiple modules implement this callback, Synapse runs them all in order.
### `on_add_user_third_party_identifier`
_First introduced in Synapse v1.79.0_
```python
async def on_add_user_third_party_identifier(user_id: str, medium: str, address: str) -> None:
```
Called after successfully creating an association between a user and a third-party identifier
(email address, phone number). The module is given the Matrix ID of the user the
association is for, as well as the medium (`email` or `msisdn`) and address of the
third-party identifier (e.g. an email address).
Note that this callback is _not_ called if a user attempts to bind their third-party identifier
to an identity server (via a call to [`POST
/_matrix/client/v3/account/3pid/bind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidbind)).
If multiple modules implement this callback, Synapse runs them all in order.
### `on_remove_user_third_party_identifier`
_First introduced in Synapse v1.79.0_
```python
async def on_remove_user_third_party_identifier(user_id: str, medium: str, address: str) -> None:
```
Called after successfully removing an association between a user and a third-party identifier
(email address, phone number). The module is given the Matrix ID of the user the
association is for, as well as the medium (`email` or `msisdn`) and address of the
third-party identifier (e.g. an email address).
Note that this callback is _not_ called if a user attempts to unbind their third-party
identifier from an identity server (via a call to [`POST
/_matrix/client/v3/account/3pid/unbind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidunbind)).
If multiple modules implement this callback, Synapse runs them all in order.
## Example
The example below is a module that implements the third-party rules callback
@@ -297,4 +343,4 @@ class EventCensorer:
)
event_dict["content"] = new_event_content
return event_dict
```


@@ -83,3 +83,59 @@ the callback name as the argument name and the function as its value. A
Callbacks for each category can be found on their respective page of the
[Synapse documentation website](https://matrix-org.github.io/synapse).
## Caching
_Added in Synapse 1.74.0._
Modules can leverage Synapse's caching tools to manage their own cached functions. This
can be helpful for modules that need to repeatedly request the same data from the database
or a remote service.
Functions to be cached must be decorated with `@cached()` (which can be imported from
`synapse.module_api`) and registered with the
[`ModuleApi.register_cached_function`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L888)
API when initialising the module. To invalidate an entry in a cache, the module must use the
[`ModuleApi.invalidate_cache`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L904)
API, passing the cached function and the key(s) of the entry to invalidate.
Below is an example of a simple module using a cached function:
```python
from typing import Any
from synapse.module_api import cached, ModuleApi
class MyModule:
def __init__(self, config: Any, api: ModuleApi):
self.api = api
# Register the cached function so Synapse knows how to correctly invalidate
# entries for it.
self.api.register_cached_function(self.get_department_for_user)
@cached()
async def get_department_for_user(self, user_id: str) -> str:
"""A function with a cache."""
# Request a department from an external service.
response = await self.http_client.get_json(
"https://int.example.com/users", {"user_id": user_id}
)
return response["department"]
async def do_something_with_users(self) -> None:
"""Calls the cached function and then invalidates an entry in its cache."""
user_id = "@alice:example.com"
# Get the user's department. Since get_department_for_user is wrapped with a cache,
# the return value for this user_id will be cached.
department = await self.get_department_for_user(user_id)
# Do something with `department`...
# Let's say something has changed with our user, and the entry we have for
# them in the cache is out of date, so we want to invalidate it.
await self.api.invalidate_cache(self.get_department_for_user, (user_id,))
```
See the [`cached` docstring](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L190) for more details.


@@ -590,6 +590,47 @@ oidc_providers:
Note that the fields `client_id` and `client_secret` are taken from the CURL response above.
### Shibboleth with OIDC Plugin
[Shibboleth](https://www.shibboleth.net/) is an open-standard IdP solution widely used by universities.
1. Shibboleth needs the [OIDC Plugin](https://shibboleth.atlassian.net/wiki/spaces/IDPPLUGINS/pages/1376878976/OIDC+OP) installed and working correctly.
2. Create a new config on the IdP side, ensuring that the `client_id` and `client_secret`
are randomly generated.
```json
{
"client_id": "SOME-CLIENT-ID",
"client_secret": "SOME-SUPER-SECRET-SECRET",
"response_types": ["code"],
"grant_types": ["authorization_code"],
"scope": "openid profile email",
"redirect_uris": ["https://[synapse public baseurl]/_synapse/client/oidc/callback"]
}
```
Synapse config:
```yaml
oidc_providers:
# Shibboleth IDP
#
- idp_id: shibboleth
idp_name: "Shibboleth Login"
discover: true
issuer: "https://YOUR-IDP-URL.TLD"
client_id: "YOUR_CLIENT_ID"
client_secret: "YOUR-CLIENT-SECRECT-FROM-YOUR-IDP"
scopes: ["openid", "profile", "email"]
allow_existing_users: true
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
subject_claim: "sub"
localpart_template: "{{ user.sub.split('@')[0] }}"
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}"
```
### Twitch
1. Setup a developer account on [Twitch](https://dev.twitch.tv/)


@@ -88,6 +88,30 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.79.0
## The `on_threepid_bind` module callback method has been deprecated
Synapse v1.79.0 deprecates the
[`on_threepid_bind`](modules/third_party_rules_callbacks.md#on_threepid_bind)
"third-party rules" Synapse module callback method in favour of a new module method,
[`on_add_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_add_user_third_party_identifier).
`on_threepid_bind` will be removed in a future version of Synapse. You should check whether any Synapse
modules in use in your deployment are making use of `on_threepid_bind`, and update them where possible.
The arguments and functionality of the new method are the same.
The justification behind the name change is that the old method's name, `on_threepid_bind`, was
misleading. A user is considered to "bind" their third-party ID to their Matrix ID only if they
do so via an [identity server](https://spec.matrix.org/latest/identity-service-api/)
(so that users on other homeservers may find them). But this method was not called in that case -
it was only called when a user added a third-party identifier on the local homeserver.
Module developers may also be interested in the related
[`on_remove_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_remove_user_third_party_identifier)
module callback method that was also added in Synapse v1.79.0. This new method is called when a
user removes a third-party identifier from their account.
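As a sketch of how a module might adopt the new callbacks, the hypothetical module below registers both via `ModuleApi.register_third_party_rules_callbacks`; the module name and the logging it performs are illustrative, not part of Synapse:
```python
import logging
from typing import Any

from synapse.module_api import ModuleApi

logger = logging.getLogger(__name__)


class ThreepidAuditModule:
    """Hypothetical module that logs third-party identifier changes."""

    def __init__(self, config: Any, api: ModuleApi):
        # Previously this module registered on_threepid_bind; the new
        # callbacks receive the same (user_id, medium, address) arguments.
        api.register_third_party_rules_callbacks(
            on_add_user_third_party_identifier=self.on_add_user_third_party_identifier,
            on_remove_user_third_party_identifier=self.on_remove_user_third_party_identifier,
        )

    async def on_add_user_third_party_identifier(
        self, user_id: str, medium: str, address: str
    ) -> None:
        logger.info("%s added a %s threepid: %s", user_id, medium, address)

    async def on_remove_user_third_party_identifier(
        self, user_id: str, medium: str, address: str
    ) -> None:
        logger.info("%s removed a %s threepid: %s", user_id, medium, address)
```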
# Upgrading to v1.78.0
## Deprecate the `/_synapse/admin/v1/media/<server_name>/delete` admin API


@@ -70,13 +70,55 @@ output-directory
│ ├───state
│ ├───invite_state
│ └───knock_state
└───user_data
├───account_data
│ ├───global
│ └───<room_id>
├───connections
├───devices
└───profile
├───user_data
│ ├───account_data
│ │ ├───global
│ │ └───<room_id>
│ ├───connections
│ ├───devices
│ └───profile
└───media_ids
└───<media_id>
```
The `media_ids` folder contains only the metadata of the media uploaded by the user.
It does not contain the media itself.
Furthermore, only the `media_ids` that Synapse manages itself are exported.
If another media repository (e.g. [matrix-media-repo](https://github.com/turt2live/matrix-media-repo))
is used, the data must be exported separately.
Using the `media_ids`, the media files can be downloaded.
Media that have been sent in encrypted rooms are only retrieved in encrypted form.
The following script can help with downloading the media files:
```bash
#!/usr/bin/env bash
# Parameters
#
# source_directory: Directory which contains the export with the media_ids.
# target_directory: Directory into which all files are to be downloaded.
# repository_url: Address of the media repository or media worker.
# serverName: Name of the server (`server_name` from homeserver.yaml).
#
# Example:
# ./download_media.sh /tmp/export_data/media_ids/ /tmp/export_data/media_files/ http://localhost:8008 matrix.example.com
source_directory=$1
target_directory=$2
repository_url=$3
serverName=$4
mkdir -p "$target_directory"
for file in "$source_directory"/*; do
filename=$(basename "$file")
url="$repository_url/_matrix/media/v3/download/$serverName/$filename"
echo "Downloading $filename - $url"
if ! wget -o /dev/null -P "$target_directory" "$url"; then
echo "Could not download $filename"
fi
done
```
Manually resetting passwords
@@ -87,7 +129,7 @@ can reset a user's password using the [admin API](../../admin_api/user_admin_api
I have a problem with my server. Can I just delete my database and start again?
---
Deleting your database is unlikely to make anything better.
It's easy to make the mistake of thinking that you can start again from a clean
slate by dropping your database, but things don't work like that in a federated
@@ -102,7 +144,7 @@ Come and seek help in https://matrix.to/#/#synapse:matrix.org.
There are two exceptions when it might be sensible to delete your database and start again:
* You have *never* joined any rooms which are federated with other servers. For
instance, a local deployment which the outside world can't talk to.
* You are changing the `server_name` in the homeserver configuration. In effect
this makes your server a completely new one from the point of view of the network,
so in this case it makes sense to start with a clean database.
@@ -115,7 +157,7 @@ Using the following curl command:
curl -H 'Authorization: Bearer <access-token>' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/<room-alias>
```
`<access-token>` - can be obtained in riot by looking in the riot settings, down the bottom is:
Access Token:\<click to reveal\>
`<room-alias>` - the room alias, e.g. #my_room:matrix.org; this possibly needs to be URL encoded also, for example %23my_room%3Amatrix.org
@@ -152,13 +194,13 @@ What are the biggest rooms on my server?
---
```sql
SELECT s.canonical_alias, g.room_id, count(*) AS num_rows
FROM
state_groups_state AS g,
room_stats_state AS s
WHERE g.room_id = s.room_id
GROUP BY s.canonical_alias, g.room_id
ORDER BY num_rows desc
LIMIT 10;
```


@@ -1105,7 +1105,7 @@ This setting should only be used in very specific cases, such as
federation over Tor hidden services and similar. For private networks
of homeservers, you likely want to use a private CA instead.
Only effective if `federation_verify_certicates` is `true`.
Only effective if `federation_verify_certificates` is `true`.
Example configuration:
```yaml
@@ -1518,11 +1518,11 @@ rc_registration_token_validity:
This option specifies several limits for login:
* `address` ratelimits login requests based on the client's IP
address. Defaults to `per_second: 0.17`, `burst_count: 3`.
address. Defaults to `per_second: 0.003`, `burst_count: 5`.
* `account` ratelimits login requests based on the account the
client is attempting to log into. Defaults to `per_second: 0.17`,
`burst_count: 3`.
client is attempting to log into. Defaults to `per_second: 0.03`,
`burst_count: 5`.
* `failed_attempts` ratelimits login requests based on the account the
client is attempting to log into, based on the amount of failed login
@@ -2227,8 +2227,8 @@ allows the shared secret to be specified in an external file.
The file should be a plain text file, containing only the shared secret.
If this file does not exist, Synapse will create a new signing
key on startup and store it in this file.
If this file does not exist, Synapse will create a new shared
secret on startup and store it in this file.
Example configuration:
```yaml


@@ -232,6 +232,7 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
^/_matrix/client/v1/rooms/.*/timestamp_to_event$
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
# Encryption requests
^/_matrix/client/(r0|v3|unstable)/keys/query$
@@ -251,6 +252,7 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
^/_matrix/client/(api/v1|r0|v3|unstable)/join/
^/_matrix/client/(api/v1|r0|v3|unstable)/knock/
^/_matrix/client/(api/v1|r0|v3|unstable)/profile/
# Account data requests


@@ -36,9 +36,6 @@ exclude = (?x)
[mypy-synapse.federation.transport.client]
disallow_untyped_defs = False
[mypy-synapse.http.client]
disallow_untyped_defs = False
[mypy-synapse.http.matrixfederationclient]
disallow_untyped_defs = False

poetry.lock

@@ -90,32 +90,46 @@ typecheck = ["mypy"]
[[package]]
name = "black"
version = "22.12.0"
version = "23.1.0"
description = "The uncompromising code formatter."
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"},
{file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"},
{file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"},
{file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"},
{file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"},
{file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"},
{file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"},
{file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"},
{file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"},
{file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"},
{file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"},
{file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"},
{file = "black-23.1.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:b6a92a41ee34b883b359998f0c8e6eb8e99803aa8bf3123bf2b2e6fec505a221"},
{file = "black-23.1.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:57c18c5165c1dbe291d5306e53fb3988122890e57bd9b3dcb75f967f13411a26"},
{file = "black-23.1.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:9880d7d419bb7e709b37e28deb5e68a49227713b623c72b2b931028ea65f619b"},
{file = "black-23.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104"},
{file = "black-23.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9afd3f493666a0cd8f8df9a0200c6359ac53940cbde049dcb1a7eb6ee2dd7074"},
{file = "black-23.1.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:bfffba28dc52a58f04492181392ee380e95262af14ee01d4bc7bb1b1c6ca8d27"},
{file = "black-23.1.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c1c476bc7b7d021321e7d93dc2cbd78ce103b84d5a4cf97ed535fbc0d6660648"},
{file = "black-23.1.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:382998821f58e5c8238d3166c492139573325287820963d2f7de4d518bd76958"},
{file = "black-23.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf649fda611c8550ca9d7592b69f0637218c2369b7744694c5e4902873b2f3a"},
{file = "black-23.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:121ca7f10b4a01fd99951234abdbd97728e1240be89fde18480ffac16503d481"},
{file = "black-23.1.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a8471939da5e824b891b25751955be52ee7f8a30a916d570a5ba8e0f2eb2ecad"},
{file = "black-23.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8178318cb74f98bc571eef19068f6ab5613b3e59d4f47771582f04e175570ed8"},
{file = "black-23.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a436e7881d33acaf2536c46a454bb964a50eff59b21b51c6ccf5a40601fbef24"},
{file = "black-23.1.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:a59db0a2094d2259c554676403fa2fac3473ccf1354c1c63eccf7ae65aac8ab6"},
{file = "black-23.1.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd"},
{file = "black-23.1.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:49f7b39e30f326a34b5c9a4213213a6b221d7ae9d58ec70df1c4a307cf2a1580"},
{file = "black-23.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:162e37d49e93bd6eb6f1afc3e17a3d23a823042530c37c3c42eeeaf026f38468"},
{file = "black-23.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b70eb40a78dfac24842458476135f9b99ab952dd3f2dab738c1881a9b38b753"},
{file = "black-23.1.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:a29650759a6a0944e7cca036674655c2f0f63806ddecc45ed40b7b8aa314b651"},
{file = "black-23.1.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:bb460c8561c8c1bec7824ecbc3ce085eb50005883a6203dcfb0122e95797ee06"},
{file = "black-23.1.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739"},
{file = "black-23.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a951cc83ab535d248c89f300eccbd625e80ab880fbcfb5ac8afb5f01a258ac9"},
{file = "black-23.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0680d4380db3719ebcfb2613f34e86c8e6d15ffeabcf8ec59355c5e7b85bb555"},
{file = "black-23.1.0-py3-none-any.whl", hash = "sha256:7a0f701d314cfa0896b9001df70a530eb2472babb76086344e688829efd97d32"},
{file = "black-23.1.0.tar.gz", hash = "sha256:b0bd97bea8903f5a2ba7219257a44e3f1f9d00073d6cc1add68f0beec69692ac"},
]
[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
packaging = ">=22.0"
pathspec = ">=0.9.0"
platformdirs = ">=2"
tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""}
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
@@ -1971,28 +1985,29 @@ jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
[[package]]
name = "ruff"
version = "0.0.237"
version = "0.0.252"
description = "An extremely fast Python linter, written in Rust."
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.0.237-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:2ea04d826ffca58a7ae926115a801960c757d53c9027f2ca9acbe84c9f2b2f04"},
{file = "ruff-0.0.237-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:8ed113937fab9f73f8c1a6c0350bb4fe03e951370139c6e0adb81f48a8dcf4c6"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9bcb71a3efb5fe886eb48d739cfae5df4a15617e7b5a7668aa45ebf74c0d3fa"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:80ce10718abbf502818c0d650ebab99fdcef5e937a1ded3884493ddff804373c"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cc6cb7c1efcc260df5a939435649610a28f9f438b8b313384c8985ac6574f9f"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7eef0c7a1e45a4e30328ae101613575944cbf47a3a11494bf9827722da6c66b3"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0d122433a21ce4a21fbba34b73fc3add0ccddd1643b3ff5abb8d2767952f872e"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b76311335adda4de3c1d471e64e89a49abfeebf02647e3db064e7740e7f36ed6"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46c5977b643aaf2b6f84641265f835b6c7f67fcca38dbae08c4f15602e084ca0"},
{file = "ruff-0.0.237-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3d6ed86d0d4d742360a262d52191581f12b669a68e59ae3b52e80d7483b3d7b3"},
{file = "ruff-0.0.237-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fedfb60f986c26cdb1809db02866e68508db99910c587d2c4066a5c07aa85593"},
{file = "ruff-0.0.237-py3-none-musllinux_1_2_i686.whl", hash = "sha256:bb96796be5919871fa9ae7e88968ba9e14306d9a3f217ca6c204f68a5abeccdd"},
{file = "ruff-0.0.237-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ea239cfedf67b74ea4952e1074bb99a4281c2145441d70bc7e2f058d5c49f1c9"},
{file = "ruff-0.0.237-py3-none-win32.whl", hash = "sha256:8d6a1d21ae15da2b1dcffeee2606e90de0e6717e72957da7d16ab6ae18dd0058"},
{file = "ruff-0.0.237-py3-none-win_amd64.whl", hash = "sha256:525e5ec81cee29b993f77976026a6bf44528a14aa6edb1ef47bd8079147395ae"},
{file = "ruff-0.0.237.tar.gz", hash = "sha256:630c575f543733adf6c19a11d9a02ca9ecc364bd7140af8a4c854d4728be6b56"},
{file = "ruff-0.0.252-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:349367a227c4db7abbc3a9993efea8a608b5bea4bb4a1e5fc6f0d56819524f92"},
{file = "ruff-0.0.252-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:ce77f9106d96b4faf7865860fb5155b9deaf6f699d9c279118c5ad947739ecaf"},
{file = "ruff-0.0.252-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edadb0b050293b4e60dab979ba6a4e734d9c899cbe316a0ee5b65e3cdd39c750"},
{file = "ruff-0.0.252-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4efdae98937d1e4d23ab0b7fc7e8e6b6836cc7d2d42238ceeacbc793ef780542"},
{file = "ruff-0.0.252-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8546d879f7d3f669379a03e7b103d90e11901976ab508aeda59c03dfd8a359e"},
{file = "ruff-0.0.252-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:83fdc7169b6c1fb5fe8d1cdf345697f558c1b433ef97df9ca11defa2a8f3ee9e"},
{file = "ruff-0.0.252-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84ed9be1a17e2a556a571a5b959398633dd10910abd8dcf8b098061e746e892d"},
{file = "ruff-0.0.252-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f5e77bd9ba4438cf2ee32154e2673afe22f538ef29f5d65ca47e3dc46c42cf8"},
{file = "ruff-0.0.252-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5179b94b45c0f8512eaff3ab304c14714a46df2e9ca72a9d96084adc376b71"},
{file = "ruff-0.0.252-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:92efd8a71157595df5bc46aaaa0613d8a2fbc5cddc53ae7b749c16025c324732"},
{file = "ruff-0.0.252-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fd350fc10832cfd28e681d829a8aa83ea3e653326e0ea9d98637dfb8d46177d2"},
{file = "ruff-0.0.252-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f119240c9631216e846166e06023b1d878e25fbac93bf20da50069e91cfbfaee"},
{file = "ruff-0.0.252-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5c5a49f89f5ede93d16eddfeeadd7e5739ec703e8f63ac95eac30236b9e49da3"},
{file = "ruff-0.0.252-py3-none-win32.whl", hash = "sha256:89a897dc743f2fe063483ea666097e72e848f4bbe40493fe0533e61799959f6e"},
{file = "ruff-0.0.252-py3-none-win_amd64.whl", hash = "sha256:cdc89ad6ff88519b1fb1816ac82a9ad910762c90ff5fd64dda7691b72d36aff7"},
{file = "ruff-0.0.252-py3-none-win_arm64.whl", hash = "sha256:4b594a17cf53077165429486650658a0e1b2ac6ab88954f5afd50d2b1b5657a9"},
{file = "ruff-0.0.252.tar.gz", hash = "sha256:6992611ab7bdbe7204e4831c95ddd3febfeece2e6f5e44bbed044454c7db0f63"},
]
[[package]]
@@ -2560,66 +2575,14 @@ files = [
[[package]]
name = "types-commonmark"
version = "0.9.2.1"
version = "0.9.2.2"
description = "Typing stubs for commonmark"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-commonmark-0.9.2.1.tar.gz", hash = "sha256:db8277e6aeb83429265eccece98a24954a9a502dde7bc7cf840a8741abd96b86"},
{file = "types_commonmark-0.9.2.1-py3-none-any.whl", hash = "sha256:9d5f500cb7eced801bde728137b0a10667bd853d328db641d03141f189e3aab4"},
]
[[package]]
name = "types-cryptography"
version = "3.3.15"
description = "Typing stubs for cryptography"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-cryptography-3.3.15.tar.gz", hash = "sha256:a7983a75a7b88a18f88832008f0ef140b8d1097888ec1a0824ec8fb7e105273b"},
{file = "types_cryptography-3.3.15-py3-none-any.whl", hash = "sha256:d9b0dd5465d7898d400850e7f35e5518aa93a7e23d3e11757cd81b4777089046"},
]
[package.dependencies]
types-enum34 = "*"
types-ipaddress = "*"
[[package]]
name = "types-docutils"
version = "0.19.1.1"
description = "Typing stubs for docutils"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-docutils-0.19.1.1.tar.gz", hash = "sha256:be0a51ba1c7dd215d9d2df66d6845e63c1009b4bbf4c5beb87a0d9745cdba962"},
{file = "types_docutils-0.19.1.1-py3-none-any.whl", hash = "sha256:a024cada35f0c13cc45eb0b68a102719018a634013690b7fef723bcbfadbd1f1"},
]
[[package]]
name = "types-enum34"
version = "1.1.8"
description = "Typing stubs for enum34"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-enum34-1.1.8.tar.gz", hash = "sha256:6f9c769641d06d73a55e11c14d38ac76fcd37eb545ce79cebb6eec9d50a64110"},
{file = "types_enum34-1.1.8-py3-none-any.whl", hash = "sha256:05058c7a495f6bfaaca0be4aeac3cce5cdd80a2bad2aab01fd49a20bf4a0209d"},
]
[[package]]
name = "types-ipaddress"
version = "1.0.8"
description = "Typing stubs for ipaddress"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-ipaddress-1.0.8.tar.gz", hash = "sha256:a03df3be5935e50ba03fa843daabff539a041a28e73e0fce2c5705bee54d3841"},
{file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"},
{file = "types-commonmark-0.9.2.2.tar.gz", hash = "sha256:f3259350634c2ce68ae503398430482f7cf44e5cae3d344995e916fbf453b4be"},
{file = "types_commonmark-0.9.2.2-py3-none-any.whl", hash = "sha256:d3d878692615e7fbe47bf19ba67497837b135812d665012a3d42219c1f2c3a61"},
]
[[package]]
@@ -2636,54 +2599,54 @@ files = [
[[package]]
name = "types-opentracing"
version = "2.4.10.1"
version = "2.4.10.3"
description = "Typing stubs for opentracing"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-opentracing-2.4.10.1.tar.gz", hash = "sha256:49e7e52b8b6e221865a9201fc8c2df0bcda8e7098d4ebb35903dbfa4b4d29195"},
{file = "types_opentracing-2.4.10.1-py3-none-any.whl", hash = "sha256:eb63394acd793e7d9e327956242349fee14580a87c025408dc268d4dd883cc24"},
{file = "types-opentracing-2.4.10.3.tar.gz", hash = "sha256:b277f114265b41216714f9c77dffcab57038f1730fd141e2c55c5c9f6f2caa87"},
{file = "types_opentracing-2.4.10.3-py3-none-any.whl", hash = "sha256:60244d718fcd9de7043645ecaf597222d550432507098ab2e6268f7b589a7fa7"},
]
[[package]]
name = "types-pillow"
version = "9.4.0.13"
version = "9.4.0.17"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-Pillow-9.4.0.13.tar.gz", hash = "sha256:4510aa98a28947bf63f2b29edebbd11b7cff8647d90b867cec9b3674c0a8c321"},
{file = "types_Pillow-9.4.0.13-py3-none-any.whl", hash = "sha256:14a8a19021b8fe569a9fef9edc64a8d8a4aef340e38669d4fb3dc05cfd941130"},
{file = "types-Pillow-9.4.0.17.tar.gz", hash = "sha256:7f0e871d2d46fbb6bc7deca3e02dc552cf9c1e8b49deb9595509551be3954e49"},
{file = "types_Pillow-9.4.0.17-py3-none-any.whl", hash = "sha256:f8b848a05f17cb4d53d245c59bf560372b9778d4cfaf9705f6245009bf9f65f3"},
]
[[package]]
name = "types-psycopg2"
version = "2.9.21.4"
version = "2.9.21.8"
description = "Typing stubs for psycopg2"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-psycopg2-2.9.21.4.tar.gz", hash = "sha256:d43dda166a70d073ddac40718e06539836b5844c99b58ef8d4489a8df2edf5c0"},
{file = "types_psycopg2-2.9.21.4-py3-none-any.whl", hash = "sha256:6a05dca0856996aa37d7abe436751803bf47ec006cabbefea092e057f23bc95d"},
{file = "types-psycopg2-2.9.21.8.tar.gz", hash = "sha256:b629440ffcfdebd742fab07f777ff69aefdd19394a138c18e921a1964c3cf5f6"},
{file = "types_psycopg2-2.9.21.8-py3-none-any.whl", hash = "sha256:e747fbec6e0e2502b625bc7686d13cc62fc170e8ae920e5ba27fac946778eeb9"},
]
[[package]]
name = "types-pyopenssl"
version = "22.1.0.2"
version = "23.0.0.4"
description = "Typing stubs for pyOpenSSL"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},
{file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"},
{file = "types-pyOpenSSL-23.0.0.4.tar.gz", hash = "sha256:8b3550b6e19d51ce78aabd724b0d8ebd962081a5fce95e7f85a592dfcdbc16bf"},
{file = "types_pyOpenSSL-23.0.0.4-py3-none-any.whl", hash = "sha256:ad49e15bb8bb2f251b8fc24776f414d877629e44b1b049240063ab013b5a6a7d"},
]
[package.dependencies]
types-cryptography = "*"
cryptography = ">=35.0.0"
[[package]]
name = "types-pyyaml"
@@ -2714,19 +2677,16 @@ types-urllib3 = "<1.27"
[[package]]
name = "types-setuptools"
version = "67.3.0.1"
version = "67.5.0.0"
description = "Typing stubs for setuptools"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-setuptools-67.3.0.1.tar.gz", hash = "sha256:1a26d373036c720e566823b6edd664a2db4d138b6eeba856721ec1254203474f"},
{file = "types_setuptools-67.3.0.1-py3-none-any.whl", hash = "sha256:a7e0f0816b5b449f5bcdc0efa43da91ff81dbe6941f293a6490d68a450e130a1"},
{file = "types-setuptools-67.5.0.0.tar.gz", hash = "sha256:fa6f231eeb27e86b1d6e8260f73de300e91f99c205b9a5e21debd49f3726a849"},
{file = "types_setuptools-67.5.0.0-py3-none-any.whl", hash = "sha256:f7f4bf4ab777e88631d3a387bbfdd4d480a2a4693ca896130f8ef738370377b8"},
]
[package.dependencies]
types-docutils = "*"
[[package]]
name = "types-urllib3"
version = "1.26.10"
@@ -2741,14 +2701,14 @@ files = [
[[package]]
name = "typing-extensions"
version = "4.4.0"
version = "4.5.0"
description = "Backported and Experimental Type Hints for Python 3.7+"
category = "main"
optional = false
python-versions = ">=3.7"
files = [
{file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"},
{file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"},
{file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"},
{file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"},
]
[[package]]
@@ -3030,4 +2990,4 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
python-versions = "^3.7.1"
content-hash = "e12077711e5ff83f3c6038ea44c37bd49773799ec8245035b01094b7800c5c92"
content-hash = "7bcffef7b6e6d4b1113222e2ca152b3798c997872789c8a1ea01238f199d56fe"


@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.78.0rc1"
version = "1.79.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -313,7 +313,7 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.237"
ruff = "0.0.252"
# Typechecking
mypy = "*"


@@ -14,6 +14,7 @@
#![feature(test)]
use std::collections::BTreeSet;
use synapse::push::{
evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
PushRules, SimpleJsonValue,
@@ -44,7 +45,6 @@ fn bench_match_exact(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
10,
Some(0),
Default::default(),
@@ -53,15 +53,13 @@ fn bench_match_exact(b: &mut Bencher) {
vec![],
false,
false,
false,
)
.unwrap();
let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
EventMatchCondition {
key: "room_id".into(),
pattern: Some("!room:server".into()),
pattern_type: None,
pattern: "!room:server".into(),
},
));
@@ -93,7 +91,6 @@ fn bench_match_word(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
10,
Some(0),
Default::default(),
@@ -102,15 +99,13 @@ fn bench_match_word(b: &mut Bencher) {
vec![],
false,
false,
false,
)
.unwrap();
let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
EventMatchCondition {
key: "content.body".into(),
pattern: Some("test".into()),
pattern_type: None,
pattern: "test".into(),
},
));
@@ -142,7 +137,6 @@ fn bench_match_word_miss(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
10,
Some(0),
Default::default(),
@@ -151,15 +145,13 @@ fn bench_match_word_miss(b: &mut Bencher) {
vec![],
false,
false,
false,
)
.unwrap();
let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
EventMatchCondition {
key: "content.body".into(),
pattern: Some("foobar".into()),
pattern_type: None,
pattern: "foobar".into(),
},
));
@@ -191,7 +183,6 @@ fn bench_eval_message(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
10,
Some(0),
Default::default(),
@@ -200,7 +191,6 @@ fn bench_eval_message(b: &mut Bencher) {
vec![],
false,
false,
false,
)
.unwrap();


@@ -21,13 +21,13 @@ use lazy_static::lazy_static;
use serde_json::Value;
use super::KnownCondition;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
use crate::push::RelatedEventMatchCondition;
use crate::push::RelatedEventMatchTypeCondition;
use crate::push::SetTweak;
use crate::push::TweakValue;
use crate::push::{Action, ExactEventMatchCondition, SimpleJsonValue};
use crate::push::{Action, EventPropertyIsCondition, SimpleJsonValue};
use crate::push::{Condition, EventMatchTypeCondition};
use crate::push::{EventMatchCondition, EventMatchPatternType};
use crate::push::{EventPropertyIsTypeCondition, PushRule};
const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("highlight"),
@@ -72,8 +72,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("content.m.relates_to.rel_type"),
pattern: Some(Cow::Borrowed("m.replace")),
pattern_type: None,
pattern: Cow::Borrowed("m.replace"),
},
))]),
actions: Cow::Borrowed(&[]),
@@ -86,8 +85,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("content.msgtype"),
pattern: Some(Cow::Borrowed("m.notice")),
pattern_type: None,
pattern: Cow::Borrowed("m.notice"),
},
))]),
actions: Cow::Borrowed(&[Action::DontNotify]),
@@ -100,18 +98,15 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.member")),
pattern_type: None,
pattern: Cow::Borrowed("m.room.member"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.membership"),
pattern: Some(Cow::Borrowed("invite")),
pattern_type: None,
pattern: Cow::Borrowed("invite"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
Condition::Known(KnownCondition::EventMatchType(EventMatchTypeCondition {
key: Cow::Borrowed("state_key"),
pattern: None,
pattern_type: Some(Cow::Borrowed("user_id")),
pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION, SOUND_ACTION]),
@@ -124,8 +119,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.member")),
pattern_type: None,
pattern: Cow::Borrowed("m.room.member"),
},
))]),
actions: Cow::Borrowed(&[Action::DontNotify]),
@@ -135,11 +129,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
PushRule {
rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch(
RelatedEventMatchCondition {
key: Some(Cow::Borrowed("sender")),
pattern: None,
pattern_type: Some(Cow::Borrowed("user_id")),
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatchType(
RelatedEventMatchTypeCondition {
key: Cow::Borrowed("sender"),
pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId),
rel_type: Cow::Borrowed("m.in_reply_to"),
include_fallbacks: None,
},
@@ -151,7 +144,12 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
PushRule {
rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::IsUserMention)]),
conditions: Cow::Borrowed(&[Condition::Known(
KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
key: Cow::Borrowed("content.org.matrix.msc3952.mentions.user_ids"),
value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
}),
)]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
default: true,
default_enabled: true,
@@ -168,7 +166,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::ExactEventMatch(ExactEventMatchCondition {
Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
key: Cow::Borrowed("content.org.matrix.msc3952.mentions.room"),
value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
})),
@ -189,8 +187,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.body"),
pattern: Some(Cow::Borrowed("@room")),
pattern_type: None,
pattern: Cow::Borrowed("@room"),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
@ -203,13 +200,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.tombstone")),
pattern_type: None,
pattern: Cow::Borrowed("m.room.tombstone"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: Some(Cow::Borrowed("")),
pattern_type: None,
pattern: Cow::Borrowed(""),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
@ -222,8 +217,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.reaction")),
pattern_type: None,
pattern: Cow::Borrowed("m.reaction"),
},
))]),
actions: Cow::Borrowed(&[]),
@ -236,13 +230,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.server_acl")),
pattern_type: None,
pattern: Cow::Borrowed("m.room.server_acl"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: Some(Cow::Borrowed("")),
pattern_type: None,
pattern: Cow::Borrowed(""),
})),
]),
actions: Cow::Borrowed(&[]),
@ -255,8 +247,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.response")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc3381.poll.response"),
},
))]),
actions: Cow::Borrowed(&[]),
@ -268,11 +259,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
rule_id: Cow::Borrowed("global/content/.m.rule.contains_user_name"),
priority_class: 4,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatchType(
EventMatchTypeCondition {
key: Cow::Borrowed("content.body"),
pattern: None,
pattern_type: Some(Cow::Borrowed("user_localpart")),
pattern_type: Cow::Borrowed(&EventMatchPatternType::UserLocalpart),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
@ -287,8 +277,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.call.invite")),
pattern_type: None,
pattern: Cow::Borrowed("m.call.invite"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, RING_ACTION, HIGHLIGHT_FALSE_ACTION]),
@ -301,8 +290,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.message")),
pattern_type: None,
pattern: Cow::Borrowed("m.room.message"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@ -318,8 +306,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.encrypted")),
pattern_type: None,
pattern: Cow::Borrowed("m.room.encrypted"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@ -338,8 +325,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc1767.encrypted"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@ -363,8 +349,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc1767.message"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@ -388,8 +373,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc1767.file"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@ -413,8 +397,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc1767.image"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@ -438,8 +421,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc1767.video"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@ -463,8 +445,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc1767.audio"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@ -485,8 +466,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.message")),
pattern_type: None,
pattern: Cow::Borrowed("m.room.message"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
@ -499,8 +479,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.encrypted")),
pattern_type: None,
pattern: Cow::Borrowed("m.room.encrypted"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
@ -514,8 +493,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.encrypted")),
pattern_type: None,
pattern: Cow::Borrowed("m.encrypted"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@ -534,8 +512,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.message")),
pattern_type: None,
pattern: Cow::Borrowed("m.message"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@ -554,8 +531,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.file")),
pattern_type: None,
pattern: Cow::Borrowed("m.file"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@ -574,8 +550,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.image")),
pattern_type: None,
pattern: Cow::Borrowed("m.image"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@ -594,8 +569,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.video")),
pattern_type: None,
pattern: Cow::Borrowed("m.video"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@ -614,8 +588,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.audio")),
pattern_type: None,
pattern: Cow::Borrowed("m.audio"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@ -633,18 +606,15 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("im.vector.modular.widgets")),
pattern_type: None,
pattern: Cow::Borrowed("im.vector.modular.widgets"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.type"),
pattern: Some(Cow::Borrowed("jitsi")),
pattern_type: None,
pattern: Cow::Borrowed("jitsi"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: Some(Cow::Borrowed("*")),
pattern_type: None,
pattern: Cow::Borrowed("*"),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
@ -660,8 +630,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc3381.poll.start"),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]),
@ -674,8 +643,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc3381.poll.start"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify]),
@ -691,8 +659,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc3381.poll.end"),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]),
@ -705,8 +672,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")),
pattern_type: None,
pattern: Cow::Borrowed("org.matrix.msc3381.poll.end"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify]),

View File

@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{BTreeMap, BTreeSet};
use std::borrow::Cow;
use std::collections::BTreeMap;
use crate::push::JsonValue;
use anyhow::{Context, Error};
use lazy_static::lazy_static;
use log::warn;
@ -23,9 +23,10 @@ use regex::Regex;
use super::{
utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
Action, Condition, EventMatchCondition, ExactEventMatchCondition, FilteredPushRules,
KnownCondition, RelatedEventMatchCondition, SimpleJsonValue,
Action, Condition, EventPropertyIsCondition, FilteredPushRules, KnownCondition,
SimpleJsonValue,
};
use crate::push::{EventMatchPatternType, JsonValue};
lazy_static! {
/// Used to parse the `is` clause in the room member count condition.
@ -71,8 +72,6 @@ pub struct PushRuleEvaluator {
/// True if the event has a mentions property and MSC3952 support is enabled.
has_mentions: bool,
/// The user mentions that were part of the message.
user_mentions: BTreeSet<String>,
/// The number of users in the room.
room_member_count: u64,
@ -98,9 +97,6 @@ pub struct PushRuleEvaluator {
/// flag as MSC1767 (extensible events core).
msc3931_enabled: bool,
/// If MSC3758 (exact_event_match push rule condition) is enabled.
msc3758_exact_event_match: bool,
/// If MSC3966 (exact_event_property_contains push rule condition) is enabled.
msc3966_exact_event_property_contains: bool,
}
@ -113,7 +109,6 @@ impl PushRuleEvaluator {
pub fn py_new(
flattened_keys: BTreeMap<String, JsonValue>,
has_mentions: bool,
user_mentions: BTreeSet<String>,
room_member_count: u64,
sender_power_level: Option<i64>,
notification_power_levels: BTreeMap<String, i64>,
@ -121,7 +116,6 @@ impl PushRuleEvaluator {
related_event_match_enabled: bool,
room_version_feature_flags: Vec<String>,
msc3931_enabled: bool,
msc3758_exact_event_match: bool,
msc3966_exact_event_property_contains: bool,
) -> Result<Self, Error> {
let body = match flattened_keys.get("content.body") {
@ -133,7 +127,6 @@ impl PushRuleEvaluator {
flattened_keys,
body,
has_mentions,
user_mentions,
room_member_count,
notification_power_levels,
sender_power_level,
@ -141,7 +134,6 @@ impl PushRuleEvaluator {
related_event_match_enabled,
room_version_feature_flags,
msc3931_enabled,
msc3758_exact_event_match,
msc3966_exact_event_property_contains,
})
}
@ -256,24 +248,83 @@ impl PushRuleEvaluator {
};
let result = match known_condition {
KnownCondition::EventMatch(event_match) => {
self.match_event_match(event_match, user_id)?
}
KnownCondition::ExactEventMatch(exact_event_match) => {
self.match_exact_event_match(exact_event_match)?
}
KnownCondition::RelatedEventMatch(event_match) => {
self.match_related_event_match(event_match, user_id)?
}
KnownCondition::ExactEventPropertyContains(exact_event_match) => {
self.match_exact_event_property_contains(exact_event_match)?
}
KnownCondition::IsUserMention => {
if let Some(uid) = user_id {
self.user_mentions.contains(uid)
KnownCondition::EventMatch(event_match) => self.match_event_match(
&self.flattened_keys,
&event_match.key,
&event_match.pattern,
)?,
KnownCondition::EventMatchType(event_match) => {
// The `pattern_type` can be either "user_id" or "user_localpart";
// either way, if we don't have a `user_id` then the condition can't
// match.
let user_id = if let Some(user_id) = user_id {
user_id
} else {
false
}
return Ok(false);
};
let pattern = match &*event_match.pattern_type {
EventMatchPatternType::UserId => user_id,
EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
};
self.match_event_match(&self.flattened_keys, &event_match.key, pattern)?
}
KnownCondition::EventPropertyIs(event_property_is) => {
self.match_event_property_is(event_property_is)?
}
KnownCondition::RelatedEventMatch(event_match) => self.match_related_event_match(
&event_match.rel_type.clone(),
event_match.include_fallbacks,
event_match.key.clone(),
event_match.pattern.clone(),
)?,
KnownCondition::RelatedEventMatchType(event_match) => {
// The `pattern_type` can be either "user_id" or "user_localpart";
// either way, if we don't have a `user_id` then the condition can't
// match.
let user_id = if let Some(user_id) = user_id {
user_id
} else {
return Ok(false);
};
let pattern = match &*event_match.pattern_type {
EventMatchPatternType::UserId => user_id,
EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
};
self.match_related_event_match(
&event_match.rel_type.clone(),
event_match.include_fallbacks,
Some(event_match.key.clone()),
Some(Cow::Borrowed(pattern)),
)?
}
KnownCondition::ExactEventPropertyContains(event_property_is) => self
.match_exact_event_property_contains(
event_property_is.key.clone(),
event_property_is.value.clone(),
)?,
KnownCondition::ExactEventPropertyContainsType(exact_event_match) => {
// The `pattern_type` can be either "user_id" or "user_localpart";
// either way, if we don't have a `user_id` then the condition can't
// match.
let user_id = if let Some(user_id) = user_id {
user_id
} else {
return Ok(false);
};
let pattern = match &*exact_event_match.value_type {
EventMatchPatternType::UserId => user_id,
EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
};
self.match_exact_event_property_contains(
exact_event_match.key.clone(),
Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())),
)?
}
KnownCondition::ContainsDisplayName => {
if let Some(dn) = display_name {
@ -325,135 +376,18 @@ impl PushRuleEvaluator {
/// Evaluates an `event_match` condition.
fn match_event_match(
&self,
event_match: &EventMatchCondition,
user_id: Option<&str>,
flattened_event: &BTreeMap<String, JsonValue>,
key: &str,
pattern: &str,
) -> Result<bool, Error> {
let pattern = if let Some(pattern) = &event_match.pattern {
pattern
} else if let Some(pattern_type) = &event_match.pattern_type {
// The `pattern_type` can either be "user_id" or "user_localpart",
// either way if we don't have a `user_id` then the condition can't
// match.
let user_id = if let Some(user_id) = user_id {
user_id
} else {
return Ok(false);
};
match &**pattern_type {
"user_id" => user_id,
"user_localpart" => get_localpart_from_id(user_id)?,
_ => return Ok(false),
}
} else {
return Ok(false);
};
let haystack = if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) =
self.flattened_keys.get(&*event_match.key)
flattened_event.get(key)
{
haystack
} else {
return Ok(false);
};
// For the content.body we match against "words", but for everything
// else we match against the entire value.
let match_type = if event_match.key == "content.body" {
GlobMatchType::Word
} else {
GlobMatchType::Whole
};
let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
compiled_pattern.is_match(haystack)
}
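The "words" versus "whole value" distinction above can be restated outside the evaluator. A rough Python illustration of the two glob modes (this is not Synapse's matcher, which is the Rust `get_glob_matcher` seen above and is case-insensitive; `fnmatch` is only an approximation):

import fnmatch
import re

def event_match(key: str, pattern: str, value: str) -> bool:
    # Rough illustration: "content.body" patterns match against individual
    # words, while every other key matches against the entire value.
    if key == "content.body":
        words = [w for w in re.split(r"\W+", value) if w]
        return any(fnmatch.fnmatchcase(word, pattern) for word in words)
    return fnmatch.fnmatchcase(value, pattern)

assert event_match("content.body", "coffee", "I like coffee!")
assert event_match("type", "m.room.*", "m.room.message")
assert not event_match("type", "m.room", "m.room.message")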
/// Evaluates a `exact_event_match` condition. (MSC3758)
fn match_exact_event_match(
&self,
exact_event_match: &ExactEventMatchCondition,
) -> Result<bool, Error> {
// First check if the feature is enabled.
if !self.msc3758_exact_event_match {
return Ok(false);
}
let value = &exact_event_match.value;
let haystack = if let Some(JsonValue::Value(haystack)) =
self.flattened_keys.get(&*exact_event_match.key)
{
haystack
} else {
return Ok(false);
};
Ok(haystack == &**value)
}
/// Evaluates a `related_event_match` condition. (MSC3664)
fn match_related_event_match(
&self,
event_match: &RelatedEventMatchCondition,
user_id: Option<&str>,
) -> Result<bool, Error> {
// First check if related event matching is enabled...
if !self.related_event_match_enabled {
return Ok(false);
}
// get the related event, fail if there is none.
let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) {
event
} else {
return Ok(false);
};
// If we are not matching fallbacks, don't match if our special key indicating this is a
// fallback relation is present.
if !event_match.include_fallbacks.unwrap_or(false)
&& event.contains_key("im.vector.is_falling_back")
{
return Ok(false);
}
// If we have no key, accept the event as matching: the related event exists, and
// there are no fields to match against.
let key = if let Some(key) = &event_match.key {
key
} else {
return Ok(true);
};
let pattern = if let Some(pattern) = &event_match.pattern {
pattern
} else if let Some(pattern_type) = &event_match.pattern_type {
// The `pattern_type` can either be "user_id" or "user_localpart",
// either way if we don't have a `user_id` then the condition can't
// match.
let user_id = if let Some(user_id) = user_id {
user_id
} else {
return Ok(false);
};
match &**pattern_type {
"user_id" => user_id,
"user_localpart" => get_localpart_from_id(user_id)?,
_ => return Ok(false),
}
} else {
return Ok(false);
};
let haystack =
if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) = event.get(&**key) {
haystack
} else {
return Ok(false);
};
// For the content.body we match against "words", but for everything
// else we match against the entire value.
let match_type = if key == "content.body" {
@ -466,27 +400,78 @@ impl PushRuleEvaluator {
compiled_pattern.is_match(haystack)
}
/// Evaluates a `exact_event_property_contains` condition. (MSC3758)
fn match_exact_event_property_contains(
/// Evaluates an `event_property_is` condition.
fn match_event_property_is(
&self,
exact_event_match: &ExactEventMatchCondition,
event_property_is: &EventPropertyIsCondition,
) -> Result<bool, Error> {
// First check if the feature is enabled.
if !self.msc3966_exact_event_property_contains {
return Ok(false);
}
let value = &event_property_is.value;
let value = &exact_event_match.value;
let haystack = if let Some(JsonValue::Array(haystack)) =
self.flattened_keys.get(&*exact_event_match.key)
let haystack = if let Some(JsonValue::Value(haystack)) =
self.flattened_keys.get(&*event_property_is.key)
{
haystack
} else {
return Ok(false);
};
Ok(haystack.contains(&**value))
Ok(haystack == &**value)
}
/// Evaluates a `related_event_match` condition. (MSC3664)
fn match_related_event_match(
&self,
rel_type: &str,
include_fallbacks: Option<bool>,
key: Option<Cow<str>>,
pattern: Option<Cow<str>>,
) -> Result<bool, Error> {
// First check if related event matching is enabled...
if !self.related_event_match_enabled {
return Ok(false);
}
// get the related event, fail if there is none.
let event = if let Some(event) = self.related_events_flattened.get(rel_type) {
event
} else {
return Ok(false);
};
// If we are not matching fallbacks, don't match if our special key indicating this is a
// fallback relation is present.
if !include_fallbacks.unwrap_or(false) && event.contains_key("im.vector.is_falling_back") {
return Ok(false);
}
match (key, pattern) {
// if we have no key, accept the event as matching.
(None, _) => Ok(true),
// There was a key, so we *must* have a pattern to go with it.
(Some(_), None) => Ok(false),
// If there is a key & pattern, check if they're in the flattened event (given by rel_type).
(Some(key), Some(pattern)) => self.match_event_match(event, &key, &pattern),
}
}
/// Evaluates an `exact_event_property_contains` condition. (MSC3966)
fn match_exact_event_property_contains(
&self,
key: Cow<str>,
value: Cow<SimpleJsonValue>,
) -> Result<bool, Error> {
// First check if the feature is enabled.
if !self.msc3966_exact_event_property_contains {
return Ok(false);
}
let haystack = if let Some(JsonValue::Array(haystack)) = self.flattened_keys.get(&*key) {
haystack
} else {
return Ok(false);
};
Ok(haystack.contains(&value))
}
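The check above is compact enough to restate in Python (an illustration, not Synapse's code): the flattened property must be a JSON array containing the exact value. The key used here is the MSC3952 mentions property that the rewritten `.is_user_mention` rule targets:

flattened_keys = {
    "content.org.matrix.msc3952.mentions.user_ids": ["@alice:example.org"]
}

def exact_event_property_contains(key: str, value: object) -> bool:
    # The property must exist, be an array, and contain the exact value.
    prop = flattened_keys.get(key)
    return isinstance(prop, list) and value in prop

assert exact_event_property_contains(
    "content.org.matrix.msc3952.mentions.user_ids", "@alice:example.org"
)
assert not exact_event_property_contains(
    "content.org.matrix.msc3952.mentions.user_ids", "@bob:example.org"
)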
/// Match the member count against an 'is' condition
@ -523,7 +508,6 @@ fn push_rule_evaluator() {
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
10,
Some(0),
BTreeMap::new(),
@ -532,7 +516,6 @@ fn push_rule_evaluator() {
vec![],
true,
true,
true,
)
.unwrap();
@ -555,7 +538,6 @@ fn test_requires_room_version_supports_condition() {
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
10,
Some(0),
BTreeMap::new(),
@ -564,7 +546,6 @@ fn test_requires_room_version_supports_condition() {
flags,
true,
true,
true,
)
.unwrap();

View File

@ -328,14 +328,23 @@ pub enum Condition {
#[serde(tag = "kind")]
pub enum KnownCondition {
EventMatch(EventMatchCondition),
#[serde(rename = "com.beeper.msc3758.exact_event_match")]
ExactEventMatch(ExactEventMatchCondition),
// Identical to event_match but gives predefined patterns. Cannot be added by users.
#[serde(skip_deserializing, rename = "event_match")]
EventMatchType(EventMatchTypeCondition),
EventPropertyIs(EventPropertyIsCondition),
#[serde(rename = "im.nheko.msc3664.related_event_match")]
RelatedEventMatch(RelatedEventMatchCondition),
// Identical to related_event_match but gives predefined patterns. Cannot be added by users.
#[serde(skip_deserializing, rename = "im.nheko.msc3664.related_event_match")]
RelatedEventMatchType(RelatedEventMatchTypeCondition),
#[serde(rename = "org.matrix.msc3966.exact_event_property_contains")]
ExactEventPropertyContains(ExactEventMatchCondition),
#[serde(rename = "org.matrix.msc3952.is_user_mention")]
IsUserMention,
ExactEventPropertyContains(EventPropertyIsCondition),
// Identical to exact_event_property_contains but gives predefined patterns. Cannot be added by users.
#[serde(
skip_deserializing,
rename = "org.matrix.msc3966.exact_event_property_contains"
)]
ExactEventPropertyContainsType(EventPropertyIsTypeCondition),
ContainsDisplayName,
RoomMemberCount {
#[serde(skip_serializing_if = "Option::is_none")]
@ -362,23 +371,45 @@ impl<'source> FromPyObject<'source> for Condition {
}
}
/// The body of a [`Condition::EventMatch`]
/// The body of a [`Condition::EventMatch`] with a pattern.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EventMatchCondition {
pub key: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
pub pattern: Option<Cow<'static, str>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub pattern_type: Option<Cow<'static, str>>,
pub pattern: Cow<'static, str>,
}
/// The body of a [`Condition::ExactEventMatch`]
#[derive(Serialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum EventMatchPatternType {
UserId,
UserLocalpart,
}
/// The body of a [`Condition::EventMatch`] that uses user_id or user_localpart as a pattern.
#[derive(Serialize, Debug, Clone)]
pub struct EventMatchTypeCondition {
pub key: Cow<'static, str>,
// During serialization, the pattern_type property gets replaced with a
// pattern property of the correct value in synapse.push.clientformat.format_push_rules_for_user.
pub pattern_type: Cow<'static, EventMatchPatternType>,
}
/// The body of a [`Condition::EventPropertyIs`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExactEventMatchCondition {
pub struct EventPropertyIsCondition {
pub key: Cow<'static, str>,
pub value: Cow<'static, SimpleJsonValue>,
}
/// The body of a [`Condition::EventPropertyIs`] that uses user_id or user_localpart as a pattern.
#[derive(Serialize, Debug, Clone)]
pub struct EventPropertyIsTypeCondition {
pub key: Cow<'static, str>,
// During serialization, the pattern_type property gets replaced with a
// pattern property of the correct value in synapse.push.clientformat.format_push_rules_for_user.
pub value_type: Cow<'static, EventMatchPatternType>,
}
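As the comments on both structs note, the `pattern_type`/`value_type` markers are resolved on the Python side when rules are rendered for a client. A hedged sketch of that substitution (the real logic lives in synapse.push.clientformat.format_push_rules_for_user; this standalone helper is hypothetical):

def substitute_pattern_type(condition: dict, user_id: str, localpart: str) -> dict:
    # Hypothetical helper: swap the server-internal pattern_type marker for a
    # concrete pattern before the rule is sent to a client.
    condition = dict(condition)
    pattern_type = condition.pop("pattern_type", None)
    if pattern_type == "user_id":
        condition["pattern"] = user_id
    elif pattern_type == "user_localpart":
        condition["pattern"] = localpart
    return condition

cond = {"kind": "event_match", "key": "content.body", "pattern_type": "user_localpart"}
print(substitute_pattern_type(cond, "@alice:example.org", "alice"))
# {'kind': 'event_match', 'key': 'content.body', 'pattern': 'alice'}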
/// The body of a [`Condition::RelatedEventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RelatedEventMatchCondition {
@ -386,8 +417,18 @@ pub struct RelatedEventMatchCondition {
pub key: Option<Cow<'static, str>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub pattern: Option<Cow<'static, str>>,
pub rel_type: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
pub pattern_type: Option<Cow<'static, str>>,
pub include_fallbacks: Option<bool>,
}
/// The body of a [`Condition::RelatedEventMatch`] that uses user_id or user_localpart as a pattern.
#[derive(Serialize, Debug, Clone)]
pub struct RelatedEventMatchTypeCondition {
// This is only used if pattern_type exists (and thus key must exist), so is
// a bit simpler than RelatedEventMatchCondition.
pub key: Cow<'static, str>,
pub pattern_type: Cow<'static, EventMatchPatternType>,
pub rel_type: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
pub include_fallbacks: Option<bool>,
@ -571,8 +612,7 @@ impl FilteredPushRules {
fn test_serialize_condition() {
let condition = Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: "content.body".into(),
pattern: Some("coffee".into()),
pattern_type: None,
pattern: "coffee".into(),
}));
let json = serde_json::to_string(&condition).unwrap();
@ -586,7 +626,33 @@ fn test_serialize_condition() {
fn test_deserialize_condition() {
let json = r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#;
let _: Condition = serde_json::from_str(json).unwrap();
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::EventMatch(_))
));
}
#[test]
fn test_serialize_event_match_condition_with_pattern_type() {
let condition = Condition::Known(KnownCondition::EventMatchType(EventMatchTypeCondition {
key: "content.body".into(),
pattern_type: Cow::Owned(EventMatchPatternType::UserId),
}));
let json = serde_json::to_string(&condition).unwrap();
assert_eq!(
json,
r#"{"kind":"event_match","key":"content.body","pattern_type":"user_id"}"#
)
}
#[test]
fn test_cannot_deserialize_event_match_condition_with_pattern_type() {
let json = r#"{"kind":"event_match","key":"content.body","pattern_type":"user_id"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(condition, Condition::Unknown(_)));
}
#[test]
@ -600,6 +666,37 @@ fn test_deserialize_unstable_msc3664_condition() {
));
}
#[test]
fn test_serialize_unstable_msc3664_condition_with_pattern_type() {
let condition = Condition::Known(KnownCondition::RelatedEventMatchType(
RelatedEventMatchTypeCondition {
key: "content.body".into(),
pattern_type: Cow::Owned(EventMatchPatternType::UserId),
rel_type: "m.in_reply_to".into(),
include_fallbacks: Some(true),
},
));
let json = serde_json::to_string(&condition).unwrap();
assert_eq!(
json,
r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern_type":"user_id","rel_type":"m.in_reply_to","include_fallbacks":true}"#
)
}
#[test]
fn test_cannot_deserialize_unstable_msc3664_condition_with_pattern_type() {
let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern_type":"user_id","rel_type":"m.in_reply_to"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
// Since `pattern` is optional on RelatedEventMatch, this deserializes to that
// variant instead of RelatedEventMatchType.
assert!(matches!(
condition,
Condition::Known(KnownCondition::RelatedEventMatch(_))
));
}
#[test]
fn test_deserialize_unstable_msc3931_condition() {
let json =
@ -613,55 +710,41 @@ fn test_deserialize_unstable_msc3931_condition() {
}
#[test]
fn test_deserialize_unstable_msc3758_condition() {
fn test_deserialize_event_property_is_condition() {
// A string condition should work.
let json =
r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":"foo"}"#;
let json = r#"{"kind":"event_property_is","key":"content.value","value":"foo"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::ExactEventMatch(_))
Condition::Known(KnownCondition::EventPropertyIs(_))
));
// A boolean condition should work.
let json =
r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":true}"#;
let json = r#"{"kind":"event_property_is","key":"content.value","value":true}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::ExactEventMatch(_))
Condition::Known(KnownCondition::EventPropertyIs(_))
));
// An integer condition should work.
let json = r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":1}"#;
let json = r#"{"kind":"event_property_is","key":"content.value","value":1}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::ExactEventMatch(_))
Condition::Known(KnownCondition::EventPropertyIs(_))
));
// A null condition should work
let json =
r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":null}"#;
let json = r#"{"kind":"event_property_is","key":"content.value","value":null}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::ExactEventMatch(_))
));
}
#[test]
fn test_deserialize_unstable_msc3952_user_condition() {
let json = r#"{"kind":"org.matrix.msc3952.is_user_mention"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::IsUserMention)
Condition::Known(KnownCondition::EventPropertyIs(_))
));
}

View File

@ -59,6 +59,11 @@ Run the complement test suite on Synapse.
is important.
Not suitable for use in CI in case the editable environment is impure.
--rebuild-editable
Force a rebuild of the editable build of Synapse.
This is occasionally useful if the built-in rebuild detection with
--editable fails, e.g. when changing configure_workers_and_start.py.
For help on arguments to 'go test', run 'go help testflag'.
EOF
}
@ -82,6 +87,9 @@ while [ $# -ge 1 ]; do
"-e"|"--editable")
use_editable_synapse=1
;;
"--rebuild-editable")
rebuild_editable_synapse=1
;;
*)
# unknown arg: presumably an argument to gotest. break the loop.
break
@ -116,7 +124,9 @@ if [ -n "$use_editable_synapse" ]; then
fi
editable_mount="$(realpath .):/editable-src:z"
if docker inspect complement-synapse-editable &>/dev/null; then
if [ -n "$rebuild_editable_synapse" ]; then
unset skip_docker_build
elif docker inspect complement-synapse-editable &>/dev/null; then
# complement-synapse-editable already exists: see if we can still use it:
# - The Rust module must still be importable; it will fail to import if the Rust source has changed.
# - The Poetry lock file must be the same (otherwise we assume dependencies have changed)

View File

@ -112,7 +112,7 @@ python3 -m black "${files[@]}"
# Catch any common programming mistakes in Python code.
# --quiet suppresses the update check.
ruff --quiet "${files[@]}"
ruff --quiet --fix "${files[@]}"
# Catch any common programming mistakes in Rust code.
#

View File

@ -29,7 +29,6 @@ _Repr = Callable[[], str]
def recursive_repr(fillvalue: str = ...) -> Callable[[_Repr], _Repr]: ...
class SortedList(MutableSequence[_T]):
DEFAULT_LOAD_FACTOR: int = ...
def __init__(
self,

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union
from synapse.types import JsonDict, JsonValue
@ -58,7 +58,6 @@ class PushRuleEvaluator:
self,
flattened_keys: Mapping[str, JsonValue],
has_mentions: bool,
user_mentions: Set[str],
room_member_count: int,
sender_power_level: Optional[int],
notification_power_levels: Mapping[str, int],
@ -66,7 +65,6 @@ class PushRuleEvaluator:
related_event_match_enabled: bool,
room_version_feature_flags: Tuple[str, ...],
msc3931_enabled: bool,
msc3758_exact_event_match: bool,
msc3966_exact_event_property_contains: bool,
): ...
def run(

View File

@ -1,5 +1,6 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-9 New Vector Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a reference implementation of a Matrix homeserver.
""" This is an implementation of a Matrix homeserver.
"""
import json

View File

@ -37,7 +37,7 @@ import os
import shutil
import sys
from synapse.rest.media.v1.filepath import MediaFilePaths
from synapse.media.filepath import MediaFilePaths
logger = logging.getLogger()

View File

@ -47,7 +47,6 @@ def request_registration(
_print: Callable[[str], None] = print,
exit: Callable[[int], None] = sys.exit,
) -> None:
url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)
# Get the nonce
@ -154,7 +153,6 @@ def register_new_user(
def main() -> None:
logging.captureWarnings(True)
parser = argparse.ArgumentParser(

View File

@ -1205,7 +1205,6 @@ class CursesProgress(Progress):
if self.finished:
status = "Time spent: %s (Done!)" % (duration_str,)
else:
if self.total_processed > 0:
left = float(self.total_remaining) / self.total_processed

View File

@ -167,7 +167,6 @@ Worker = collections.namedtuple(
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(

View File

@ -213,7 +213,7 @@ def handle_startup_exception(e: Exception) -> NoReturn:
def redirect_stdio_to_logs() -> None:
streams = [("stdout", LogLevel.info), ("stderr", LogLevel.error)]
for (stream, level) in streams:
for stream, level in streams:
oldStream = getattr(sys, stream)
loggingFile = LoggingFile(
logger=twisted.logger.Logger(namespace=stream),

View File

@ -44,6 +44,7 @@ from synapse.storage.databases.main.event_push_actions import (
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
@ -86,6 +87,7 @@ class AdminCmdSlavedStore(
RegistrationWorkerStore,
RoomWorkerStore,
ProfileWorkerStore,
MediaRepositoryStore,
):
def __init__(
self,
@ -149,7 +151,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
with open(events_file, "a") as f:
for event in events:
print(json.dumps(event.get_pdu_json()), file=f)
json.dump(event.get_pdu_json(), fp=f)
def write_state(
self, room_id: str, event_id: str, state: StateMap[EventBase]
@ -162,7 +164,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
with open(event_file, "a") as f:
for event in state.values():
print(json.dumps(event.get_pdu_json()), file=f)
json.dump(event.get_pdu_json(), fp=f)
def write_invite(
self, room_id: str, event: EventBase, state: StateMap[EventBase]
@ -178,7 +180,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
with open(invite_state, "a") as f:
for event in state.values():
print(json.dumps(event), file=f)
json.dump(event, fp=f)
def write_knock(
self, room_id: str, event: EventBase, state: StateMap[EventBase]
@ -194,7 +196,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
with open(knock_state, "a") as f:
for event in state.values():
print(json.dumps(event), file=f)
json.dump(event, fp=f)
def write_profile(self, profile: JsonDict) -> None:
user_directory = os.path.join(self.base_directory, "user_data")
@ -202,7 +204,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
profile_file = os.path.join(user_directory, "profile")
with open(profile_file, "a") as f:
print(json.dumps(profile), file=f)
json.dump(profile, fp=f)
def write_devices(self, devices: List[JsonDict]) -> None:
user_directory = os.path.join(self.base_directory, "user_data")
@ -211,7 +213,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
for device in devices:
with open(device_file, "a") as f:
print(json.dumps(device), file=f)
json.dump(device, fp=f)
def write_connections(self, connections: List[JsonDict]) -> None:
user_directory = os.path.join(self.base_directory, "user_data")
@ -220,7 +222,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
for connection in connections:
with open(connection_file, "a") as f:
print(json.dumps(connection), file=f)
json.dump(connection, fp=f)
def write_account_data(
self, file_name: str, account_data: Mapping[str, JsonDict]
@ -233,7 +235,15 @@ class FileExfiltrationWriter(ExfiltrationWriter):
account_data_file = os.path.join(account_data_directory, file_name)
with open(account_data_file, "a") as f:
print(json.dumps(account_data), file=f)
json.dump(account_data, fp=f)
def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None:
file_directory = os.path.join(self.base_directory, "media_ids")
os.makedirs(file_directory, exist_ok=True)
media_id_file = os.path.join(file_directory, media_id)
with open(media_id_file, "w") as f:
json.dump(media_metadata, fp=f)
def finished(self) -> str:
return self.base_directory

View File

@ -219,7 +219,7 @@ def main() -> None:
# memory space and don't need to repeat the work of loading the code!
# Instead of using fork() directly, we use the multiprocessing library,
# which uses fork() on Unix platforms.
for (func, worker_args) in zip(worker_functions, args_by_worker):
for func, worker_args in zip(worker_functions, args_by_worker):
process = multiprocessing.Process(
target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
)

View File

@ -157,7 +157,6 @@ class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore
def _listen_http(self, listener_config: ListenerConfig) -> None:
assert listener_config.http_options is not None
# We always include a health resource.

View File

@ -321,7 +321,6 @@ def setup(config_options: List[str]) -> SynapseHomeServer:
and not config.registration.registrations_require_3pid
and not config.registration.registration_requires_token
):
raise ConfigError(
"You have enabled open registration without any verification. This is a known vector for "
"spam and abuse. If you would like to allow public registration, please consider adding email, "

View File

@ -22,7 +22,6 @@ from ._base import Config
class ConsentConfig(Config):
section = "consent"
def __init__(self, *args: Any):

View File

@ -154,7 +154,6 @@ class DatabaseConfig(Config):
logger.warning(NON_SQLITE_DATABASE_PATH_WARNING)
def set_databasepath(self, database_path: str) -> None:
if database_path != ":memory:":
database_path = self.abspath(database_path)

View File

@ -166,23 +166,20 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False)
# MSC3925: do not replace events with their edits
self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False)
# MSC3758: exact_event_match push rule condition
self.msc3758_exact_event_match = experimental.get(
"msc3758_exact_event_match", False
)
# MSC3873: Disambiguate event_match keys.
self.msc3783_escape_event_match_key = experimental.get(
"msc3783_escape_event_match_key", False
self.msc3873_escape_event_match_key = experimental.get(
"msc3873_escape_event_match_key", False
)
# MSC3952: Intentional mentions, this depends on MSC3758.
# MSC3966: exact_event_property_contains push rule condition.
self.msc3966_exact_event_property_contains = experimental.get(
"msc3966_exact_event_property_contains", False
)
# MSC3952: Intentional mentions, this depends on MSC3966.
self.msc3952_intentional_mentions = (
experimental.get("msc3952_intentional_mentions", False)
and self.msc3758_exact_event_match
and self.msc3966_exact_event_property_contains
)
# MSC3959: Do not generate notifications for edits.
@ -194,3 +191,6 @@ class ExperimentalConfig(Config):
self.msc3966_exact_event_property_contains = experimental.get(
"msc3966_exact_event_property_contains", False
)
# MSC3967: Do not require UIA when first uploading cross signing keys
self.msc3967_enabled = experimental.get("msc3967_enabled", False)

View File

@ -56,7 +56,6 @@ from .workers import WorkerConfig
class HomeServerConfig(RootConfig):
config_classes = [
ModulesConfig,
ServerConfig,

View File

@ -46,7 +46,6 @@ class RatelimitConfig(Config):
section = "ratelimiting"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# Load the new-style messages config if it exists. Otherwise fall back
# to the old method.
if "rc_message" in config:
@ -87,9 +86,18 @@ class RatelimitConfig(Config):
defaults={"per_second": 0.1, "burst_count": 5},
)
# It is reasonable to log in with a bunch of devices at once (e.g. when
# setting up an account), but it is *not* valid to continually be
# logging into new devices.
rc_login_config = config.get("rc_login", {})
self.rc_login_address = RatelimitSettings(rc_login_config.get("address", {}))
self.rc_login_account = RatelimitSettings(rc_login_config.get("account", {}))
self.rc_login_address = RatelimitSettings(
rc_login_config.get("address", {}),
defaults={"per_second": 0.003, "burst_count": 5},
)
self.rc_login_account = RatelimitSettings(
rc_login_config.get("account", {}),
defaults={"per_second": 0.003, "burst_count": 5},
)
self.rc_login_failed_attempts = RatelimitSettings(
rc_login_config.get("failed_attempts", {})
)
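For a sense of scale, the new login defaults work out as follows (back-of-envelope only; the actual token-bucket behaviour is implemented by Synapse's Ratelimiter):

per_second, burst_count = 0.003, 5

# burst_count attempts are allowed up front; afterwards, allowance refills at
# per_second, i.e. roughly one further login attempt every ~333 seconds.
print(burst_count, round(1 / per_second))  # 5 333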

View File

@ -116,7 +116,6 @@ class ContentRepositoryConfig(Config):
section = "media"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# Only enable the media repo if either the media repo is enabled or the
# current worker app is the media repo.
if (
@ -179,11 +178,13 @@ class ContentRepositoryConfig(Config):
for i, provider_config in enumerate(storage_providers):
# We special case the module "file_system" so as not to need to
# expose FileStorageProviderBackend
if provider_config["module"] == "file_system":
provider_config["module"] = (
"synapse.rest.media.v1.storage_provider"
".FileStorageProviderBackend"
)
if (
provider_config["module"] == "file_system"
or provider_config["module"] == "synapse.rest.media.v1.storage_provider"
):
provider_config[
"module"
] = "synapse.media.storage_provider.FileStorageProviderBackend"
provider_class, parsed_config = load_module(
provider_config, ("media_storage_providers", "<item %i>" % i)

View File

@ -735,7 +735,6 @@ class ServerConfig(Config):
listeners: Optional[List[dict]],
**kwargs: Any,
) -> str:
_, bind_port = parse_and_validate_server_name(server_name)
if bind_port is not None:
unsecure_port = bind_port - 400

View File

@ -30,7 +30,6 @@ class TlsConfig(Config):
section = "tls"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))

View File

@ -399,7 +399,7 @@ class Keyring:
# We now convert the returned list of results into a map from server
# name to key ID to FetchKeyResult, to return.
to_return: Dict[str, Dict[str, FetchKeyResult]] = {}
for (request, results) in zip(deduped_requests, results_per_request):
for request, results in zip(deduped_requests, results_per_request):
to_return_by_server = to_return.setdefault(request.server_name, {})
for key_id, key_result in results.items():
existing = to_return_by_server.get(key_id)

View File

@ -23,6 +23,7 @@ from synapse.types import JsonDict, StateMap
if TYPE_CHECKING:
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases import StateGroupDataStore
from synapse.storage.databases.main import DataStore
from synapse.types.state import StateFilter
@ -348,6 +349,54 @@ class UnpersistedEventContext(UnpersistedEventContextBase):
partial_state: bool
state_map_before_event: Optional[StateMap[str]] = None
@classmethod
async def batch_persist_unpersisted_contexts(
cls,
events_and_context: List[Tuple[EventBase, "UnpersistedEventContextBase"]],
room_id: str,
last_known_state_group: int,
datastore: "StateGroupDataStore",
) -> List[Tuple[EventBase, EventContext]]:
"""
Takes a list of events and their associated unpersisted contexts and persists
the unpersisted contexts, returning a list of events and persisted contexts.
Note that all the events must be in a linear chain (i.e. a <- b <- c).
Args:
events_and_context: A list of events and their unpersisted contexts
room_id: the room_id for the events
last_known_state_group: the last persisted state group
datastore: a state datastore
"""
amended_events_and_context = await datastore.store_state_deltas_for_batched(
events_and_context, room_id, last_known_state_group
)
events_and_persisted_context = []
for event, unpersisted_context in amended_events_and_context:
if event.is_state():
context = EventContext(
storage=unpersisted_context._storage,
state_group=unpersisted_context.state_group_after_event,
state_group_before_event=unpersisted_context.state_group_before_event,
state_delta_due_to_event=unpersisted_context.state_delta_due_to_event,
partial_state=unpersisted_context.partial_state,
prev_group=unpersisted_context.state_group_before_event,
delta_ids=unpersisted_context.state_delta_due_to_event,
)
else:
context = EventContext(
storage=unpersisted_context._storage,
state_group=unpersisted_context.state_group_after_event,
state_group_before_event=unpersisted_context.state_group_before_event,
state_delta_due_to_event=unpersisted_context.state_delta_due_to_event,
partial_state=unpersisted_context.partial_state,
prev_group=unpersisted_context.prev_group_for_state_group_before_event,
delta_ids=unpersisted_context.delta_ids_to_state_group_before_event,
)
events_and_persisted_context.append((event, context))
return events_and_persisted_context
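A minimal usage sketch, assuming a caller that has already built a linear batch of events with unpersisted contexts (`hs`, `pending`, and the wrapper function are illustrative):

async def persist_batch(hs, room_id, pending, last_known_state_group):
    # `pending` is a List[Tuple[EventBase, UnpersistedEventContextBase]] for a
    # linear chain of events (a <- b <- c) in `room_id`.
    return await UnpersistedEventContext.batch_persist_unpersisted_contexts(
        events_and_context=pending,
        room_id=room_id,
        last_known_state_group=last_known_state_group,
        datastore=hs.get_datastores().state,
    )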
async def get_prev_state_ids(
self, state_filter: Optional["StateFilter"] = None
) -> StateMap[str]:

View File

@ -33,8 +33,8 @@ from typing_extensions import Literal
import synapse
from synapse.api.errors import Codes
from synapse.logging.opentracing import trace
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.media._base import FileInfo
from synapse.media.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import JsonDict, RoomAlias, UserProfile
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable

View File

@ -45,6 +45,8 @@ CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]]
ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable]
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable]
ON_THREEPID_BIND_CALLBACK = Callable[[str, str, str], Awaitable]
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable]
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable]
def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
@ -78,7 +80,6 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
# correctly, we need to await its result. Therefore it doesn't make a lot of
# sense to make it go through the run() wrapper.
if f.__name__ == "check_event_allowed":
# We need to wrap check_event_allowed because its old form would return either
# a boolean or a dict, but now we want to return the dict separately from the
# boolean.
@ -100,7 +101,6 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
return wrap_check_event_allowed
if f.__name__ == "on_create_room":
# We need to wrap on_create_room because its old form would return a boolean
# if the room creation is denied, but now we just want it to raise an
# exception.
@ -174,6 +174,12 @@ class ThirdPartyEventRules:
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
] = []
self._on_threepid_bind_callbacks: List[ON_THREEPID_BIND_CALLBACK] = []
self._on_add_user_third_party_identifier_callbacks: List[
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = []
self._on_remove_user_third_party_identifier_callbacks: List[
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = []
def register_third_party_rules_callbacks(
self,
@ -193,6 +199,12 @@ class ThirdPartyEventRules:
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
] = None,
on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None,
on_add_user_third_party_identifier: Optional[
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = None,
on_remove_user_third_party_identifier: Optional[
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = None,
) -> None:
"""Register callbacks from modules for each hook."""
if check_event_allowed is not None:
@ -230,6 +242,11 @@ class ThirdPartyEventRules:
if on_threepid_bind is not None:
self._on_threepid_bind_callbacks.append(on_threepid_bind)
if on_add_user_third_party_identifier is not None:
self._on_add_user_third_party_identifier_callbacks.append(
on_add_user_third_party_identifier
)
async def check_event_allowed(
self,
event: EventBase,
@ -513,6 +530,9 @@ class ThirdPartyEventRules:
local homeserver, not when it's created on an identity server (and then kept track
of so that it can be unbound on the same IS later on).
THIS MODULE CALLBACK METHOD HAS BEEN DEPRECATED. Please use the
`on_add_user_third_party_identifier` callback method instead.
Args:
user_id: the user being associated with the threepid.
medium: the threepid's medium.
@ -525,3 +545,44 @@ class ThirdPartyEventRules:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
async def on_add_user_third_party_identifier(
self, user_id: str, medium: str, address: str
) -> None:
"""Called when an association between a user's Matrix ID and a third-party ID
(email, phone number) has successfully been registered on the homeserver.
Args:
user_id: The User ID included in the association.
medium: The medium of the third-party ID (email, msisdn).
address: The address of the third-party ID (i.e. an email address).
"""
for callback in self._on_add_user_third_party_identifier_callbacks:
try:
await callback(user_id, medium, address)
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
async def on_remove_user_third_party_identifier(
self, user_id: str, medium: str, address: str
) -> None:
"""Called when an association between a user's Matrix ID and a third-party ID
(email, phone number) has been successfully removed on the homeserver.
This is called *after* any known bindings on identity servers for this
association have been removed.
Args:
user_id: The User ID included in the removed association.
medium: The medium of the third-party ID (email, msisdn).
address: The address of the third-party ID (i.e. an email address).
"""
for callback in self._on_remove_user_third_party_identifier_callbacks:
try:
await callback(user_id, medium, address)
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
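For module authors, wiring up the new pair of callbacks looks roughly like the sketch below (assuming the standard module API object passed to a module's constructor; the class name and logging are illustrative):

import logging

logger = logging.getLogger(__name__)

class ThreepidAuditModule:
    def __init__(self, config: dict, api):
        api.register_third_party_rules_callbacks(
            on_add_user_third_party_identifier=self.on_add,
            on_remove_user_third_party_identifier=self.on_remove,
        )

    async def on_add(self, user_id: str, medium: str, address: str) -> None:
        logger.info("3PID added: %s %s for %s", medium, address, user_id)

    async def on_remove(self, user_id: str, medium: str, address: str) -> None:
        logger.info("3PID removed: %s %s for %s", medium, address, user_id)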

View File

@ -38,8 +38,7 @@ from synapse.api.constants import (
)
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict
from synapse.util.frozenutils import unfreeze
from synapse.types import JsonDict, Requester
from . import EventBase
@ -317,8 +316,9 @@ class SerializeEventConfig:
as_client_event: bool = True
# Function to convert from federation format to client format
event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1
# ID of the user's auth token - used for namespacing of transaction IDs
token_id: Optional[int] = None
# The entity that requested the event. This is used to determine whether to include
# the transaction_id in the unsigned section of the event.
requester: Optional[Requester] = None
# List of event fields to include. If empty, all fields will be returned.
only_event_fields: Optional[List[str]] = None
# Some events can have stripped room state stored in the `unsigned` field.
@ -368,11 +368,24 @@ def serialize_event(
e.unsigned["redacted_because"], time_now_ms, config=config
)
if config.token_id is not None:
if config.token_id == getattr(e.internal_metadata, "token_id", None):
txn_id = getattr(e.internal_metadata, "txn_id", None)
if txn_id is not None:
d["unsigned"]["transaction_id"] = txn_id
# If we have a txn_id saved in the internal_metadata, we should include it in the
# unsigned section of the event if it was sent by the same session as the one
# requesting the event.
# There is a special case for guests, because they only have one access token
# without associated access_token_id, so we always include the txn_id for events
# they sent.
txn_id = getattr(e.internal_metadata, "txn_id", None)
if txn_id is not None and config.requester is not None:
event_token_id = getattr(e.internal_metadata, "token_id", None)
if config.requester.user.to_string() == e.sender and (
(
event_token_id is not None
and config.requester.access_token_id is not None
and event_token_id == config.requester.access_token_id
)
or config.requester.is_guest
):
d["unsigned"]["transaction_id"] = txn_id
# invite_room_state and knock_room_state are a list of stripped room state events
# that are meant to provide metadata about a room to an invitee/knocker. They are
@ -403,14 +416,6 @@ class EventClientSerializer:
clients.
"""
def __init__(self, inhibit_replacement_via_edits: bool = False):
"""
Args:
inhibit_replacement_via_edits: If this is set to True, then events are
never replaced by their edits.
"""
self._inhibit_replacement_via_edits = inhibit_replacement_via_edits
def serialize_event(
self,
event: Union[JsonDict, EventBase],
@ -418,7 +423,6 @@ class EventClientSerializer:
*,
config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
apply_edits: bool = True,
) -> JsonDict:
"""Serializes a single event.
@ -428,10 +432,7 @@ class EventClientSerializer:
config: Event serialization config
bundle_aggregations: A map from event_id to the aggregations to be bundled
into the event.
apply_edits: Whether the content of the event should be modified to reflect
any replacement in `bundle_aggregations[<event_id>].replace`.
See also the `inhibit_replacement_via_edits` constructor arg: if that is
set to True, then this argument is ignored.
Returns:
The serialized event
"""
@ -450,38 +451,10 @@ class EventClientSerializer:
config,
bundle_aggregations,
serialized_event,
apply_edits=apply_edits,
)
return serialized_event
def _apply_edit(
self, orig_event: EventBase, serialized_event: JsonDict, edit: EventBase
) -> None:
"""Replace the content, preserving existing relations of the serialized event.
Args:
orig_event: The original event.
serialized_event: The original event, serialized. This is modified.
edit: The event which edits the above.
"""
# Ensure we take copies of the edit content, otherwise we risk modifying
# the original event.
edit_content = edit.content.copy()
# Unfreeze the event content if necessary, so that we may modify it below
edit_content = unfreeze(edit_content)
serialized_event["content"] = edit_content.get("m.new_content", {})
# Check for existing relations
relates_to = orig_event.content.get("m.relates_to")
if relates_to:
# Keep the relations, ensuring we use a dict copy of the original
serialized_event["content"]["m.relates_to"] = relates_to.copy()
else:
serialized_event["content"].pop("m.relates_to", None)
def _inject_bundled_aggregations(
self,
event: EventBase,
@ -489,7 +462,6 @@ class EventClientSerializer:
config: SerializeEventConfig,
bundled_aggregations: Dict[str, "BundledAggregations"],
serialized_event: JsonDict,
apply_edits: bool,
) -> None:
"""Potentially injects bundled aggregations into the unsigned portion of the serialized event.
@ -504,9 +476,6 @@ class EventClientSerializer:
While serializing the bundled aggregations this map may be searched
again for additional events in a recursive manner.
serialized_event: The serialized event which may be modified.
apply_edits: Whether the content of the event should be modified to reflect
any replacement in `aggregations.replace` (subject to the
`inhibit_replacement_via_edits` constructor arg).
"""
# We have already checked that aggregations exist for this event.
@ -516,22 +485,12 @@ class EventClientSerializer:
# being serialized.
serialized_aggregations = {}
if event_aggregations.annotations:
serialized_aggregations[
RelationTypes.ANNOTATION
] = event_aggregations.annotations
if event_aggregations.references:
serialized_aggregations[
RelationTypes.REFERENCE
] = event_aggregations.references
if event_aggregations.replace:
# If there is an edit, optionally apply it to the event.
edit = event_aggregations.replace
if apply_edits and not self._inhibit_replacement_via_edits:
self._apply_edit(event, serialized_event, edit)
# Include information about it in the relations dict.
#
# Matrix spec v1.5 (https://spec.matrix.org/v1.5/client-server-api/#server-side-aggregation-of-mreplace-relationships)
@ -539,10 +498,7 @@ class EventClientSerializer:
# `sender` of the edit; however MSC3925 proposes extending it to the whole
# of the edit, which is what we do here.
serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event(
edit,
time_now,
config=config,
apply_edits=False,
event_aggregations.replace, time_now, config=config
)
# Include any threaded replies to this event.

View File

@ -314,7 +314,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
# stream position.
keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]}
for ((destination, edu_key), pos) in keyed_edus.items():
for (destination, edu_key), pos in keyed_edus.items():
rows.append(
(
pos,
@ -329,7 +329,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
j = self.edus.bisect_right(to_token) + 1
edus = self.edus.items()[i:j]
for (pos, edu) in edus:
for pos, edu in edus:
rows.append((pos, EduRow(edu)))
# Sort rows based on pos

View File

@ -155,9 +155,6 @@ class AccountDataHandler:
max_stream_id = await self._store.remove_account_data_for_room(
user_id, room_id, account_data_type
)
if max_stream_id is None:
# The referenced account data did not exist, so no delete occurred.
return None
self._notifier.on_new_event(
StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id]
@ -230,9 +227,6 @@ class AccountDataHandler:
max_stream_id = await self._store.remove_account_data_for_user(
user_id, account_data_type
)
if max_stream_id is None:
# The referenced account data did not exist, so no delete occurred.
return None
self._notifier.on_new_event(
StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id]
@ -248,7 +242,6 @@ class AccountDataHandler:
instance_name=random.choice(self._account_data_writers),
user_id=user_id,
account_data_type=account_data_type,
content={},
)
return response["max_stream_id"]

View File

@ -252,16 +252,19 @@ class AdminHandler:
profile = await self.get_user(UserID.from_string(user_id))
if profile is not None:
writer.write_profile(profile)
logger.info("[%s] Written profile", user_id)
# Get all devices the user has
devices = await self._device_handler.get_devices_by_user(user_id)
writer.write_devices(devices)
logger.info("[%s] Written %s devices", user_id, len(devices))
# Get all connections the user has
connections = await self.get_whois(UserID.from_string(user_id))
writer.write_connections(
connections["devices"][""]["sessions"][0]["connections"]
)
logger.info("[%s] Written %s connections", user_id, len(connections))
# Get all account data the user has global and in rooms
global_data = await self._store.get_global_account_data_for_user(user_id)
@ -269,6 +272,29 @@ class AdminHandler:
writer.write_account_data("global", global_data)
for room_id in by_room_data:
writer.write_account_data(room_id, by_room_data[room_id])
logger.info(
"[%s] Written account data for %s rooms", user_id, len(by_room_data)
)
# Get all media ids the user has
limit = 100
start = 0
while True:
media_ids, total = await self._store.get_local_media_by_user_paginate(
start, limit, user_id
)
for media in media_ids:
writer.write_media_id(media["media_id"], media)
logger.info(
"[%s] Written %d media_ids of %s",
user_id,
(start + len(media_ids)),
total,
)
if (start + limit) >= total:
break
start += limit
return writer.finished()
@ -359,6 +385,18 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta):
"""
raise NotImplementedError()
@abc.abstractmethod
def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None:
"""Write the media's metadata of a user.
Exports only the metadata, as this can be fetched from the database via a
read-only connection. In order to access the files themselves, a connection
to the correct media repository would be required.
Args:
media_id: ID of the media.
media_metadata: Metadata of one media file.
"""
@abc.abstractmethod
def finished(self) -> Any:
"""Called when all data has successfully been exported and written.

View File

@ -737,7 +737,7 @@ class ApplicationServicesHandler:
)
ret = []
for (success, result) in results:
for success, result in results:
if success:
ret.extend(result)

View File

@ -815,7 +815,6 @@ class AuthHandler:
now_ms = self._clock.time_msec()
if existing_token.expiry_ts is not None and existing_token.expiry_ts < now_ms:
raise SynapseError(
HTTPStatus.FORBIDDEN,
"The supplied refresh token has expired",
@ -1543,6 +1542,17 @@ class AuthHandler:
async def add_threepid(
self, user_id: str, medium: str, address: str, validated_at: int
) -> None:
"""
Adds an association between a user's Matrix ID and a third-party ID (email,
phone number).
Args:
user_id: The ID of the user to associate.
medium: The medium of the third-party ID (email, msisdn).
address: The address of the third-party ID (i.e. an email address).
validated_at: The timestamp in ms of when the validation that the user owns
this third-party ID occurred.
"""
# check if medium has a valid value
if medium not in ["email", "msisdn"]:
raise SynapseError(
@ -1567,42 +1577,44 @@ class AuthHandler:
user_id, medium, address, validated_at, self.hs.get_clock().time_msec()
)
# Inform Synapse modules that a 3PID association has been created.
await self._third_party_rules.on_add_user_third_party_identifier(
user_id, medium, address
)
# Deprecated method for informing Synapse modules that a 3PID association
# has successfully been created.
await self._third_party_rules.on_threepid_bind(user_id, medium, address)
async def delete_threepid(
self, user_id: str, medium: str, address: str, id_server: Optional[str] = None
) -> bool:
"""Attempts to unbind the 3pid on the identity servers and deletes it
from the local database.
async def delete_local_threepid(
self, user_id: str, medium: str, address: str
) -> None:
"""Deletes an association between a third-party ID and a user ID from the local
database. This method does not unbind the association from any identity servers.
If `medium` is 'email' and a pusher is associated with this third-party ID, the
pusher will also be deleted.
Args:
user_id: ID of user to remove the 3pid from.
medium: The medium of the 3pid being removed: "email" or "msisdn".
address: The 3pid address to remove.
id_server: Use the given identity server when unbinding
any threepids. If None then will attempt to unbind using the
identity server specified when binding (if known).
Returns:
Returns True if successfully unbound the 3pid on
the identity server, False if identity server doesn't support the
unbind API.
"""
# 'Canonicalise' email addresses as per above
if medium == "email":
address = canonicalise_email(address)
result = await self.hs.get_identity_handler().try_unbind_threepid(
user_id, medium, address, id_server
await self.store.user_delete_threepid(user_id, medium, address)
# Inform Synapse modules that a 3PID association has been deleted.
await self._third_party_rules.on_remove_user_third_party_identifier(
user_id, medium, address
)
await self.store.user_delete_threepid(user_id, medium, address)
if medium == "email":
await self.store.delete_pusher_by_app_id_pushkey_user_id(
app_id="m.email", pushkey=address, user_id=user_id
)
return result
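# Usage sketch for the new local-only deletion path (an `hs` HomeServer object
# in scope is assumed; note that no identity-server unbinding happens here):
auth_handler = hs.get_auth_handler()
await auth_handler.delete_local_threepid(
    "@alice:example.org", "email", "alice@example.org"
)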
async def hash(self, password: str) -> str:
"""Computes a secure hash of password.
@ -2259,7 +2271,6 @@ class PasswordAuthProvider:
async def on_logged_out(
self, user_id: str, device_id: Optional[str], access_token: str
) -> None:
# call all of the on_logged_out callbacks
for callback in self.on_logged_out_callbacks:
try:

View File

@ -100,26 +100,28 @@ class DeactivateAccountHandler:
# unbinding
identity_server_supports_unbinding = True
# Retrieve the 3PIDs this user has bound to an identity server
threepids = await self.store.user_get_bound_threepids(user_id)
for threepid in threepids:
# Attempt to unbind from the identity server(s) any threepids known to be
# bound to this account.
bound_threepids = await self.store.user_get_bound_threepids(user_id)
for threepid in bound_threepids:
try:
result = await self._identity_handler.try_unbind_threepid(
user_id, threepid["medium"], threepid["address"], id_server
)
identity_server_supports_unbinding &= result
except Exception:
# Do we want this to be a fatal error or should we carry on?
logger.exception("Failed to remove threepid from ID server")
raise SynapseError(400, "Failed to remove threepid from ID server")
await self.store.user_delete_threepid(
identity_server_supports_unbinding &= result
# Remove any local threepid associations for this account.
local_threepids = await self.store.user_get_threepids(user_id)
for threepid in local_threepids:
await self._auth_handler.delete_local_threepid(
user_id, threepid["medium"], threepid["address"]
)
# Remove all 3PIDs this user has bound to the homeserver
await self.store.user_delete_threepids(user_id)
# delete any devices belonging to the user, which will also
# delete corresponding access tokens.
await self._device_handler.delete_all_devices_for_user(user_id)

View File

@ -497,9 +497,11 @@ class DirectoryHandler:
raise SynapseError(403, "Not allowed to publish room")
# Check if publishing is blocked by a third party module
allowed_by_third_party_rules = await (
self.third_party_event_rules.check_visibility_can_be_modified(
room_id, visibility
allowed_by_third_party_rules = (
await (
self.third_party_event_rules.check_visibility_can_be_modified(
room_id, visibility
)
)
)
if not allowed_by_third_party_rules:

View File

@ -1301,6 +1301,20 @@ class E2eKeysHandler:
return desired_key_data
async def is_cross_signing_set_up_for_user(self, user_id: str) -> bool:
"""Checks if the user has cross-signing set up
Args:
user_id: The user to check
Returns:
True if the user has cross-signing set up, False otherwise
"""
existing_master_key = await self.store.get_e2e_cross_signing_key(
user_id, "master"
)
return existing_master_key is not None
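# Usage sketch (an `hs` HomeServer object in scope is assumed):
e2e_keys_handler = hs.get_e2e_keys_handler()
if not await e2e_keys_handler.is_cross_signing_set_up_for_user("@alice:example.org"):
    ...  # e.g. tell the client to bootstrap cross-signing first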
def _check_cross_signing_key(
key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None

View File

@ -188,7 +188,6 @@ class E2eRoomKeysHandler:
# XXX: perhaps we should use a finer grained lock here?
async with self._upload_linearizer.queue(user_id):
# Check that the version we're trying to upload is the current version
try:
version_info = await self.store.get_e2e_room_keys_version_info(user_id)

View File

@ -236,7 +236,6 @@ class EventAuthHandler:
# in any of them.
allowed_rooms = await self.get_rooms_that_allow_join(state_ids)
if not await self.is_user_in_rooms(allowed_rooms, user_id):
# If this is a remote request, the user might be in an allowed room
# that we do not know about.
if get_domain_from_id(user_id) != self._server_name:

View File

@ -23,7 +23,7 @@ from synapse.events.utils import SerializeEventConfig
from synapse.handlers.presence import format_user_presence_state
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, UserID
from synapse.types import JsonDict, Requester, UserID
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
@ -46,13 +46,12 @@ class EventStreamHandler:
async def get_stream(
self,
auth_user_id: str,
requester: Requester,
pagin_config: PaginationConfig,
timeout: int = 0,
as_client_event: bool = True,
affect_presence: bool = True,
room_id: Optional[str] = None,
is_guest: bool = False,
) -> JsonDict:
"""Fetches the events stream for a given user."""
@ -62,13 +61,12 @@ class EventStreamHandler:
raise SynapseError(403, "This room has been blocked on this server")
# send any outstanding server notices to the user.
await self._server_notices_sender.on_user_syncing(auth_user_id)
await self._server_notices_sender.on_user_syncing(requester.user.to_string())
auth_user = UserID.from_string(auth_user_id)
presence_handler = self.hs.get_presence_handler()
context = await presence_handler.user_syncing(
auth_user_id,
requester.user.to_string(),
affect_presence=affect_presence,
presence_state=PresenceState.ONLINE,
)
@ -82,10 +80,10 @@ class EventStreamHandler:
timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1))
stream_result = await self.notifier.get_events_for(
auth_user,
requester.user,
pagin_config,
timeout,
is_guest=is_guest,
is_guest=requester.is_guest,
explicit_room_id=room_id,
)
events = stream_result.events
@ -102,7 +100,7 @@ class EventStreamHandler:
if event.membership != Membership.JOIN:
continue
# Send down presence.
if event.state_key == auth_user_id:
if event.state_key == requester.user.to_string():
# Send down presence for everyone in the room.
users: Iterable[str] = await self.store.get_users_in_room(
event.room_id
@ -124,7 +122,9 @@ class EventStreamHandler:
chunks = self._event_serializer.serialize_events(
events,
time_now,
config=SerializeEventConfig(as_client_event=as_client_event),
config=SerializeEventConfig(
as_client_event=as_client_event, requester=requester
),
)
chunk = {

View File

@ -124,7 +124,6 @@ class InitialSyncHandler:
as_client_event: bool = True,
include_archived: bool = False,
) -> JsonDict:
memberships = [Membership.INVITE, Membership.JOIN]
if include_archived:
memberships.append(Membership.LEAVE)
@ -319,11 +318,9 @@ class InitialSyncHandler:
)
is_peeking = member_event_id is None
user_id = requester.user.to_string()
if membership == Membership.JOIN:
result = await self._room_initial_sync_joined(
user_id, room_id, pagin_config, membership, is_peeking
requester, room_id, pagin_config, membership, is_peeking
)
elif membership == Membership.LEAVE:
# The member_event_id will always be available if membership is set
@ -331,10 +328,16 @@ class InitialSyncHandler:
assert member_event_id
result = await self._room_initial_sync_parted(
user_id, room_id, pagin_config, membership, member_event_id, is_peeking
requester,
room_id,
pagin_config,
membership,
member_event_id,
is_peeking,
)
account_data_events = []
user_id = requester.user.to_string()
tags = await self.store.get_tags_for_room(user_id, room_id)
if tags:
account_data_events.append(
@ -351,7 +354,7 @@ class InitialSyncHandler:
async def _room_initial_sync_parted(
self,
user_id: str,
requester: Requester,
room_id: str,
pagin_config: PaginationConfig,
membership: str,
@ -370,13 +373,17 @@ class InitialSyncHandler:
)
messages = await filter_events_for_client(
self._storage_controllers, user_id, messages, is_peeking=is_peeking
self._storage_controllers,
requester.user.to_string(),
messages,
is_peeking=is_peeking,
)
start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
end_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, stream_token)
time_now = self.clock.time_msec()
serialize_options = SerializeEventConfig(requester=requester)
return {
"membership": membership,
@ -384,14 +391,18 @@ class InitialSyncHandler:
"messages": {
"chunk": (
# Don't bundle aggregations as this is a deprecated API.
self._event_serializer.serialize_events(messages, time_now)
self._event_serializer.serialize_events(
messages, time_now, config=serialize_options
)
),
"start": await start_token.to_string(self.store),
"end": await end_token.to_string(self.store),
},
"state": (
# Don't bundle aggregations as this is a deprecated API.
self._event_serializer.serialize_events(room_state.values(), time_now)
self._event_serializer.serialize_events(
room_state.values(), time_now, config=serialize_options
)
),
"presence": [],
"receipts": [],
@ -399,7 +410,7 @@ class InitialSyncHandler:
async def _room_initial_sync_joined(
self,
user_id: str,
requester: Requester,
room_id: str,
pagin_config: PaginationConfig,
membership: str,
@ -411,9 +422,12 @@ class InitialSyncHandler:
# TODO: These concurrently
time_now = self.clock.time_msec()
serialize_options = SerializeEventConfig(requester=requester)
# Don't bundle aggregations as this is a deprecated API.
state = self._event_serializer.serialize_events(
current_state.values(), time_now
current_state.values(),
time_now,
config=serialize_options,
)
now_token = self.hs.get_event_sources().get_current_token()
@ -451,7 +465,10 @@ class InitialSyncHandler:
if not receipts:
return []
return ReceiptEventSource.filter_out_private_receipts(receipts, user_id)
return ReceiptEventSource.filter_out_private_receipts(
receipts,
requester.user.to_string(),
)
presence, receipts, (messages, token) = await make_deferred_yieldable(
gather_results(
@ -470,20 +487,23 @@ class InitialSyncHandler:
)
messages = await filter_events_for_client(
self._storage_controllers, user_id, messages, is_peeking=is_peeking
self._storage_controllers,
requester.user.to_string(),
messages,
is_peeking=is_peeking,
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
end_token = now_token
time_now = self.clock.time_msec()
ret = {
"room_id": room_id,
"messages": {
"chunk": (
# Don't bundle aggregations as this is a deprecated API.
self._event_serializer.serialize_events(messages, time_now)
self._event_serializer.serialize_events(
messages, time_now, config=serialize_options
)
),
"start": await start_token.to_string(self.store),
"end": await end_token.to_string(self.store),

View File

@ -50,7 +50,7 @@ from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase, relation_from_event
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
from synapse.events.utils import maybe_upsert_event_field
from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field
from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler
from synapse.logging import opentracing
@ -245,8 +245,11 @@ class MessageHandler:
)
room_state = room_state_events[membership_event_id]
now = self.clock.time_msec()
events = self._event_serializer.serialize_events(room_state.values(), now)
events = self._event_serializer.serialize_events(
room_state.values(),
self.clock.time_msec(),
config=SerializeEventConfig(requester=requester),
)
return events
async def _user_can_see_state_at_event(
@ -574,7 +577,7 @@ class EventCreationHandler:
state_map: Optional[StateMap[str]] = None,
for_batch: bool = False,
current_state_group: Optional[int] = None,
) -> Tuple[EventBase, EventContext]:
) -> Tuple[EventBase, UnpersistedEventContextBase]:
"""
Given a dict from a client, create a new event. If bool for_batch is true, will
create an event using the prev_event_ids, and will create an event context for
@ -721,8 +724,6 @@ class EventCreationHandler:
current_state_group=current_state_group,
)
context = await unpersisted_context.persist(event)
# In an ideal world we wouldn't need the second part of this condition. However,
# this behaviour isn't spec'd yet, meaning we should be able to deactivate this
# behaviour. Another reason is that this code is also evaluated each time a new
@ -739,7 +740,7 @@ class EventCreationHandler:
assert state_map is not None
prev_event_id = state_map.get((EventTypes.Member, event.sender))
else:
prev_state_ids = await context.get_prev_state_ids(
prev_state_ids = await unpersisted_context.get_prev_state_ids(
StateFilter.from_types([(EventTypes.Member, None)])
)
prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
@ -764,8 +765,7 @@ class EventCreationHandler:
)
self.validator.validate_new(event, self.config)
return event, context
return event, unpersisted_context
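# The two-step pattern that replaces the old single call, as used by the
# call sites updated throughout this diff:
event, unpersisted_context = await self.event_creation_handler.create_event(
    requester, event_dict
)
context = await unpersisted_context.persist(event)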
async def _is_exempt_from_privacy_policy(
self, builder: EventBuilder, requester: Requester
@ -1005,7 +1005,7 @@ class EventCreationHandler:
max_retries = 5
for i in range(max_retries):
try:
event, context = await self.create_event(
event, unpersisted_context = await self.create_event(
requester,
event_dict,
txn_id=txn_id,
@ -1016,6 +1016,7 @@ class EventCreationHandler:
historical=historical,
depth=depth,
)
context = await unpersisted_context.persist(event)
assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
event.sender,
@ -1190,7 +1191,6 @@ class EventCreationHandler:
if for_batch:
assert prev_event_ids is not None
assert state_map is not None
assert current_state_group is not None
auth_ids = self._event_auth_handler.compute_auth_events(builder, state_map)
event = await builder.build(
prev_event_ids=prev_event_ids, auth_event_ids=auth_ids, depth=depth
@ -2046,7 +2046,7 @@ class EventCreationHandler:
max_retries = 5
for i in range(max_retries):
try:
event, context = await self.create_event(
event, unpersisted_context = await self.create_event(
requester,
{
"type": EventTypes.Dummy,
@ -2055,6 +2055,7 @@ class EventCreationHandler:
"sender": user_id,
},
)
context = await unpersisted_context.persist(event)
event.internal_metadata.proactively_send = False

View File

@ -579,7 +579,9 @@ class PaginationHandler:
time_now = self.clock.time_msec()
serialize_options = SerializeEventConfig(as_client_event=as_client_event)
serialize_options = SerializeEventConfig(
as_client_event=as_client_event, requester=requester
)
chunk = {
"chunk": (

View File

@ -777,7 +777,6 @@ class PresenceHandler(BasePresenceHandler):
)
if self.unpersisted_users_changes:
await self.store.update_presence(
[
self.user_to_current_state[user_id]
@ -823,7 +822,6 @@ class PresenceHandler(BasePresenceHandler):
now = self.clock.time_msec()
with Measure(self.clock, "presence_update_states"):
# NOTE: We purposefully don't await between now and when we've
# calculated what we want to do with the new states, to avoid races.

View File

@ -476,7 +476,7 @@ class RegistrationHandler:
# create room expects the localpart of the room alias
config["room_alias_name"] = room_alias.localpart
info, _ = await room_creation_handler.create_room(
room_id, _, _ = await room_creation_handler.create_room(
fake_requester,
config=config,
ratelimit=False,
@ -490,7 +490,7 @@ class RegistrationHandler:
user_id, authenticated_entity=self._server_name
),
target=UserID.from_string(user_id),
room_id=info["room_id"],
room_id=room_id,
# Since it was just created, there are no remote hosts.
remote_room_hosts=[],
action="join",

View File

@ -20,6 +20,7 @@ import attr
from synapse.api.constants import Direction, EventTypes, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
from synapse.events.utils import SerializeEventConfig
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import trace
from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent
@ -60,13 +61,12 @@ class BundledAggregations:
Some values require additional processing during serialization.
"""
annotations: Optional[JsonDict] = None
references: Optional[JsonDict] = None
replace: Optional[EventBase] = None
thread: Optional[_ThreadAggregation] = None
def __bool__(self) -> bool:
return bool(self.annotations or self.references or self.replace or self.thread)
return bool(self.references or self.replace or self.thread)
class RelationsHandler:
@ -152,16 +152,23 @@ class RelationsHandler:
)
now = self._clock.time_msec()
serialize_options = SerializeEventConfig(requester=requester)
return_value: JsonDict = {
"chunk": self._event_serializer.serialize_events(
events, now, bundle_aggregations=aggregations
events,
now,
bundle_aggregations=aggregations,
config=serialize_options,
),
}
if include_original_event:
# Do not bundle aggregations when retrieving the original event because
# we want the content before relations are applied to it.
return_value["original_event"] = self._event_serializer.serialize_event(
event, now, bundle_aggregations=None
event,
now,
bundle_aggregations=None,
config=serialize_options,
)
if next_token:
@ -227,67 +234,6 @@ class RelationsHandler:
e.msg,
)
async def get_annotations_for_events(
self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
) -> Dict[str, List[JsonDict]]:
"""Get a list of annotations to the given events, grouped by event type and
aggregation key, sorted by count.
This is used e.g. to get which reactions have happened on an event and how
many of each.
Args:
event_ids: Fetch events that relate to these event IDs.
ignored_users: The users ignored by the requesting user.
Returns:
A map of event IDs to a list of groups of annotations that match.
Each entry is a dict with `type`, `key` and `count` fields.
"""
# Get the base results for all users.
full_results = await self._main_store.get_aggregation_groups_for_events(
event_ids
)
# Avoid additional logic if there are no ignored users.
if not ignored_users:
return {
event_id: results
for event_id, results in full_results.items()
if results
}
# Then subtract off the results for any ignored users.
ignored_results = await self._main_store.get_aggregation_groups_for_users(
[event_id for event_id, results in full_results.items() if results],
ignored_users,
)
filtered_results = {}
for event_id, results in full_results.items():
# If no annotations, skip.
if not results:
continue
# If there are not ignored results for this event, copy verbatim.
if event_id not in ignored_results:
filtered_results[event_id] = results
continue
# Otherwise, subtract out the ignored results.
event_ignored_results = ignored_results[event_id]
for result in results:
key = (result["type"], result["key"])
if key in event_ignored_results:
# Ensure to not modify the cache.
result = result.copy()
result["count"] -= event_ignored_results[key]
if result["count"] <= 0:
continue
filtered_results.setdefault(event_id, []).append(result)
return filtered_results
async def get_references_for_events(
self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
) -> Dict[str, List[_RelatedEvent]]:
@ -531,17 +477,6 @@ class RelationsHandler:
# (as that is what makes it part of the thread).
relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD
async def _fetch_annotations() -> None:
"""Fetch any annotations (ie, reactions) to bundle with this event."""
annotations_by_event_id = await self.get_annotations_for_events(
events_by_id.keys(), ignored_users=ignored_users
)
for event_id, annotations in annotations_by_event_id.items():
if annotations:
results.setdefault(event_id, BundledAggregations()).annotations = {
"chunk": annotations
}
async def _fetch_references() -> None:
"""Fetch any references to bundle with this event."""
references_by_event_id = await self.get_references_for_events(
@ -575,7 +510,6 @@ class RelationsHandler:
await make_deferred_yieldable(
gather_results(
(
run_in_background(_fetch_annotations),
run_in_background(_fetch_references),
run_in_background(_fetch_edits),
)

View File

@ -51,6 +51,7 @@ from synapse.api.filtering import Filter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.snapshot import UnpersistedEventContext
from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
@ -211,7 +212,7 @@ class RoomCreationHandler:
# the required power level to send the tombstone event.
(
tombstone_event,
tombstone_context,
tombstone_unpersisted_context,
) = await self.event_creation_handler.create_event(
requester,
{
@ -225,6 +226,9 @@ class RoomCreationHandler:
},
},
)
tombstone_context = await tombstone_unpersisted_context.persist(
tombstone_event
)
validate_event_for_room_version(tombstone_event)
await self._event_auth_handler.check_auth_rules_from_context(
tombstone_event
@ -690,13 +694,14 @@ class RoomCreationHandler:
config: JsonDict,
ratelimit: bool = True,
creator_join_profile: Optional[JsonDict] = None,
) -> Tuple[dict, int]:
) -> Tuple[str, Optional[RoomAlias], int]:
"""Creates a new room.
Args:
requester:
The user who requested the room creation.
config : A dict of configuration options.
requester: The user who requested the room creation.
config: A dict of configuration options. This will be the body of
a /createRoom request; see
https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom
ratelimit: set to False to disable the rate limiter
creator_join_profile:
@ -707,14 +712,17 @@ class RoomCreationHandler:
`avatar_url` and/or `displayname`.
Returns:
First, a dict containing the keys `room_id` and, if an alias
was requested, `room_alias`. Secondly, the stream_id of the
last persisted event.
A 3-tuple containing:
- the room ID;
- if requested, the room alias, otherwise None; and
- the `stream_id` of the last persisted event.
Raises:
SynapseError if the room ID couldn't be stored, 3pid invitation config
validation failed, or something went horribly wrong.
ResourceLimitError if server is blocked to some resource being
exceeded
SynapseError:
if the room ID couldn't be stored, 3pid invitation config
validation failed, or something went horribly wrong.
ResourceLimitError:
if the server is blocked due to some resource limit
being exceeded
"""
user_id = requester.user.to_string()
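# Call-site migration implied by the new return type (mirrors the changes to
# the registration and room-shutdown handlers elsewhere in this diff):
#   before: info, stream_id = await handler.create_room(requester, config=config)
#           room_id = info["room_id"]
#   after:  room_id, room_alias, stream_id = await handler.create_room(
#               requester, config=config
#           )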
@ -864,9 +872,11 @@ class RoomCreationHandler:
)
# Check whether this visibility value is blocked by a third party module
allowed_by_third_party_rules = await (
self.third_party_event_rules.check_visibility_can_be_modified(
room_id, visibility
allowed_by_third_party_rules = (
await (
self.third_party_event_rules.check_visibility_can_be_modified(
room_id, visibility
)
)
)
if not allowed_by_third_party_rules:
@ -1024,11 +1034,6 @@ class RoomCreationHandler:
last_sent_event_id = member_event_id
depth += 1
result = {"room_id": room_id}
if room_alias:
result["room_alias"] = room_alias.to_string()
# Always wait for room creation to propagate before returning
await self._replication.wait_for_stream_position(
self.hs.config.worker.events_shard_config.get_instance(room_id),
@ -1036,7 +1041,7 @@ class RoomCreationHandler:
last_stream_id,
)
return result, last_stream_id
return room_id, room_alias, last_stream_id
async def _send_events_for_new_room(
self,
@ -1091,7 +1096,7 @@ class RoomCreationHandler:
content: JsonDict,
for_batch: bool,
**kwargs: Any,
) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
) -> Tuple[EventBase, synapse.events.snapshot.UnpersistedEventContextBase]:
"""
Creates an event and associated event context.
Args:
@ -1110,20 +1115,23 @@ class RoomCreationHandler:
event_dict = create_event_dict(etype, content, **kwargs)
new_event, new_context = await self.event_creation_handler.create_event(
(
new_event,
new_unpersisted_context,
) = await self.event_creation_handler.create_event(
creator,
event_dict,
prev_event_ids=prev_event,
depth=depth,
state_map=state_map,
for_batch=for_batch,
current_state_group=current_state_group,
)
depth += 1
prev_event = [new_event.event_id]
state_map[(new_event.type, new_event.state_key)] = new_event.event_id
return new_event, new_context
return new_event, new_unpersisted_context
try:
config = self._presets_dict[preset_config]
@ -1133,10 +1141,10 @@ class RoomCreationHandler:
)
creation_content.update({"creator": creator_id})
creation_event, creation_context = await create_event(
creation_event, unpersisted_creation_context = await create_event(
EventTypes.Create, creation_content, False
)
creation_context = await unpersisted_creation_context.persist(creation_event)
logger.debug("Sending %s in new room", EventTypes.Member)
ev = await self.event_creation_handler.handle_new_client_event(
requester=creator,
@ -1180,7 +1188,6 @@ class RoomCreationHandler:
power_event, power_context = await create_event(
EventTypes.PowerLevels, pl_content, True
)
current_state_group = power_context._state_group
events_to_send.append((power_event, power_context))
else:
power_level_content: JsonDict = {
@ -1229,14 +1236,12 @@ class RoomCreationHandler:
power_level_content,
True,
)
current_state_group = pl_context._state_group
events_to_send.append((pl_event, pl_context))
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
room_alias_event, room_alias_context = await create_event(
EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True
)
current_state_group = room_alias_context._state_group
events_to_send.append((room_alias_event, room_alias_context))
if (EventTypes.JoinRules, "") not in initial_state:
@ -1245,7 +1250,6 @@ class RoomCreationHandler:
{"join_rule": config["join_rules"]},
True,
)
current_state_group = join_rules_context._state_group
events_to_send.append((join_rules_event, join_rules_context))
if (EventTypes.RoomHistoryVisibility, "") not in initial_state:
@ -1254,7 +1258,6 @@ class RoomCreationHandler:
{"history_visibility": config["history_visibility"]},
True,
)
current_state_group = visibility_context._state_group
events_to_send.append((visibility_event, visibility_context))
if config["guest_can_join"]:
@ -1264,14 +1267,12 @@ class RoomCreationHandler:
{EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
True,
)
current_state_group = guest_access_context._state_group
events_to_send.append((guest_access_event, guest_access_context))
for (etype, state_key), content in initial_state.items():
event, context = await create_event(
etype, content, True, state_key=state_key
)
current_state_group = context._state_group
events_to_send.append((event, context))
if config["encrypted"]:
@ -1283,9 +1284,16 @@ class RoomCreationHandler:
)
events_to_send.append((encryption_event, encryption_context))
datastore = self.hs.get_datastores().state
events_and_context = (
await UnpersistedEventContext.batch_persist_unpersisted_contexts(
events_to_send, room_id, current_state_group, datastore
)
)
last_event = await self.event_creation_handler.handle_new_client_event(
creator,
events_to_send,
events_and_context,
ignore_shadow_ban=True,
ratelimit=False,
)
@ -1825,7 +1833,7 @@ class RoomShutdownHandler:
new_room_user_id, authenticated_entity=requester_user_id
)
info, stream_id = await self._room_creation_handler.create_room(
new_room_id, _, stream_id = await self._room_creation_handler.create_room(
room_creator_requester,
config={
"preset": RoomCreationPreset.PUBLIC_CHAT,
@ -1834,7 +1842,6 @@ class RoomShutdownHandler:
},
ratelimit=False,
)
new_room_id = info["room_id"]
logger.info(
"Shutting down room %r, joining to new room: %r", room_id, new_room_id
@ -1887,6 +1894,7 @@ class RoomShutdownHandler:
# Join users to new room
if new_room_user_id:
assert new_room_id is not None
await self.room_member_handler.update_membership(
requester=target_requester,
target=target_requester.user,
@ -1919,6 +1927,7 @@ class RoomShutdownHandler:
aliases_for_room = await self.store.get_aliases_for_room(room_id)
assert new_room_id is not None
await self.store.update_aliases_for_room(
room_id, new_room_id, requester_user_id
)

View File

@ -327,7 +327,7 @@ class RoomBatchHandler:
# Mark all events as historical
event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
event, context = await self.event_creation_handler.create_event(
event, unpersisted_context = await self.event_creation_handler.create_event(
await self.create_requester_for_user_id_from_app_service(
ev["sender"], app_service_requester.app_service
),
@ -345,7 +345,7 @@ class RoomBatchHandler:
historical=True,
depth=inherited_depth,
)
context = await unpersisted_context.persist(event)
assert context._state_group
# Normally this is done when persisting the event but we have to
@ -374,7 +374,7 @@ class RoomBatchHandler:
# correct stream_ordering as they are backfilled (which decrements).
# Events are sorted by (topological_ordering, stream_ordering)
# where topological_ordering is just depth.
for (event, context) in reversed(events_to_persist):
for event, context in reversed(events_to_persist):
# This call can't raise `PartialStateConflictError` since we forbid
# use of the historical batch API during partial state
await self.event_creation_handler.handle_new_client_event(

View File

@ -207,6 +207,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def remote_knock(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
@ -414,7 +415,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
max_retries = 5
for i in range(max_retries):
try:
event, context = await self.event_creation_handler.create_event(
(
event,
unpersisted_context,
) = await self.event_creation_handler.create_event(
requester,
{
"type": EventTypes.Member,
@ -435,7 +439,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
outlier=outlier,
historical=historical,
)
context = await unpersisted_context.persist(event)
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types([(EventTypes.Member, None)])
)
@ -1084,7 +1088,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
)
return await self.remote_knock(
remote_room_hosts, room_id, target, content
requester, remote_room_hosts, room_id, target, content
)
return await self._local_membership_update(
@ -1958,7 +1962,10 @@ class RoomMemberMasterHandler(RoomMemberHandler):
max_retries = 5
for i in range(max_retries):
try:
event, context = await self.event_creation_handler.create_event(
(
event,
unpersisted_context,
) = await self.event_creation_handler.create_event(
requester,
event_dict,
txn_id=txn_id,
@ -1966,6 +1973,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
auth_event_ids=auth_event_ids,
outlier=True,
)
context = await unpersisted_context.persist(event)
event.internal_metadata.out_of_band_membership = True
result_event = (
@ -1991,6 +1999,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
async def remote_knock(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,

View File

@ -113,6 +113,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
async def remote_knock(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
@ -123,9 +124,10 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
Implements RoomMemberHandler.remote_knock
"""
ret = await self._remote_knock_client(
requester=requester,
remote_room_hosts=remote_room_hosts,
room_id=room_id,
user=user,
user_id=user.to_string(),
content=content,
)
return ret["event_id"], ret["stream_id"]

View File

@ -23,7 +23,8 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import NotFoundError, SynapseError
from synapse.api.filtering import Filter
from synapse.events import EventBase
from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID
from synapse.events.utils import SerializeEventConfig
from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType, UserID
from synapse.types.state import StateFilter
from synapse.visibility import filter_events_for_client
@ -109,12 +110,12 @@ class SearchHandler:
return historical_room_ids
async def search(
self, user: UserID, content: JsonDict, batch: Optional[str] = None
self, requester: Requester, content: JsonDict, batch: Optional[str] = None
) -> JsonDict:
"""Performs a full text search for a user.
Args:
user: The user performing the search.
requester: The user performing the search.
content: Search parameters
batch: The next_batch parameter. Used for pagination.
@ -199,7 +200,7 @@ class SearchHandler:
)
return await self._search(
user,
requester,
batch_group,
batch_group_key,
batch_token,
@ -217,7 +218,7 @@ class SearchHandler:
async def _search(
self,
user: UserID,
requester: Requester,
batch_group: Optional[str],
batch_group_key: Optional[str],
batch_token: Optional[str],
@ -235,7 +236,7 @@ class SearchHandler:
"""Performs a full text search for a user.
Args:
user: The user performing the search.
requester: The user performing the search.
batch_group: Pagination information.
batch_group_key: Pagination information.
batch_token: Pagination information.
@ -269,7 +270,7 @@ class SearchHandler:
# TODO: Search through left rooms too
rooms = await self.store.get_rooms_for_local_user_where_membership_is(
user.to_string(),
requester.user.to_string(),
membership_list=[Membership.JOIN],
# membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban],
)
@ -303,13 +304,13 @@ class SearchHandler:
if order_by == "rank":
search_result, sender_group = await self._search_by_rank(
user, room_ids, search_term, keys, search_filter
requester.user, room_ids, search_term, keys, search_filter
)
# Unused return values for rank search.
global_next_batch = None
elif order_by == "recent":
search_result, global_next_batch = await self._search_by_recent(
user,
requester.user,
room_ids,
search_term,
keys,
@ -334,7 +335,7 @@ class SearchHandler:
assert after_limit is not None
contexts = await self._calculate_event_contexts(
user,
requester.user,
search_result.allowed_events,
before_limit,
after_limit,
@ -363,27 +364,37 @@ class SearchHandler:
# The returned events.
search_result.allowed_events,
),
user.to_string(),
requester.user.to_string(),
)
# We're now about to serialize the events. We should not make any
# blocking calls after this. Otherwise, the 'age' will be wrong.
time_now = self.clock.time_msec()
serialize_options = SerializeEventConfig(requester=requester)
for context in contexts.values():
context["events_before"] = self._event_serializer.serialize_events(
context["events_before"], time_now, bundle_aggregations=aggregations
context["events_before"],
time_now,
bundle_aggregations=aggregations,
config=serialize_options,
)
context["events_after"] = self._event_serializer.serialize_events(
context["events_after"], time_now, bundle_aggregations=aggregations
context["events_after"],
time_now,
bundle_aggregations=aggregations,
config=serialize_options,
)
results = [
{
"rank": search_result.rank_map[e.event_id],
"result": self._event_serializer.serialize_event(
e, time_now, bundle_aggregations=aggregations
e,
time_now,
bundle_aggregations=aggregations,
config=serialize_options,
),
"context": contexts.get(e.event_id, {}),
}
@ -398,7 +409,9 @@ class SearchHandler:
if state_results:
rooms_cat_res["state"] = {
room_id: self._event_serializer.serialize_events(state_events, time_now)
room_id: self._event_serializer.serialize_events(
state_events, time_now, config=serialize_options
)
for room_id, state_events in state_results.items()
}

View File

@ -1297,7 +1297,6 @@ class SyncHandler:
return RoomNotifCounts.empty()
with Measure(self.clock, "unread_notifs_for_room_id"):
return await self.store.get_unread_event_push_actions_by_room_for_user(
room_id,
sync_config.user.to_string(),

View File

@ -44,6 +44,7 @@ from twisted.internet.interfaces import (
IAddress,
IDelayedCall,
IHostResolution,
IOpenSSLContextFactory,
IReactorCore,
IReactorPluggableNameResolver,
IReactorTime,
@ -958,8 +959,8 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):
self._context = SSL.Context(SSL.SSLv23_METHOD)
self._context.set_verify(VERIFY_NONE, lambda *_: False)
def getContext(self, hostname=None, port=None):
def getContext(self) -> SSL.Context:
return self._context
def creatorForNetloc(self, hostname: bytes, port: int):
def creatorForNetloc(self, hostname: bytes, port: int) -> IOpenSSLContextFactory:
return self

View File

@ -440,7 +440,7 @@ class MatrixFederationHttpClient:
Args:
request: details of request to be sent
retry_on_dns_fail: true if the request should be retied on DNS failures
retry_on_dns_fail: true if the request should be retried on DNS failures
timeout: number of milliseconds to wait for the response headers
(including connecting to the server), *for each attempt*.
@ -475,7 +475,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@ -871,7 +871,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@ -958,7 +958,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@ -1036,6 +1036,8 @@ class MatrixFederationHttpClient:
args: A dictionary used to create query strings, defaults to
None.
retry_on_dns_fail: true if the request should be retried on DNS failures
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
@ -1063,7 +1065,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@ -1141,7 +1143,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@ -1197,7 +1199,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.

View File

@ -524,6 +524,7 @@ def whitelisted_homeserver(destination: str) -> bool:
# Start spans and scopes
# Could use kwargs but I want these to be explicit
def start_active_span(
operation_name: str,

479 synapse/media/_base.py Normal file
View File

@ -0,0 +1,479 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import urllib
from abc import ABC, abstractmethod
from types import TracebackType
from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
import attr
from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender
from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.util.stringutils import is_ascii, parse_and_validate_server_name
logger = logging.getLogger(__name__)
# list all text content types that will have the charset default to UTF-8 when
# none is given
TEXT_CONTENT_TYPES = [
"text/css",
"text/csv",
"text/html",
"text/calendar",
"text/plain",
"text/javascript",
"application/json",
"application/ld+json",
"application/rtf",
"image/svg+xml",
"text/xml",
]
def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:
"""Parses the server name, media ID and optional file name from the request URI
Also performs some rough validation on the server name.
Args:
request: The `Request`.
Returns:
A tuple containing the parsed server name, media ID and optional file name.
Raises:
SynapseError(404): if parsing or validation fails for any reason
"""
try:
# The type on postpath seems incorrect in Twisted 21.2.0.
postpath: List[bytes] = request.postpath # type: ignore
assert postpath
# This allows users to append e.g. /test.png to the URL. Useful for
# clients that parse the URL to see content type.
server_name_bytes, media_id_bytes = postpath[:2]
server_name = server_name_bytes.decode("utf-8")
media_id = media_id_bytes.decode("utf-8")
# Validate the server name, raising if invalid
parse_and_validate_server_name(server_name)
file_name = None
if len(postpath) > 2:
try:
file_name = urllib.parse.unquote(postpath[-1].decode("utf-8"))
except UnicodeDecodeError:
pass
return server_name, media_id, file_name
except Exception:
raise SynapseError(
404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN
)
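# For example, a request whose postpath is [b"matrix.org", b"abc123", b"cat.png"]
# yields ("matrix.org", "abc123", "cat.png"); without the optional trailing
# segment, the file name is None.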
def respond_404(request: SynapseRequest) -> None:
respond_with_json(
request,
404,
cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND),
send_cors=True,
)
async def respond_with_file(
request: SynapseRequest,
media_type: str,
file_path: str,
file_size: Optional[int] = None,
upload_name: Optional[str] = None,
) -> None:
logger.debug("Responding with %r", file_path)
if os.path.isfile(file_path):
if file_size is None:
stat = os.stat(file_path)
file_size = stat.st_size
add_file_headers(request, media_type, file_size, upload_name)
with open(file_path, "rb") as f:
await make_deferred_yieldable(FileSender().beginFileTransfer(f, request))
finish_request(request)
else:
respond_404(request)
def add_file_headers(
request: Request,
media_type: str,
file_size: Optional[int],
upload_name: Optional[str],
) -> None:
"""Adds the correct response headers in preparation for responding with the
media.
Args:
request
media_type: The media/content type.
file_size: Size in bytes of the media, if known.
upload_name: The name of the requested file, if any.
"""
def _quote(x: str) -> str:
return urllib.parse.quote(x.encode("utf-8"))
# Default to a UTF-8 charset for text content types.
# ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16'
if media_type.lower() in TEXT_CONTENT_TYPES:
content_type = media_type + "; charset=UTF-8"
else:
content_type = media_type
request.setHeader(b"Content-Type", content_type.encode("UTF-8"))
if upload_name:
# RFC6266 section 4.1 [1] defines both `filename` and `filename*`.
#
# `filename` is defined to be a `value`, which is defined by RFC2616
# section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
# is (essentially) a single US-ASCII word, and a `quoted-string` is a
# US-ASCII string surrounded by double-quotes, using backslash as an
# escape character. Note that %-encoding is *not* permitted.
#
# `filename*` is defined to be an `ext-value`, which is defined in
# RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
# where `value-chars` is essentially a %-encoded string in the given charset.
#
# [1]: https://tools.ietf.org/html/rfc6266#section-4.1
# [2]: https://tools.ietf.org/html/rfc2616#section-3.6
# [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1
# We avoid the quoted-string version of `filename`, because (a) synapse didn't
# correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we
# may as well just do the filename* version.
if _can_encode_filename_as_token(upload_name):
disposition = "inline; filename=%s" % (upload_name,)
else:
disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),)
request.setHeader(b"Content-Disposition", disposition.encode("ascii"))
# cache for at least a day.
# XXX: we might want to turn this off for data we don't want to
# recommend caching as it's sensitive or private - or at least
# use `Cache-Control: private`. Don't bother setting Expires, as all our
# clients are smart enough to be happy with Cache-Control.
request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
if file_size is not None:
request.setHeader(b"Content-Length", b"%d" % (file_size,))
# Tell web crawlers to not index, archive, or follow links in media. This
# should help to prevent things in the media repo from showing up in web
# search results.
request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex")
# separators as defined in RFC2616. SP and HT are handled separately.
# see _can_encode_filename_as_token.
_FILENAME_SEPARATOR_CHARS = {
"(",
")",
"<",
">",
"@",
",",
";",
":",
"\\",
'"',
"/",
"[",
"]",
"?",
"=",
"{",
"}",
}
def _can_encode_filename_as_token(x: str) -> bool:
for c in x:
# from RFC2616:
#
# token = 1*<any CHAR except CTLs or separators>
#
# separators = "(" | ")" | "<" | ">" | "@"
# | "," | ";" | ":" | "\" | <">
# | "/" | "[" | "]" | "?" | "="
# | "{" | "}" | SP | HT
#
# CHAR = <any US-ASCII character (octets 0 - 127)>
#
# CTL = <any US-ASCII control character
# (octets 0 - 31) and DEL (127)>
#
if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS:
return False
return True
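# A couple of illustrative checks (the filenames are made up): plain ASCII
# words pass, while spaces, separators and non-ASCII force the filename* form.
#
#     >>> _can_encode_filename_as_token("logo.png")
#     True
#     >>> _can_encode_filename_as_token("my report.pdf")  # contains SP
#     False
#     >>> _can_encode_filename_as_token("résumé.pdf")  # non-ASCII
#     False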
async def respond_with_responder(
request: SynapseRequest,
responder: "Optional[Responder]",
media_type: str,
file_size: Optional[int],
upload_name: Optional[str] = None,
) -> None:
"""Responds to the request with given responder. If responder is None then
returns 404.
Args:
request
responder
media_type: The media/content type.
file_size: Size in bytes of the media. If not known, it should be None.
upload_name: The name of the requested file, if any.
"""
if not responder:
respond_404(request)
return
# If we have a responder we *must* use it as a context manager.
with responder:
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return
logger.debug("Responding to media request with responder %s", responder)
add_file_headers(request, media_type, file_size, upload_name)
try:
await responder.write_to_consumer(request)
except Exception as e:
# The majority of the time this will be due to the client having gone
# away. Unfortunately, Twisted simply throws a generic exception at us
# in that case.
logger.warning("Failed to write to consumer: %s %s", type(e), e)
# Unregister the producer, if it has one, so Twisted doesn't complain
if request.producer:
request.unregisterProducer()
finish_request(request)
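# Typical call-site sketch (variable names are made up): a responder obtained
# from media storage is handed straight to this helper, which deals with the
# None/404 case itself:
#
#     responder = await media_storage.fetch_media(file_info)
#     await respond_with_responder(
#         request, responder, media_type, file_size, upload_name
#     )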
class Responder(ABC):
"""Represents a response that can be streamed to the requester.
Responder is a context manager which *must* be used, so that any resources
held can be cleaned up.
"""
@abstractmethod
def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
"""Stream response into consumer
Args:
consumer: The consumer to stream into.
Returns:
Resolves once the response has finished being written
"""
raise NotImplementedError()
def __enter__(self) -> None: # noqa: B027
pass
def __exit__( # noqa: B027
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
pass
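# A minimal illustrative subclass (not part of this module): a responder that
# streams a bytestring already held in memory.
#
#     class BytesResponder(Responder):
#         def __init__(self, data: bytes) -> None:
#             self._data = data
#
#         def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
#             # IConsumer.write is synchronous, so return an already-fired
#             # Deferred (assumes `from twisted.internet import defer`).
#             consumer.write(self._data)
#             return defer.succeed(None)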
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ThumbnailInfo:
"""Details about a generated thumbnail."""
width: int
height: int
method: str
# Content type of thumbnail, e.g. image/png
type: str
# The size of the media file, in bytes.
length: Optional[int] = None
@attr.s(slots=True, frozen=True, auto_attribs=True)
class FileInfo:
"""Details about a requested/uploaded file."""
# The server name where the media originated from, or None if local.
server_name: Optional[str]
# The local ID of the file. For local files this is the same as the media_id
file_id: str
# Whether the file is for the URL preview cache.
url_cache: bool = False
# Whether the file is a thumbnail or not.
thumbnail: Optional[ThumbnailInfo] = None
# The below properties exist to maintain compatibility with third-party modules.
@property
def thumbnail_width(self) -> Optional[int]:
if not self.thumbnail:
return None
return self.thumbnail.width
@property
def thumbnail_height(self) -> Optional[int]:
if not self.thumbnail:
return None
return self.thumbnail.height
@property
def thumbnail_method(self) -> Optional[str]:
if not self.thumbnail:
return None
return self.thumbnail.method
@property
def thumbnail_type(self) -> Optional[str]:
if not self.thumbnail:
return None
return self.thumbnail.type
@property
def thumbnail_length(self) -> Optional[int]:
if not self.thumbnail:
return None
return self.thumbnail.length
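# Illustrative construction (the values are made up): the compatibility
# properties simply proxy through to the nested ThumbnailInfo.
#
#     >>> info = FileInfo(
#     ...     server_name=None,
#     ...     file_id="abc123",
#     ...     thumbnail=ThumbnailInfo(width=32, height=32, method="crop", type="image/png"),
#     ... )
#     >>> info.thumbnail_width, info.thumbnail_type
#     (32, 'image/png')
#     >>> FileInfo(server_name=None, file_id="abc123").thumbnail_width is None
#     True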
def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]:
"""
Get the filename of the downloaded file by inspecting the
Content-Disposition HTTP header.
Args:
headers: The HTTP request headers.
Returns:
The filename, or None.
"""
content_disposition = headers.get(b"Content-Disposition", [b""])
# No header, bail out.
if not content_disposition[0]:
return None
_, params = _parse_header(content_disposition[0])
upload_name = None
# First check if there is a valid UTF-8 filename
upload_name_utf8 = params.get(b"filename*", None)
if upload_name_utf8:
if upload_name_utf8.lower().startswith(b"utf-8''"):
upload_name_utf8 = upload_name_utf8[7:]
# We have a filename*= section. This MUST be ASCII, and any UTF-8
# bytes are %-quoted.
try:
# Once it is decoded, we can then unquote the %-encoded
# parts strictly into a unicode string.
upload_name = urllib.parse.unquote(
upload_name_utf8.decode("ascii"), errors="strict"
)
except UnicodeDecodeError:
# Incorrect UTF-8.
pass
# If there isn't one, check for an ASCII name.
if not upload_name:
upload_name_ascii = params.get(b"filename", None)
if upload_name_ascii and is_ascii(upload_name_ascii):
upload_name = upload_name_ascii.decode("ascii")
# This may be None here, indicating we did not find a matching name.
return upload_name
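# Illustrative examples (the header values are made up); the RFC5987
# filename* form takes precedence, and a missing header yields None:
#
#     >>> get_filename_from_headers(
#     ...     {b"Content-Disposition": [b"inline; filename*=utf-8''caf%C3%A9.jpg"]}
#     ... )
#     'café.jpg'
#     >>> get_filename_from_headers(
#     ...     {b"Content-Disposition": [b'inline; filename="cafe.jpg"']}
#     ... )
#     'cafe.jpg'
#     >>> get_filename_from_headers({}) is None
#     True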
def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]:
"""Parse a Content-type like header.
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
line: header to be parsed
Returns:
The main content-type, followed by the parameter dictionary
"""
parts = _parseparam(b";" + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find(b"=")
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1 :].strip()
# strip double-quotes
if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
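# For illustration (made-up header): parameter names come back lower-cased
# with surrounding double-quotes stripped.
#
#     >>> _parse_header(b'text/plain; Charset="utf-8"')
#     (b'text/plain', {b'charset': b'utf-8'})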
def _parseparam(s: bytes) -> Generator[bytes, None, None]:
"""Generator which splits the input on ;, respecting double-quoted sequences
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
s: header to be parsed
Returns:
The split input
"""
while s[:1] == b";":
s = s[1:]
# look for the next ;
end = s.find(b";")
# if there is an odd number of " marks between here and the next ;, skip to the
# next ; instead
while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
end = s.find(b";", end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
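# For illustration (made-up input): a ";" inside a double-quoted value does
# not split the parameter.
#
#     >>> list(_parseparam(b';a="x;y"; b=2'))
#     [b'a="x;y"', b'b=2']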

View File

@ -32,18 +32,10 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
from synapse.config._base import ConfigError
from synapse.config.repository import ThumbnailRequirement
from synapse.http.server import UnrecognizedRequestResource
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import random_string
from ._base import (
from synapse.media._base import (
FileInfo,
Responder,
ThumbnailInfo,
@ -51,15 +43,15 @@ from ._base import (
respond_404,
respond_with_responder,
)
from .config_resource import MediaConfigResource
from .download_resource import DownloadResource
from .filepath import MediaFilePaths
from .media_storage import MediaStorage
from .preview_url_resource import PreviewUrlResource
from .storage_provider import StorageProviderWrapper
from .thumbnail_resource import ThumbnailResource
from .thumbnailer import Thumbnailer, ThumbnailError
from .upload_resource import UploadResource
from synapse.media.filepath import MediaFilePaths
from synapse.media.media_storage import MediaStorage
from synapse.media.storage_provider import StorageProviderWrapper
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import random_string
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -1044,69 +1036,3 @@ class MediaRepository:
removed_media.append(media_id)
return removed_media, len(removed_media)
class MediaRepositoryResource(UnrecognizedRequestResource):
"""File uploading and downloading.
Uploads are POSTed to a resource which returns a content URI, which is used
to GET the download::
=> POST /_matrix/media/r0/upload HTTP/1.1
Content-Type: <media-type>
Content-Length: <content-length>
<media>
<= HTTP/1.1 200 OK
Content-Type: application/json
{ "content_uri": "mxc://<server-name>/<media-id>" }
=> GET /_matrix/media/r0/download/<server-name>/<media-id> HTTP/1.1
<= HTTP/1.1 200 OK
Content-Type: <media-type>
Content-Disposition: attachment;filename=<upload-filename>
<media>
Clients can get thumbnails by supplying a desired width and height and
thumbnailing method::
=> GET /_matrix/media/r0/thumbnail/<server_name>
/<media-id>?width=<w>&height=<h>&method=<m> HTTP/1.1
<= HTTP/1.1 200 OK
Content-Type: image/jpeg or image/png
<thumbnail>
The thumbnail methods are "crop" and "scale". "scale" tries to return an
image where either the width or the height is smaller than the requested
size. The client should then scale and letterbox the image if it needs to
fit within a given rectangle. "crop" tries to return an image where the
width and height are close to the requested size and the aspect matches
the requested size. The client should scale the image if it needs to fit
within a given rectangle.
"""
def __init__(self, hs: "HomeServer"):
# If we're not configured to use it, raise if we somehow got here.
if not hs.config.media.can_load_media_repo:
raise ConfigError("Synapse is not configured to use a media repo.")
super().__init__()
media_repo = hs.get_media_repository()
self.putChild(b"upload", UploadResource(hs, media_repo))
self.putChild(b"download", DownloadResource(hs, media_repo))
self.putChild(
b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage)
)
if hs.config.media.url_preview_enabled:
self.putChild(
b"preview_url",
PreviewUrlResource(hs, media_repo, media_repo.media_storage),
)
self.putChild(b"config", MediaConfigResource(hs))

View File

@ -0,0 +1,374 @@
# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import os
import shutil
from types import TracebackType
from typing import (
IO,
TYPE_CHECKING,
Any,
Awaitable,
BinaryIO,
Callable,
Generator,
Optional,
Sequence,
Tuple,
Type,
)
import attr
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender
import synapse
from synapse.api.errors import NotFoundError
from synapse.logging.context import defer_to_thread, make_deferred_yieldable
from synapse.util import Clock
from synapse.util.file_consumer import BackgroundFileConsumer
from ._base import FileInfo, Responder
from .filepath import MediaFilePaths
if TYPE_CHECKING:
from synapse.media.storage_provider import StorageProvider
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class MediaStorage:
"""Responsible for storing/fetching files from local sources.
Args:
hs
local_media_directory: Base path where we store media on disk
filepaths
storage_providers: List of StorageProvider that are used to fetch and store files.
"""
def __init__(
self,
hs: "HomeServer",
local_media_directory: str,
filepaths: MediaFilePaths,
storage_providers: Sequence["StorageProvider"],
):
self.hs = hs
self.reactor = hs.get_reactor()
self.local_media_directory = local_media_directory
self.filepaths = filepaths
self.storage_providers = storage_providers
self.spam_checker = hs.get_spam_checker()
self.clock = hs.get_clock()
async def store_file(self, source: IO, file_info: FileInfo) -> str:
"""Write `source` to the on disk media store, and also any other
configured storage providers
Args:
source: A file like object that should be written
file_info: Info about the file to store
Returns:
the file path written to in the primary media store
"""
with self.store_into_file(file_info) as (f, fname, finish_cb):
# Write to the main repository
await self.write_to_file(source, f)
await finish_cb()
return fname
async def write_to_file(self, source: IO, output: IO) -> None:
"""Asynchronously write the `source` to `output`."""
await defer_to_thread(self.reactor, _write_file_synchronously, source, output)
@contextlib.contextmanager
def store_into_file(
self, file_info: FileInfo
) -> Generator[Tuple[BinaryIO, str, Callable[[], Awaitable[None]]], None, None]:
"""Context manager used to get a file like object to write into, as
described by file_info.
Actually yields a 3-tuple (file, fname, finish_cb), where file is a file
like object that can be written to, fname is the absolute path of file
on disk, and finish_cb is a function that returns an awaitable.
fname can be used to read the contents from after upload, e.g. to
generate thumbnails.
finish_cb must be called and waited on after the file has successfully
been written to. It should not be called if there was an error.
Args:
file_info: Info about the file to store
Example:
with media_storage.store_into_file(info) as (f, fname, finish_cb):
# .. write into f ...
await finish_cb()
"""
path = self._file_info_to_path(file_info)
fname = os.path.join(self.local_media_directory, path)
dirname = os.path.dirname(fname)
os.makedirs(dirname, exist_ok=True)
# A one-element list, so that the `finish` closure below can flag completion.
finished_called = [False]
try:
with open(fname, "wb") as f:
async def finish() -> None:
# Ensure that all writes have been flushed and close the
# file.
f.flush()
f.close()
spam_check = await self.spam_checker.check_media_file_for_spam(
ReadableFileWrapper(self.clock, fname), file_info
)
if spam_check != synapse.module_api.NOT_SPAM:
logger.info("Blocking media due to spam checker")
# Note that we'll delete the stored media, due to the
# try/except below. The media also won't be stored in
# the DB.
# We currently ignore any additional field returned by
# the spam-check API.
raise SpamMediaException(errcode=spam_check[0])
for provider in self.storage_providers:
await provider.store_file(path, file_info)
finished_called[0] = True
yield f, fname, finish
except Exception as e:
try:
os.remove(fname)
except Exception:
pass
raise e from None
if not finished_called[0]:
raise Exception("Finished callback not called")
async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
"""Attempts to fetch media described by file_info from the local cache
and configured storage providers.
Args:
file_info
Returns:
Returns a Responder if the file was found, otherwise None.
"""
paths = [self._file_info_to_path(file_info)]
# fallback for remote thumbnails with no method in the filename
if file_info.thumbnail and file_info.server_name:
paths.append(
self.filepaths.remote_media_thumbnail_rel_legacy(
server_name=file_info.server_name,
file_id=file_info.file_id,
width=file_info.thumbnail.width,
height=file_info.thumbnail.height,
content_type=file_info.thumbnail.type,
)
)
for path in paths:
local_path = os.path.join(self.local_media_directory, path)
if os.path.exists(local_path):
logger.debug("responding with local file %s", local_path)
return FileResponder(open(local_path, "rb"))
logger.debug("local file %s did not exist", local_path)
for provider in self.storage_providers:
for path in paths:
res: Any = await provider.fetch(path, file_info)
if res:
logger.debug("Streaming %s from %s", path, provider)
return res
logger.debug("%s not found on %s", path, provider)
return None
async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str:
"""Ensures that the given file is in the local cache. Attempts to
download it from storage providers if it isn't.
Args:
file_info
Returns:
Full path to local file
"""
path = self._file_info_to_path(file_info)
local_path = os.path.join(self.local_media_directory, path)
if os.path.exists(local_path):
return local_path
# Fallback for paths without method names
# Should be removed in the future
if file_info.thumbnail and file_info.server_name:
legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy(
server_name=file_info.server_name,
file_id=file_info.file_id,
width=file_info.thumbnail.width,
height=file_info.thumbnail.height,
content_type=file_info.thumbnail.type,
)
legacy_local_path = os.path.join(self.local_media_directory, legacy_path)
if os.path.exists(legacy_local_path):
return legacy_local_path
dirname = os.path.dirname(local_path)
os.makedirs(dirname, exist_ok=True)
for provider in self.storage_providers:
res: Any = await provider.fetch(path, file_info)
if res:
with res:
consumer = BackgroundFileConsumer(
open(local_path, "wb"), self.reactor
)
await res.write_to_consumer(consumer)
await consumer.wait()
return local_path
raise NotFoundError()
def _file_info_to_path(self, file_info: FileInfo) -> str:
"""Converts file_info into a relative path.
The path is suitable for storing files under a directory, e.g. used to
store files on local FS under the base media repository directory.
"""
if file_info.url_cache:
if file_info.thumbnail:
return self.filepaths.url_cache_thumbnail_rel(
media_id=file_info.file_id,
width=file_info.thumbnail.width,
height=file_info.thumbnail.height,
content_type=file_info.thumbnail.type,
method=file_info.thumbnail.method,
)
return self.filepaths.url_cache_filepath_rel(file_info.file_id)
if file_info.server_name:
if file_info.thumbnail:
return self.filepaths.remote_media_thumbnail_rel(
server_name=file_info.server_name,
file_id=file_info.file_id,
width=file_info.thumbnail.width,
height=file_info.thumbnail.height,
content_type=file_info.thumbnail.type,
method=file_info.thumbnail.method,
)
return self.filepaths.remote_media_filepath_rel(
file_info.server_name, file_info.file_id
)
if file_info.thumbnail:
return self.filepaths.local_media_thumbnail_rel(
media_id=file_info.file_id,
width=file_info.thumbnail.width,
height=file_info.thumbnail.height,
content_type=file_info.thumbnail.type,
method=file_info.thumbnail.method,
)
return self.filepaths.local_media_filepath_rel(file_info.file_id)
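# Illustrative mapping (the exact directory layout belongs to MediaFilePaths
# and is assumed here): a plain local file such as
# FileInfo(server_name=None, file_id="abc123") resolves via
# local_media_filepath_rel to a relative path of the shape
# "local_content/ab/c1/23", while the url-cache, remote and thumbnail
# variants use the corresponding *_rel helpers above.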
def _write_file_synchronously(source: IO, dest: IO) -> None:
"""Write `source` to the file like `dest` synchronously. Should be called
from a thread.
Args:
source: A file like object that's to be written
dest: A file like object to be written to
"""
source.seek(0) # Ensure we read from the start of the file
shutil.copyfileobj(source, dest)
class FileResponder(Responder):
"""Wraps an open file that can be sent to a request.
Args:
open_file: A file-like object to be streamed to the client;
it is closed when streaming finishes.
"""
def __init__(self, open_file: IO):
self.open_file = open_file
def write_to_consumer(self, consumer: IConsumer) -> Deferred:
return make_deferred_yieldable(
FileSender().beginFileTransfer(self.open_file, consumer)
)
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.open_file.close()
class SpamMediaException(NotFoundError):
"""The media was blocked by a spam checker, so we simply 404 the request (in
the same way as if it was quarantined).
"""
@attr.s(slots=True, auto_attribs=True)
class ReadableFileWrapper:
"""Wrapper that allows reading a file in chunks, yielding to the reactor,
and writing to a callback.
This is a simplified `FileSender` that takes an IO object rather than an
`IConsumer`.
"""
CHUNK_SIZE = 2**14
clock: Clock
path: str
async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None:
"""Reads the file in chunks and calls the callback with each chunk."""
with open(self.path, "rb") as file:
while True:
chunk = file.read(self.CHUNK_SIZE)
if not chunk:
break
callback(chunk)
# We yield to the reactor by sleeping for 0 seconds.
await self.clock.sleep(0)
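# Illustrative use (the variable names are made up): stream a stored file
# into a hash without loading it all into memory at once.
#
#     hasher = hashlib.sha256()  # assumes `import hashlib`
#     wrapper = ReadableFileWrapper(clock, path)
#     await wrapper.write_chunks_to(hasher.update)
#     digest = hasher.hexdigest()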

View File

@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, List, Optional
import attr
from synapse.rest.media.v1.preview_html import parse_html_description
from synapse.media.preview_html import parse_html_description
from synapse.types import JsonDict
from synapse.util import json_decoder

View File

@ -0,0 +1,181 @@
# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import os
import shutil
from typing import TYPE_CHECKING, Callable, Optional
from synapse.config._base import Config
from synapse.logging.context import defer_to_thread, run_in_background
from synapse.util.async_helpers import maybe_awaitable
from ._base import FileInfo, Responder
from .media_storage import FileResponder
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from synapse.server import HomeServer
class StorageProvider(metaclass=abc.ABCMeta):
"""A storage provider is a service that can store uploaded media and
retrieve them.
"""
@abc.abstractmethod
async def store_file(self, path: str, file_info: FileInfo) -> None:
"""Store the file described by file_info. The actual contents can be
retrieved by reading the file in file_info.upload_path.
Args:
path: Relative path of file in local cache
file_info: The metadata of the file.
"""
@abc.abstractmethod
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
"""Attempt to fetch the file described by file_info and stream it
into writer.
Args:
path: Relative path of file in local cache
file_info: The metadata of the file.
Returns:
Returns a Responder if the provider has the file, otherwise returns None.
"""
class StorageProviderWrapper(StorageProvider):
"""Wraps a storage provider and provides various config options
Args:
backend: The storage provider to wrap.
store_local: Whether to store new local files or not.
store_synchronous: Whether to wait for the file to be successfully
uploaded, or to do the upload in the background.
store_remote: Whether remote media should be uploaded.
"""
def __init__(
self,
backend: StorageProvider,
store_local: bool,
store_synchronous: bool,
store_remote: bool,
):
self.backend = backend
self.store_local = store_local
self.store_synchronous = store_synchronous
self.store_remote = store_remote
def __str__(self) -> str:
return "StorageProviderWrapper[%s]" % (self.backend,)
async def store_file(self, path: str, file_info: FileInfo) -> None:
if not file_info.server_name and not self.store_local:
return None
if file_info.server_name and not self.store_remote:
return None
if file_info.url_cache:
# The URL preview cache is short lived and not worth offloading or
# backing up.
return None
if self.store_synchronous:
# store_file is supposed to return an Awaitable, but guard
# against improper implementations.
await maybe_awaitable(self.backend.store_file(path, file_info)) # type: ignore
else:
# TODO: Handle errors.
async def store() -> None:
try:
return await maybe_awaitable(
self.backend.store_file(path, file_info)
)
except Exception:
logger.exception("Error storing file")
run_in_background(store)
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
if file_info.url_cache:
# Files in the URL preview cache definitely aren't stored here,
# so avoid any potentially slow I/O or network access.
return None
# fetch is supposed to return an Awaitable, but guard
# against improper implementations.
return await maybe_awaitable(self.backend.fetch(path, file_info))
class FileStorageProviderBackend(StorageProvider):
"""A storage provider that stores files in a directory on a filesystem.
Args:
hs
config: The config returned by `parse_config`.
"""
def __init__(self, hs: "HomeServer", config: str):
self.hs = hs
self.cache_directory = hs.config.media.media_store_path
self.base_directory = config
def __str__(self) -> str:
return "FileStorageProviderBackend[%s]" % (self.base_directory,)
async def store_file(self, path: str, file_info: FileInfo) -> None:
"""See StorageProvider.store_file"""
primary_fname = os.path.join(self.cache_directory, path)
backup_fname = os.path.join(self.base_directory, path)
dirname = os.path.dirname(backup_fname)
os.makedirs(dirname, exist_ok=True)
# mypy needs help inferring the type of the second parameter, which is generic
shutil_copyfile: Callable[[str, str], str] = shutil.copyfile
await defer_to_thread(
self.hs.get_reactor(),
shutil_copyfile,
primary_fname,
backup_fname,
)
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
"""See StorageProvider.fetch"""
backup_fname = os.path.join(self.base_directory, path)
if os.path.isfile(backup_fname):
return FileResponder(open(backup_fname, "rb"))
return None
@staticmethod
def parse_config(config: dict) -> str:
"""Called on startup to parse config supplied. This should parse
the config and raise if there is a problem.
The returned value is passed into the constructor.
In this case we only care about a single param, the directory, so let's
just pull that out.
"""
return Config.ensure_directory(config["directory"])

View File

@ -38,7 +38,6 @@ class ThumbnailError(Exception):
class Thumbnailer:
FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"}
@staticmethod

View File

@ -87,7 +87,6 @@ class LaterGauge(Collector):
]
def collect(self) -> Iterable[Metric]:
g = GaugeMetricFamily(self.name, self.desc, labels=self.labels)
try:

View File

@ -139,7 +139,6 @@ def install_gc_manager() -> None:
class PyPyGCStats(Collector):
def collect(self) -> Iterable[Metric]:
# @stats is a pretty-printer object with __str__() returning a nice table,
# plus some fields that contain data from that table.
# unfortunately, fields are pretty-printed themselves (i. e. '4.5MB').

View File

@ -64,9 +64,11 @@ from synapse.events.third_party_rules import (
CHECK_EVENT_ALLOWED_CALLBACK,
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK,
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK,
ON_CREATE_ROOM_CALLBACK,
ON_NEW_EVENT_CALLBACK,
ON_PROFILE_UPDATE_CALLBACK,
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK,
ON_THREEPID_BIND_CALLBACK,
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
)
@ -357,6 +359,12 @@ class ModuleApi:
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
] = None,
on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None,
on_add_user_third_party_identifier: Optional[
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = None,
on_remove_user_third_party_identifier: Optional[
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = None,
) -> None:
"""Registers callbacks for third party event rules capabilities.
@ -373,6 +381,8 @@ class ModuleApi:
on_profile_update=on_profile_update,
on_user_deactivation_status_changed=on_user_deactivation_status_changed,
on_threepid_bind=on_threepid_bind,
on_add_user_third_party_identifier=on_add_user_third_party_identifier,
on_remove_user_third_party_identifier=on_remove_user_third_party_identifier,
)
def register_presence_router_callbacks(
@ -1576,14 +1586,14 @@ class ModuleApi:
)
requester = create_requester(user_id)
room_id_and_alias, _ = await self._hs.get_room_creation_handler().create_room(
room_id, room_alias, _ = await self._hs.get_room_creation_handler().create_room(
requester=requester,
config=config,
ratelimit=ratelimit,
creator_join_profile=creator_join_profile,
)
return room_id_and_alias["room_id"], room_id_and_alias.get("room_alias", None)
room_alias_str = room_alias.to_string() if room_alias else None
return room_id, room_alias_str
async def set_displayname(
self,

View File

@ -23,7 +23,6 @@ from typing import (
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
@ -276,7 +275,7 @@ class BulkPushRuleEvaluator:
if related_event is not None:
related_events[relation_type] = _flatten_dict(
related_event,
msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key,
msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key,
)
reply_event_id = (
@ -294,7 +293,7 @@ class BulkPushRuleEvaluator:
if related_event is not None:
related_events["m.in_reply_to"] = _flatten_dict(
related_event,
msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key,
msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key,
)
# indicate that this is from a fallback relation.
@ -330,7 +329,6 @@ class BulkPushRuleEvaluator:
context: EventContext,
event_id_to_event: Mapping[str, EventBase],
) -> None:
if (
not event.internal_metadata.is_notifiable()
or event.internal_metadata.is_historical()
@ -397,26 +395,17 @@ class BulkPushRuleEvaluator:
del notification_levels[key]
# Pull out any user and room mentions.
mentions = event.content.get(EventContentFields.MSC3952_MENTIONS)
has_mentions = self._intentional_mentions_enabled and isinstance(mentions, dict)
user_mentions: Set[str] = set()
if has_mentions:
# mypy seems to have lost the type even though it must be a dict here.
assert isinstance(mentions, dict)
# Remove out any non-string items and convert to a set.
user_mentions_raw = mentions.get("user_ids")
if isinstance(user_mentions_raw, list):
user_mentions = set(
filter(lambda item: isinstance(item, str), user_mentions_raw)
)
has_mentions = (
self._intentional_mentions_enabled
and EventContentFields.MSC3952_MENTIONS in event.content
)
evaluator = PushRuleEvaluator(
_flatten_dict(
event,
msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key,
msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key,
),
has_mentions,
user_mentions,
room_member_count,
sender_power_level,
notification_levels,
@ -424,7 +413,6 @@ class BulkPushRuleEvaluator:
self._related_event_match_enabled,
event.room_version.msc3931_push_features,
self.hs.config.experimental.msc1767_enabled, # MSC3931 flag
self.hs.config.experimental.msc3758_exact_event_match,
self.hs.config.experimental.msc3966_exact_event_property_contains,
)
@ -508,7 +496,7 @@ def _flatten_dict(
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, JsonValue]] = None,
*,
msc3783_escape_event_match_key: bool = False,
msc3873_escape_event_match_key: bool = False,
) -> Dict[str, JsonValue]:
"""
Given a JSON dictionary (or event) which might contain sub dictionaries,
@ -537,7 +525,7 @@ def _flatten_dict(
if result is None:
result = {}
for key, value in d.items():
if msc3783_escape_event_match_key:
if msc3873_escape_event_match_key:
# Escape periods in the key with a backslash (and backslashes with an
# extra backslash). This is since a period is used as a separator between
# nested fields.
@ -553,7 +541,7 @@ def _flatten_dict(
value,
prefix=(prefix + [key]),
result=result,
msc3783_escape_event_match_key=msc3783_escape_event_match_key,
msc3873_escape_event_match_key=msc3873_escape_event_match_key,
)
# `room_version` should only ever be set when looking at the top level of an event
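# Illustrative flattening (hypothetical event content): with
# msc3873_escape_event_match_key=True, {"m.test": {"foo": "bar"}} flattens to
# {"m\\.test.foo": "bar"}; the literal dot in the key is escaped so that "."
# can keep acting as the nesting separator.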

View File

@ -41,11 +41,12 @@ def format_push_rules_for_user(
rulearray.append(template_rule)
pattern_type = template_rule.pop("pattern_type", None)
if pattern_type == "user_id":
template_rule["pattern"] = user.to_string()
elif pattern_type == "user_localpart":
template_rule["pattern"] = user.localpart
for type_key in ("pattern", "value"):
type_value = template_rule.pop(f"{type_key}_type", None)
if type_value == "user_id":
template_rule[type_key] = user.to_string()
elif type_value == "user_localpart":
template_rule[type_key] = user.localpart
template_rule["enabled"] = enabled

View File

@ -265,7 +265,6 @@ class ReplicationRemoveTagRestServlet(ReplicationEndpoint):
@staticmethod
async def _serialize_payload(user_id: str, room_id: str, tag: str) -> JsonDict: # type: ignore[override]
return {}
async def _handle_request( # type: ignore[override]

View File

@ -195,7 +195,6 @@ class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
async def _serialize_payload( # type: ignore[override]
user_id: str, device_id: str, keys: JsonDict
) -> JsonDict:
return {
"user_id": user_id,
"device_id": device_id,

View File

@ -142,17 +142,12 @@ class ReplicationRemoteKnockRestServlet(ReplicationEndpoint):
}
async def _handle_request( # type: ignore[override]
self,
request: SynapseRequest,
content: JsonDict,
room_id: str,
user_id: str,
self, request: SynapseRequest, content: JsonDict, room_id: str, user_id: str
) -> Tuple[int, JsonDict]:
remote_room_hosts = content["remote_room_hosts"]
event_content = content["content"]
requester = Requester.deserialize(self.store, content["requester"])
request.requester = requester
logger.debug("remote_knock: %s on room: %s", user_id, room_id)
@ -277,16 +272,12 @@ class ReplicationRemoteRescindKnockRestServlet(ReplicationEndpoint):
}
async def _handle_request( # type: ignore[override]
self,
request: SynapseRequest,
content: JsonDict,
knock_event_id: str,
self, request: SynapseRequest, content: JsonDict, knock_event_id: str
) -> Tuple[int, JsonDict]:
txn_id = content["txn_id"]
event_content = content["content"]
requester = Requester.deserialize(self.store, content["requester"])
request.requester = requester
# hopefully we're now on the master, so this won't recurse!
@ -363,3 +354,5 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationRemoteJoinRestServlet(hs).register(http_server)
ReplicationRemoteRejectInviteRestServlet(hs).register(http_server)
ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server)
ReplicationRemoteKnockRestServlet(hs).register(http_server)
ReplicationRemoteRescindKnockRestServlet(hs).register(http_server)

View File

@ -328,7 +328,6 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
outbound_redis_connection: txredisapi.ConnectionHandler,
channel_names: List[str],
):
super().__init__(
hs,
uuid="subscriber",

View File

@ -139,7 +139,6 @@ class EventsStream(Stream):
current_token: Token,
target_row_count: int,
) -> StreamUpdateResult:
# the events stream merges together three separate sources:
# * new events
# * current_state changes

View File

@ -108,8 +108,7 @@ class ClientRestResource(JsonResource):
if is_main_process:
logout.register_servlets(hs, client_resource)
sync.register_servlets(hs, client_resource)
if is_main_process:
filter.register_servlets(hs, client_resource)
filter.register_servlets(hs, client_resource)
account.register_servlets(hs, client_resource)
register.register_servlets(hs, client_resource)
if is_main_process:
@ -140,7 +139,7 @@ class ClientRestResource(JsonResource):
relations.register_servlets(hs, client_resource)
if is_main_process:
password_policy.register_servlets(hs, client_resource)
knock.register_servlets(hs, client_resource)
knock.register_servlets(hs, client_resource)
# moving to /_synapse/admin
if is_main_process:

Some files were not shown because too many files have changed in this diff.