Merge branch 'release-v1.78' into matrix-org-hotfixes

Olivier Wilkinson (reivilibre) 2023-02-21 14:47:40 +00:00
commit e0f9a514c6
198 changed files with 3451 additions and 1724 deletions


@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@bd10f381a96414ce2b13a11bfa89902ba7cea07f # v2.24.3
uses: dawidd6/action-download-artifact@b59d8c6a6c5c6c6437954f470d963c0b20ea7415 # v2.25.0
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}


@ -27,7 +27,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@ -61,7 +61,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@ -134,7 +134,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2

.github/workflows/poetry_lockfile.yaml (new file)

@ -0,0 +1,24 @@
on:
push:
branches: ["develop", "release-*"]
paths:
- poetry.lock
pull_request:
paths:
- poetry.lock
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
check-sdists:
name: "Check locked dependencies have sdists"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.x'
- run: pip install tomli
- run: ./scripts-dev/check_locked_deps_have_sdists.py
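As a rough sketch of what such a check can look like (assuming the `poetry.lock` 2.0 layout, where each `[[package]]` entry lists its distribution `files`; the real `check_locked_deps_have_sdists.py` may differ):
```python
# Minimal sketch: flag locked dependencies that ship no sdist (.tar.gz).
# Assumes poetry.lock's [[package]] entries each carry a "files" list.
import sys

import tomli

with open("poetry.lock", "rb") as f:
    lockfile = tomli.load(f)

missing_sdists = [
    pkg["name"]
    for pkg in lockfile["package"]
    if not any(entry["file"].endswith(".tar.gz") for entry in pkg.get("files", []))
]

if missing_sdists:
    print("Locked dependencies without sdists:", ", ".join(missing_sdists))
    sys.exit(1)
```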


@ -112,7 +112,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
components: clippy
@ -134,7 +134,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: nightly-2022-12-01
components: clippy
@ -154,7 +154,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
components: rustfmt
@ -221,7 +221,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@ -266,7 +266,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@ -386,7 +386,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@ -531,7 +531,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@ -562,7 +562,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@ -585,7 +585,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: nightly-2022-12-01
- uses: Swatinem/rust-cache@v2


@ -18,7 +18,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@ -43,7 +43,7 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@ -82,7 +82,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2


@ -1,3 +1,98 @@
Synapse 1.78.0rc1 (2023-02-21)
==============================
Features
--------
- Implement the experimental `exact_event_match` push rule condition from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758). ([\#14964](https://github.com/matrix-org/synapse/issues/14964))
- Add account data to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.78/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14969](https://github.com/matrix-org/synapse/issues/14969))
- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to disambiguate push rule keys with dots in them. ([\#15004](https://github.com/matrix-org/synapse/issues/15004))
- Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. ([\#15034](https://github.com/matrix-org/synapse/issues/15034))
- Tag opentracing spans for federation requests with the name of the worker serving the request. ([\#15042](https://github.com/matrix-org/synapse/issues/15042))
- Experimental support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): the `exact_event_property_contains` push rule condition. ([\#15045](https://github.com/matrix-org/synapse/issues/15045))
- Remove spurious `dont_notify` action from the defaults for the `.m.rule.reaction` push rule. ([\#15073](https://github.com/matrix-org/synapse/issues/15073))
- Update the error code returned when a user sends a duplicate annotation. ([\#15075](https://github.com/matrix-org/synapse/issues/15075))
Bugfixes
--------
- Prevent clients from reporting nonexistent events. ([\#13779](https://github.com/matrix-org/synapse/issues/13779))
- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14605](https://github.com/matrix-org/synapse/issues/14605))
- Fix a long-standing bug where the room aliases returned could be corrupted. ([\#15038](https://github.com/matrix-org/synapse/issues/15038))
- Fix a bug introduced in Synapse 1.76.0 where partially-joined rooms could not be deleted using the [purge room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api). ([\#15068](https://github.com/matrix-org/synapse/issues/15068))
- Fix a long-standing bug where federated joins would fail if the first server in the list of servers to try is not in the room. ([\#15074](https://github.com/matrix-org/synapse/issues/15074))
- Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. ([\#15079](https://github.com/matrix-org/synapse/issues/15079))
- Reduce the likelihood of a rare race condition where rejoining a restricted room over federation would fail. ([\#15080](https://github.com/matrix-org/synapse/issues/15080))
- Fix a bug introduced in Synapse 1.76 where workers would fail to start if the `health` listener was configured. ([\#15096](https://github.com/matrix-org/synapse/issues/15096))
- Fix a bug introduced in Synapse 1.75 where the [portdb script](https://matrix-org.github.io/synapse/release-v1.78/postgres.html#porting-from-sqlite) would fail to run after a room had been faster-joined. ([\#15108](https://github.com/matrix-org/synapse/issues/15108))
Improved Documentation
----------------------
- Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. ([\#14892](https://github.com/matrix-org/synapse/issues/14892))
- Update delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. ([\#14959](https://github.com/matrix-org/synapse/issues/14959))
- Document how to start Synapse in the contributing guide. ([\#15022](https://github.com/matrix-org/synapse/issues/15022))
- Fix a mistake in the `registration_shared_secret_path` docs. ([\#15078](https://github.com/matrix-org/synapse/issues/15078))
- Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. ([\#15083](https://github.com/matrix-org/synapse/issues/15083))
Internal Changes
----------------
- Re-type hint some collections as read-only. ([\#13755](https://github.com/matrix-org/synapse/issues/13755))
- Faster joins: don't stall when another user joins during a partial-state room resync. ([\#14606](https://github.com/matrix-org/synapse/issues/14606))
- Add a class `UnpersistedEventContext` to allow for the batching up of storing state groups. ([\#14675](https://github.com/matrix-org/synapse/issues/14675))
- Add a check to ensure that locked dependencies have source distributions available. ([\#14742](https://github.com/matrix-org/synapse/issues/14742))
- Tweak comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. ([\#14834](https://github.com/matrix-org/synapse/issues/14834))
- Prevent 'WARNING: there is already a transaction in progress' lines appearing in PostgreSQL's logs on some occasions. ([\#14840](https://github.com/matrix-org/synapse/issues/14840))
- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14929](https://github.com/matrix-org/synapse/issues/14929))
- Improve performance of `/sync` in a few situations. ([\#14973](https://github.com/matrix-org/synapse/issues/14973))
- Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. ([\#14977](https://github.com/matrix-org/synapse/issues/14977))
- Skip calculating unread push actions in `/sync` when `enable_push` is false. ([\#14980](https://github.com/matrix-org/synapse/issues/14980))
- Add schema dump symlinks inside `contrib`, to make it easier for IDEs to interrogate Synapse's database schema. ([\#14982](https://github.com/matrix-org/synapse/issues/14982))
- Improve type hints. ([\#15008](https://github.com/matrix-org/synapse/issues/15008), [\#15026](https://github.com/matrix-org/synapse/issues/15026), [\#15027](https://github.com/matrix-org/synapse/issues/15027), [\#15028](https://github.com/matrix-org/synapse/issues/15028), [\#15031](https://github.com/matrix-org/synapse/issues/15031), [\#15035](https://github.com/matrix-org/synapse/issues/15035), [\#15052](https://github.com/matrix-org/synapse/issues/15052), [\#15072](https://github.com/matrix-org/synapse/issues/15072), [\#15084](https://github.com/matrix-org/synapse/issues/15084))
- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15037](https://github.com/matrix-org/synapse/issues/15037))
- Avoid mutating a cached value in `get_user_devices_from_cache`. ([\#15040](https://github.com/matrix-org/synapse/issues/15040))
- Fix a rare exception in logs on start up. ([\#15041](https://github.com/matrix-org/synapse/issues/15041))
- Update pyo3-log to v0.8.1. ([\#15043](https://github.com/matrix-org/synapse/issues/15043))
- Avoid mutating cached values in `_generate_sync_entry_for_account_data`. ([\#15047](https://github.com/matrix-org/synapse/issues/15047))
- Refactor arguments of `try_unbind_threepid` and `_try_unbind_threepid_with_id_server` to not use dictionaries. ([\#15053](https://github.com/matrix-org/synapse/issues/15053))
- Merge debug logging from the hotfixes branch. ([\#15054](https://github.com/matrix-org/synapse/issues/15054))
- Faster joins: omit device list updates originating from partial-state rooms in `/sync` responses when lazy loading of members is not enabled. ([\#15069](https://github.com/matrix-org/synapse/issues/15069))
- Fix clashing database transaction name. ([\#15070](https://github.com/matrix-org/synapse/issues/15070))
- Upper-bound frozendict dependency. This works around us being unable to test installing our wheels against Python 3.11 in CI. ([\#15114](https://github.com/matrix-org/synapse/issues/15114))
- Tweak logging for when a worker waits for its view of a replication stream to catch up. ([\#15120](https://github.com/matrix-org/synapse/issues/15120))
<details><summary>Locked dependency updates</summary>
- Bump bleach from 5.0.1 to 6.0.0. ([\#15059](https://github.com/matrix-org/synapse/issues/15059))
- Bump cryptography from 38.0.4 to 39.0.1. ([\#15020](https://github.com/matrix-org/synapse/issues/15020))
- Bump ruff version from 0.0.230 to 0.0.237. ([\#15033](https://github.com/matrix-org/synapse/issues/15033))
- Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. ([\#15060](https://github.com/matrix-org/synapse/issues/15060))
- Bump systemd-python from 234 to 235. ([\#15061](https://github.com/matrix-org/synapse/issues/15061))
- Bump serde_json from 1.0.92 to 1.0.93. ([\#15062](https://github.com/matrix-org/synapse/issues/15062))
- Bump types-requests from 2.28.11.8 to 2.28.11.12. ([\#15063](https://github.com/matrix-org/synapse/issues/15063))
- Bump types-pillow from 9.4.0.5 to 9.4.0.10. ([\#15064](https://github.com/matrix-org/synapse/issues/15064))
- Bump sentry-sdk from 1.13.0 to 1.15.0. ([\#15065](https://github.com/matrix-org/synapse/issues/15065))
- Bump types-jsonschema from 4.17.0.3 to 4.17.0.5. ([\#15099](https://github.com/matrix-org/synapse/issues/15099))
- Bump types-bleach from 5.0.3.1 to 6.0.0.0. ([\#15100](https://github.com/matrix-org/synapse/issues/15100))
- Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. ([\#15101](https://github.com/matrix-org/synapse/issues/15101))
- Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0. ([\#15102](https://github.com/matrix-org/synapse/issues/15102))
- Bump types-pillow from 9.4.0.10 to 9.4.0.13. ([\#15104](https://github.com/matrix-org/synapse/issues/15104))
- Bump types-setuptools from 67.1.0.0 to 67.3.0.1. ([\#15105](https://github.com/matrix-org/synapse/issues/15105))
</details>
Synapse 1.77.0 (2023-02-14)
===========================
No significant changes since 1.77.0rc2.
Synapse 1.77.0rc2 (2023-02-10)
==============================
@ -57,7 +152,7 @@ Internal Changes
- Preparatory work for adding a denormalised event stream ordering column in the future. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979), [9cd7610](https://github.com/matrix-org/synapse/commit/9cd7610f86ab5051c9365dd38d1eec405a5f8ca6), [f10caa7](https://github.com/matrix-org/synapse/commit/f10caa73eee0caa91cf373966104d1ededae2aee); see [\#15014](https://github.com/matrix-org/synapse/issues/15014))
- Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002))
<details><summary>Dependabot updates</summary>
<details><summary>Locked dependency updates</summary>
- Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968))
- Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952))

Cargo.lock (generated)

@ -232,9 +232,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.7.0"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c"
dependencies = [
"arc-swap",
"log",
@ -343,9 +343,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.92"
version = "1.0.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a"
checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76"
dependencies = [
"itoa",
"ryu",


@ -0,0 +1,28 @@
# Schema symlinks
This directory contains symlinks to the latest dump of the postgres full schema. This is useful to have, as it allows IDEs to understand our schema and provide autocomplete, linters, inspections, etc.
In particular, the DataGrip functionality in IntelliJ's products seems to only consider files called `*.sql` when defining a schema from DDL; `*.sql.postgres` will be ignored. To get around this we symlink those files to ones ending in `.sql`. We've chosen to ignore the `.sql.sqlite` schema dumps here, as they're not intended for production use (and are much quicker to test against).
## Example
![](datagrip-aware-of-schema.png)
## Caveats
- Doesn't include temporary tables created ad-hoc by Synapse.
- Postgres only. IDEs will likely be confused by SQLite-specific queries.
- Will not include migrations created after the latest schema dump.
- Symlinks might confuse checkouts on Windows systems.
## Instructions
### Jetbrains IDEs with DataGrip plugin
- View -> Tool Windows -> Database
- `+` Icon -> DDL Data Source
- Pick a name, e.g. `Synapse schema dump`
- Under sources, click `+`.
- Add an entry with Path pointing to this directory, and dialect set to PostgreSQL.
- OK, and OK.
- The IDE should now be aware of the schema.
- Try control-clicking on a table name in a bit of SQL, e.g. in `_get_forgotten_rooms_for_user_txn`.

contrib/datagrip/common.sql (symbolic link)

@ -0,0 +1 @@
../../synapse/storage/schema/common/full_schemas/72/full.sql.postgres

contrib/datagrip/datagrip-aware-of-schema.png (binary image, 13 KiB; not shown)

contrib/datagrip/main.sql (symbolic link)

@ -0,0 +1 @@
../../synapse/storage/schema/main/full_schemas/72/full.sql.postgres


@ -0,0 +1 @@
../../synapse/storage/schema/common/schema_version.sql

contrib/datagrip/state.sql (symbolic link)

@ -0,0 +1 @@
../../synapse/storage/schema/state/full_schemas/72/full.sql.postgres


@ -68,6 +68,7 @@ redis:
enabled: true
host: redis
port: 6379
# dbid: <redis_logical_db_id>
# password: <secret_password>
```

debian/changelog

@ -1,3 +1,16 @@
matrix-synapse-py3 (1.78.0~rc1) stable; urgency=medium
* Add `matrix-org-archive-keyring` package as recommended.
* New Synapse release 1.78.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Feb 2023 14:29:19 +0000
matrix-synapse-py3 (1.77.0) stable; urgency=medium
* New Synapse release 1.77.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Feb 2023 12:59:02 +0100
matrix-synapse-py3 (1.77.0~rc2) stable; urgency=medium
* New Synapse release 1.77.0rc2.

debian/control

@ -37,6 +37,7 @@ Depends:
# so we put perl:Depends in Suggests rather than Depends.
Recommends:
${shlibs:Recommends},
matrix-org-archive-keyring,
Suggests:
sqlite3,
${perl:Depends},


@ -235,6 +235,14 @@ The following fields are returned in the JSON response body:
Request:
```
POST /_synapse/admin/v1/media/delete?before_ts=<before_ts>
{}
```
*Deprecated in Synapse v1.78.0:* This API is available at the deprecated endpoint:
```
POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
@ -243,7 +251,7 @@ POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
URL Parameters
* `server_name`: string - The name of your local server (e.g `matrix.org`).
* `server_name`: string - The name of your local server (e.g `matrix.org`). *Deprecated in Synapse v1.78.0.*
* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
Files that were last used before this timestamp will be deleted. It is the timestamp of
last access, not the timestamp when the file was created.
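For illustration, a hypothetical call to the new endpoint (the homeserver URL and admin access token are placeholders, and the response shape may vary by version):
```python
# Hypothetical invocation of the new, non-deprecated endpoint.
import requests

resp = requests.post(
    "https://homeserver.example/_synapse/admin/v1/media/delete",
    params={"before_ts": 1676991600000},  # ms since epoch; media last accessed before this
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={},  # the request body is an empty JSON object
)
resp.raise_for_status()
print(resp.json())  # e.g. {"deleted_media": [...], "total": ...}
```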


@ -73,6 +73,15 @@ It is also possible to do delegation using a SRV DNS record. However, that is ge
not recommended, as it can be difficult to configure the TLS certificates correctly in
this case, and it offers little advantage over `.well-known` delegation.
Please keep in mind that server delegation is a function of server-server communication,
and as such using SRV DNS records will not cover use cases involving client-server comms.
This means setting global client settings (such as a Jitsi endpoint, or disabling
creating new rooms as encrypted by default, etc) will still require that you serve a file
from the `https://<server_name>/.well-known/` endpoints defined in the spec! If you are
considering using SRV DNS delegation to avoid serving files from this endpoint, consider
the impact: you will not be able to change those client-side default values globally, and
will instead be limited to whatever configuration each individual client exposes.
However, if you really need it, you can find some documentation on what such a
record should look like and how Synapse will use it in [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).
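To make the trade-off concrete, here is a hedged sketch of the kind of client-side defaults involved; these are served as JSON from `https://<server_name>/.well-known/matrix/client`, and the `im.vector.riot.jitsi` key is an Element-specific example rather than a spec-mandated field:
```python
# Illustration only: client defaults that SRV delegation cannot set, since
# clients fetch them over HTTPS from /.well-known/matrix/client.
import json

well_known_client = {
    "m.homeserver": {"base_url": "https://matrix.example.com"},
    # Element-specific key, shown as an example of a client-side default:
    "im.vector.riot.jitsi": {"preferredDomain": "jitsi.example.com"},
}
print(json.dumps(well_known_client, indent=2))
```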


@ -78,6 +78,19 @@ poetry install --extras all
This will install the runtime and developer dependencies for the project.
## Running Synapse via poetry
To start a local instance of Synapse in the locked poetry environment, create a config file:
```sh
cp docs/sample_config.yaml homeserver.yaml
```
Now edit `homeserver.yaml`, and run Synapse with:
```sh
poetry run python -m synapse.app.homeserver -c homeserver.yaml
```
# 5. Get in touch.


@ -88,6 +88,15 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.78.0
## Deprecate the `/_synapse/admin/v1/media/<server_name>/delete` admin API
Synapse 1.78.0 replaces the `/_synapse/admin/v1/media/<server_name>/delete`
admin API with an identical endpoint at `/_synapse/admin/v1/media/delete`. Please
update your tooling to use the new endpoint. The deprecated version will be removed
in a future release.
# Upgrading to v1.76.0
## Faster joins are enabled by default
@ -137,6 +146,7 @@ and then do `pip install matrix-synapse[user-search]` for a PyPI install.
Docker images and Debian packages need nothing specific as they already
include or specify ICU as an explicit dependency.
# Upgrading to v1.73.0
## Legacy Prometheus metric names have now been removed


@ -71,6 +71,9 @@ output-directory
│ ├───invite_state
│ └───knock_state
└───user_data
├───account_data
│ ├───global
│ └───<room_id>
├───connections
├───devices
└───profile


@ -1,4 +1,4 @@
This blog post by Victor Berger explains how to use many of the tools listed on this page: https://levans.fr/shrink-synapse-database.html
_This [blog post by Jackson Chen](https://jacksonchen666.com/posts/2022-12-03/14-33-00/) (Dec 2022) explains how to use many of the tools listed on this page. There is also an [earlier blog by Victor Berger](https://levans.fr/shrink-synapse-database.html) (June 2020), though this may be outdated in places._
# List of useful tools and scripts for maintaining a Synapse database:
@ -15,4 +15,4 @@ The purge history API allows server admins to purge historic events from their d
Tool for compressing (deduplicating) `state_groups_state` table.
## [SQL for analyzing Synapse PostgreSQL database stats](useful_sql_for_admins.md)
Some easy SQL that reports useful stats about your Synapse database.


@ -2232,7 +2232,7 @@ key on startup and store it in this file.
Example configuration:
```yaml
registration_shared_secret_file: /path/to/secrets/file
registration_shared_secret_path: /path/to/secrets/file
```
_Added in Synapse 1.67.0._
@ -3927,6 +3927,9 @@ This setting has the following sub-options:
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
localhost and 6379
* `password`: Optional password if configured on the Redis instance.
* `dbid`: Optional Redis dbid, if you need to connect to a specific Redis logical database.
_Added in Synapse 1.78.0._
Example configuration:
```yaml
@ -3935,6 +3938,7 @@ redis:
host: localhost
port: 6379
password: <secret_password>
dbid: <dbid>
```
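As background on what `dbid` selects (a redis-py sketch, not Synapse code): a single Redis server exposes numbered logical databases, each an isolated keyspace.
```python
# redis-py sketch: logical databases on one server hold independent keyspaces.
import redis

db0 = redis.Redis(host="localhost", port=6379, db=0)
db2 = redis.Redis(host="localhost", port=6379, db=2)

db0.set("stream_position", 42)
print(db0.get("stream_position"))  # b"42"
print(db2.get("stream_position"))  # None: db 2 is a separate keyspace
```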
---
## Individual worker configuration


@ -160,7 +160,18 @@ recommend the use of `systemd` where available: for information on setting up
[Systemd with Workers](systemd-with-workers/). To use `synctl`, see
[Using synctl with Workers](synctl_workers.md).
## Start Synapse with Poetry
The following applies to Synapse installations installed from source using `poetry`.
You can start the main Synapse process with Poetry by running the following command:
```console
poetry run synapse_homeserver -c [your homeserver.yaml]
```
For worker setups, you can run the following command:
```console
poetry run synapse_worker -c [your worker.yaml]
```
## Available worker applications
### `synapse.app.generic_worker`


@ -31,10 +31,6 @@ exclude = (?x)
|synapse/storage/databases/__init__.py
|synapse/storage/databases/main/cache.py
|synapse/storage/schema/
|tests/module_api/test_api.py
|tests/rest/media/v1/test_media_storage.py
|tests/server.py
)$
[mypy-synapse.federation.transport.client]
@ -55,87 +51,12 @@ warn_unused_ignores = False
[mypy-synapse.util.caches.treecache]
disallow_untyped_defs = False
[mypy-synapse.server]
disallow_untyped_defs = False
[mypy-synapse.storage.database]
disallow_untyped_defs = False
[mypy-tests.*]
disallow_untyped_defs = False
[mypy-tests.api.*]
disallow_untyped_defs = True
[mypy-tests.app.*]
disallow_untyped_defs = True
[mypy-tests.appservice.*]
disallow_untyped_defs = True
[mypy-tests.config.*]
disallow_untyped_defs = True
[mypy-tests.crypto.*]
disallow_untyped_defs = True
[mypy-tests.events.*]
disallow_untyped_defs = True
[mypy-tests.federation.*]
disallow_untyped_defs = True
[mypy-tests.handlers.*]
disallow_untyped_defs = True
[mypy-tests.http.*]
disallow_untyped_defs = True
[mypy-tests.logging.*]
disallow_untyped_defs = True
[mypy-tests.metrics.*]
disallow_untyped_defs = True
[mypy-tests.push.*]
disallow_untyped_defs = True
[mypy-tests.replication.*]
disallow_untyped_defs = True
[mypy-tests.rest.*]
disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True
[mypy-tests.storage.*]
disallow_untyped_defs = True
[mypy-tests.test_server]
disallow_untyped_defs = True
[mypy-tests.test_state]
disallow_untyped_defs = True
[mypy-tests.test_terms_auth]
disallow_untyped_defs = True
[mypy-tests.types.*]
disallow_untyped_defs = True
[mypy-tests.util.caches.*]
disallow_untyped_defs = True
[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False
[mypy-tests.util.*]
disallow_untyped_defs = True
[mypy-tests.utils]
disallow_untyped_defs = True
;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
;; The `typeshed` project maintains stubs here:

poetry.lock (generated)

@ -127,14 +127,14 @@ uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "bleach"
version = "5.0.1"
version = "6.0.0"
description = "An easy safelist-based HTML-sanitizing tool."
category = "main"
optional = false
python-versions = ">=3.7"
files = [
{file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"},
{file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"},
{file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"},
{file = "bleach-6.0.0.tar.gz", hash = "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414"},
]
[package.dependencies]
@ -143,18 +143,17 @@ webencodings = "*"
[package.extras]
css = ["tinycss2 (>=1.1.0,<1.2)"]
dev = ["Sphinx (==4.3.2)", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "hashin (==0.17.0)", "mypy (==0.961)", "pip-tools (==6.6.2)", "pytest (==7.1.2)", "tox (==3.25.0)", "twine (==4.0.1)", "wheel (==0.37.1)"]
[[package]]
name = "canonicaljson"
version = "1.6.4"
version = "1.6.5"
description = "Canonical JSON"
category = "main"
optional = false
python-versions = ">=3.7"
files = [
{file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"},
{file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"},
{file = "canonicaljson-1.6.5-py3-none-any.whl", hash = "sha256:806ea6f2cbb7405d20259e1c36dd1214ba5c242fa9165f5bd0bf2081f82c23fb"},
{file = "canonicaljson-1.6.5.tar.gz", hash = "sha256:68dfc157b011e07d94bf74b5d4ccc01958584ed942d9dfd5fdd706609e81cd4b"},
]
[package.dependencies]
@ -339,50 +338,49 @@ files = [
[[package]]
name = "cryptography"
version = "38.0.4"
version = "39.0.1"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
category = "main"
optional = false
python-versions = ">=3.6"
files = [
{file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"},
{file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"},
{file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"},
{file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"},
{file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"},
{file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"},
{file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"},
{file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"},
{file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"},
{file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"},
{file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"},
{file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"},
{file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"},
{file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"},
{file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"},
{file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"},
{file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"},
{file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"},
{file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"},
{file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"},
{file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"},
{file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"},
{file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"},
{file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"},
{file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"},
{file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"},
{file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965"},
{file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc"},
{file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41"},
{file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505"},
{file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6"},
{file = "cryptography-39.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502"},
{file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f"},
{file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106"},
{file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c"},
{file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4"},
{file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"},
{file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"},
{file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"},
{file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5caeb8188c24888c90b5108a441c106f7faa4c4c075a2bcae438c6e8ca73cef"},
{file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4789d1e3e257965e960232345002262ede4d094d1a19f4d3b52e48d4d8f3b885"},
{file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"},
{file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"},
{file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"},
{file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e"},
{file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0"},
{file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6"},
{file = "cryptography-39.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a"},
{file = "cryptography-39.0.1.tar.gz", hash = "sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695"},
]
[package.dependencies]
cffi = ">=1.12"
[package.extras]
docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"]
docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"]
docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"]
pep8test = ["black", "check-manifest", "mypy", "ruff", "types-pytz", "types-requests"]
sdist = ["setuptools-rust (>=0.11.4)"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist", "pytz"]
test-randomorder = ["pytest-randomly"]
tox = ["tox"]
[[package]]
name = "defusedxml"
@ -1148,36 +1146,38 @@ files = [
[[package]]
name = "mypy"
version = "0.981"
version = "1.0.0"
description = "Optional static typing for Python"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"},
{file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"},
{file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"},
{file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"},
{file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"},
{file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"},
{file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"},
{file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"},
{file = "mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"},
{file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"},
{file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"},
{file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"},
{file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"},
{file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"},
{file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"},
{file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"},
{file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"},
{file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"},
{file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"},
{file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"},
{file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"},
{file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"},
{file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"},
{file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"},
{file = "mypy-1.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0626db16705ab9f7fa6c249c017c887baf20738ce7f9129da162bb3075fc1af"},
{file = "mypy-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ace23f6bb4aec4604b86c4843276e8fa548d667dbbd0cb83a3ae14b18b2db6c"},
{file = "mypy-1.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87edfaf344c9401942883fad030909116aa77b0fa7e6e8e1c5407e14549afe9a"},
{file = "mypy-1.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0ab090d9240d6b4e99e1fa998c2d0aa5b29fc0fb06bd30e7ad6183c95fa07593"},
{file = "mypy-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:7cc2c01dfc5a3cbddfa6c13f530ef3b95292f926329929001d45e124342cd6b7"},
{file = "mypy-1.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14d776869a3e6c89c17eb943100f7868f677703c8a4e00b3803918f86aafbc52"},
{file = "mypy-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb2782a036d9eb6b5a6efcdda0986774bf798beef86a62da86cb73e2a10b423d"},
{file = "mypy-1.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cfca124f0ac6707747544c127880893ad72a656e136adc935c8600740b21ff5"},
{file = "mypy-1.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8845125d0b7c57838a10fd8925b0f5f709d0e08568ce587cc862aacce453e3dd"},
{file = "mypy-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b1b9e1ed40544ef486fa8ac022232ccc57109f379611633ede8e71630d07d2"},
{file = "mypy-1.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c7cf862aef988b5fbaa17764ad1d21b4831436701c7d2b653156a9497d92c83c"},
{file = "mypy-1.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd187d92b6939617f1168a4fe68f68add749902c010e66fe574c165c742ed88"},
{file = "mypy-1.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4e5175026618c178dfba6188228b845b64131034ab3ba52acaffa8f6c361f805"},
{file = "mypy-1.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2f6ac8c87e046dc18c7d1d7f6653a66787a4555085b056fe2d599f1f1a2a2d21"},
{file = "mypy-1.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7306edca1c6f1b5fa0bc9aa645e6ac8393014fa82d0fa180d0ebc990ebe15964"},
{file = "mypy-1.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3cfad08f16a9c6611e6143485a93de0e1e13f48cfb90bcad7d5fde1c0cec3d36"},
{file = "mypy-1.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67cced7f15654710386e5c10b96608f1ee3d5c94ca1da5a2aad5889793a824c1"},
{file = "mypy-1.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a86b794e8a56ada65c573183756eac8ac5b8d3d59daf9d5ebd72ecdbb7867a43"},
{file = "mypy-1.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:50979d5efff8d4135d9db293c6cb2c42260e70fb010cbc697b1311a4d7a39ddb"},
{file = "mypy-1.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ae4c7a99e5153496243146a3baf33b9beff714464ca386b5f62daad601d87af"},
{file = "mypy-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e398652d005a198a7f3c132426b33c6b85d98aa7dc852137a2a3be8890c4072"},
{file = "mypy-1.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be78077064d016bc1b639c2cbcc5be945b47b4261a4f4b7d8923f6c69c5c9457"},
{file = "mypy-1.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92024447a339400ea00ac228369cd242e988dd775640755fa4ac0c126e49bb74"},
{file = "mypy-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:fe523fcbd52c05040c7bee370d66fee8373c5972171e4fbc323153433198592d"},
{file = "mypy-1.0.0-py3-none-any.whl", hash = "sha256:2efa963bdddb27cb4a0d42545cd137a8d2b883bd181bbc4525b568ef6eca258f"},
{file = "mypy-1.0.0.tar.gz", hash = "sha256:f34495079c8d9da05b183f9f7daec2878280c2ad7cc81da686ef0b484cea2ecf"},
]
[package.dependencies]
@ -1188,6 +1188,7 @@ typing-extensions = ">=3.10"
[package.extras]
dmypy = ["psutil (>=4.0)"]
install-types = ["pip"]
python2 = ["typed-ast (>=1.4.0,<2)"]
reports = ["lxml"]
@ -1205,18 +1206,18 @@ files = [
[[package]]
name = "mypy-zope"
version = "0.3.11"
version = "0.9.0"
description = "Plugin for mypy to support zope interfaces"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"},
{file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"},
{file = "mypy-zope-0.9.0.tar.gz", hash = "sha256:88bf6cd056e38b338e6956055958a7805b4ff84404ccd99e29883a3647a1aeb3"},
{file = "mypy_zope-0.9.0-py3-none-any.whl", hash = "sha256:e1bb4b57084f76ff8a154a3e07880a1af2ac6536c491dad4b143d529f72c5d15"},
]
[package.dependencies]
mypy = "0.981"
mypy = "1.0.0"
"zope.interface" = "*"
"zope.schema" = "*"
@ -1970,28 +1971,28 @@ jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
[[package]]
name = "ruff"
version = "0.0.230"
version = "0.0.237"
description = "An extremely fast Python linter, written in Rust."
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.0.230-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:fcc31d02cebda0a85e2e13a44642aea7f84362cb4f589e2f6b864e3928e4a7db"},
{file = "ruff-0.0.230-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:45a7f2c7155d520b8ca255a01235763d5c25fd5e7af055e50a78c6d91ece0ced"},
{file = "ruff-0.0.230-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4eca8b185ab56cac67acc23287c3c8c62a0c0ffadc0787a3bef3a6e77eaed82f"},
{file = "ruff-0.0.230-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec2bcdb5040efd8082a3a98369eec4bdc5fd05f53cc6714cb2b725d557d4abe8"},
{file = "ruff-0.0.230-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:26571aee2b93b60e47e44478f72a9787b387f752e85b85f176739bd91b27cfd1"},
{file = "ruff-0.0.230-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4b69c9883c3e264f8bb2d52bdabb88b8d9672750ea05f33e0ff52532824bd5c5"},
{file = "ruff-0.0.230-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b3dc88b83f200378a9b9c91036989f0285a10759514c42235ce02e5824ac8d0"},
{file = "ruff-0.0.230-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:767716f008dd3a40ec2318396f648fda437c6968087a4526cde5879e382cf477"},
{file = "ruff-0.0.230-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac27a0f9b96d9923cef7d911790a21a19b51aec0f08375ccc47ad735b1054d78"},
{file = "ruff-0.0.230-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:729dfc7b7ad4f7d8761dc60c58f15372d6f5c2dd9b6c5952524f2bc3aec7de6a"},
{file = "ruff-0.0.230-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ad086cf2e5fef274687121f673f0f9b60c8981ec07c2bb0448c459cbaef81bcb"},
{file = "ruff-0.0.230-py3-none-musllinux_1_2_i686.whl", hash = "sha256:4feaed0978c24687133cd11c7380de20aa841f893e24430c735cc6c3faba4837"},
{file = "ruff-0.0.230-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1d1046d0d43a0f24b2e9e61d76bb201b486ad02e9787d3432af43bd7d16f2c2e"},
{file = "ruff-0.0.230-py3-none-win32.whl", hash = "sha256:4d627911c9ba57bcd2f2776f1c09a10d334db163cb5be8c892e7ec7b59ccf58c"},
{file = "ruff-0.0.230-py3-none-win_amd64.whl", hash = "sha256:27fd4891a1d0642f5b2038ebf86f8169bc3d466964bdfaa0ce2a65149bc7cced"},
{file = "ruff-0.0.230.tar.gz", hash = "sha256:a049f93af1057ac450e8c09559d44e371eda1c151b1b863c0013a1066fefddb0"},
{file = "ruff-0.0.237-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:2ea04d826ffca58a7ae926115a801960c757d53c9027f2ca9acbe84c9f2b2f04"},
{file = "ruff-0.0.237-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:8ed113937fab9f73f8c1a6c0350bb4fe03e951370139c6e0adb81f48a8dcf4c6"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9bcb71a3efb5fe886eb48d739cfae5df4a15617e7b5a7668aa45ebf74c0d3fa"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:80ce10718abbf502818c0d650ebab99fdcef5e937a1ded3884493ddff804373c"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cc6cb7c1efcc260df5a939435649610a28f9f438b8b313384c8985ac6574f9f"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7eef0c7a1e45a4e30328ae101613575944cbf47a3a11494bf9827722da6c66b3"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0d122433a21ce4a21fbba34b73fc3add0ccddd1643b3ff5abb8d2767952f872e"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b76311335adda4de3c1d471e64e89a49abfeebf02647e3db064e7740e7f36ed6"},
{file = "ruff-0.0.237-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46c5977b643aaf2b6f84641265f835b6c7f67fcca38dbae08c4f15602e084ca0"},
{file = "ruff-0.0.237-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3d6ed86d0d4d742360a262d52191581f12b669a68e59ae3b52e80d7483b3d7b3"},
{file = "ruff-0.0.237-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fedfb60f986c26cdb1809db02866e68508db99910c587d2c4066a5c07aa85593"},
{file = "ruff-0.0.237-py3-none-musllinux_1_2_i686.whl", hash = "sha256:bb96796be5919871fa9ae7e88968ba9e14306d9a3f217ca6c204f68a5abeccdd"},
{file = "ruff-0.0.237-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ea239cfedf67b74ea4952e1074bb99a4281c2145441d70bc7e2f058d5c49f1c9"},
{file = "ruff-0.0.237-py3-none-win32.whl", hash = "sha256:8d6a1d21ae15da2b1dcffeee2606e90de0e6717e72957da7d16ab6ae18dd0058"},
{file = "ruff-0.0.237-py3-none-win_amd64.whl", hash = "sha256:525e5ec81cee29b993f77976026a6bf44528a14aa6edb1ef47bd8079147395ae"},
{file = "ruff-0.0.237.tar.gz", hash = "sha256:630c575f543733adf6c19a11d9a02ca9ecc364bd7140af8a4c854d4728be6b56"},
]
[[package]]
@ -2028,14 +2029,14 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
version = "1.13.0"
version = "1.15.0"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
python-versions = "*"
files = [
{file = "sentry-sdk-1.13.0.tar.gz", hash = "sha256:72da0766c3069a3941eadbdfa0996f83f5a33e55902a19ba399557cfee1dddcc"},
{file = "sentry_sdk-1.13.0-py2.py3-none-any.whl", hash = "sha256:b7ff6318183e551145b5c4766eb65b59ad5b63ff234dffddc5fb50340cad6729"},
{file = "sentry-sdk-1.15.0.tar.gz", hash = "sha256:69ecbb2e1ff4db02a06c4f20f6f69cb5dfe3ebfbc06d023e40d77cf78e9c37e7"},
{file = "sentry_sdk-1.15.0-py2.py3-none-any.whl", hash = "sha256:7ad4d37dd093f4a7cb5ad804c6efe9e8fab8873f7ffc06042dc3f3fd700a93ec"},
]
[package.dependencies]
@ -2053,7 +2054,8 @@ falcon = ["falcon (>=1.4)"]
fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)"]
httpx = ["httpx (>=0.16.0)"]
opentelemetry = ["opentelemetry-distro (>=0.350b0)"]
huey = ["huey (>=2)"]
opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
pure-eval = ["asttokens", "executing", "pure-eval"]
pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
@ -2255,13 +2257,13 @@ files = [
[[package]]
name = "systemd-python"
version = "234"
version = "235"
description = "Python interface for libsystemd"
category = "main"
optional = true
python-versions = "*"
files = [
{file = "systemd-python-234.tar.gz", hash = "sha256:fd0e44bf70eadae45aadc292cb0a7eb5b0b6372cd1b391228047d33895db83e7"},
{file = "systemd-python-235.tar.gz", hash = "sha256:4e57f39797fd5d9e2d22b8806a252d7c0106c936039d1e71c8c6b8008e695c0a"},
]
[[package]]
@ -2546,14 +2548,14 @@ files = [
[[package]]
name = "types-bleach"
version = "5.0.3.1"
version = "6.0.0.0"
description = "Typing stubs for bleach"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"},
{file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"},
{file = "types-bleach-6.0.0.0.tar.gz", hash = "sha256:770ce9c7ea6173743ef1a4a70f2619bb1819bf53c7cd0336d939af93f488fbe2"},
{file = "types_bleach-6.0.0.0-py3-none-any.whl", hash = "sha256:75f55f035837c5fce2cd0bd5162a2a90057680a89c9275588a5c12f5f597a14a"},
]
[[package]]
@ -2622,14 +2624,14 @@ files = [
[[package]]
name = "types-jsonschema"
version = "4.17.0.3"
version = "4.17.0.5"
description = "Typing stubs for jsonschema"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-jsonschema-4.17.0.3.tar.gz", hash = "sha256:746aa466ffed9a1acc7bdbd0ac0b5e068f00be2ee008c1d1e14b0944a8c8b24b"},
{file = "types_jsonschema-4.17.0.3-py3-none-any.whl", hash = "sha256:c8d5b26b7c8da6a48d7fb1ce029b97e0ff6e74db3727efb968c69f39ad013685"},
{file = "types-jsonschema-4.17.0.5.tar.gz", hash = "sha256:7adc7bfca4afe291de0c93eca9367aa72a4fbe8ce87fe15642c600ad97d45dd6"},
{file = "types_jsonschema-4.17.0.5-py3-none-any.whl", hash = "sha256:79ac8a7763fe728947af90a24168b91621edf7e8425bf3670abd4ea0d4758fba"},
]
[[package]]
@ -2646,14 +2648,14 @@ files = [
[[package]]
name = "types-pillow"
version = "9.4.0.5"
version = "9.4.0.13"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-Pillow-9.4.0.5.tar.gz", hash = "sha256:941cefaac2f5297d7d2a9989633c95b4063112690dc21c965d46bd5a7fff3c76"},
{file = "types_Pillow-9.4.0.5-py3-none-any.whl", hash = "sha256:a1d2b3e070b4d852af04f76f018d12bd51abb4abca3b725d91b35e01cda7a2de"},
{file = "types-Pillow-9.4.0.13.tar.gz", hash = "sha256:4510aa98a28947bf63f2b29edebbd11b7cff8647d90b867cec9b3674c0a8c321"},
{file = "types_Pillow-9.4.0.13-py3-none-any.whl", hash = "sha256:14a8a19021b8fe569a9fef9edc64a8d8a4aef340e38669d4fb3dc05cfd941130"},
]
[[package]]
@ -2697,14 +2699,14 @@ files = [
[[package]]
name = "types-requests"
version = "2.28.11.8"
version = "2.28.11.12"
description = "Typing stubs for requests"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-requests-2.28.11.8.tar.gz", hash = "sha256:e67424525f84adfbeab7268a159d3c633862dafae15c5b19547ce1b55954f0a3"},
{file = "types_requests-2.28.11.8-py3-none-any.whl", hash = "sha256:61960554baca0008ae7e2db2bd3b322ca9a144d3e80ce270f5fb640817e40994"},
{file = "types-requests-2.28.11.12.tar.gz", hash = "sha256:fd530aab3fc4f05ee36406af168f0836e6f00f1ee51a0b96b7311f82cb675230"},
{file = "types_requests-2.28.11.12-py3-none-any.whl", hash = "sha256:dbc2933635860e553ffc59f5e264264981358baffe6342b925e3eb8261f866ee"},
]
[package.dependencies]
@ -2712,14 +2714,14 @@ types-urllib3 = "<1.27"
[[package]]
name = "types-setuptools"
version = "67.1.0.0"
version = "67.3.0.1"
description = "Typing stubs for setuptools"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-setuptools-67.1.0.0.tar.gz", hash = "sha256:162a39d22e3a5eb802197c84f16b19e798101bbd33d9437837fbb45627da5627"},
{file = "types_setuptools-67.1.0.0-py3-none-any.whl", hash = "sha256:5bd7a10d93e468bfcb10d24cb8ea5e12ac4f4ac91267293959001f1448cf0619"},
{file = "types-setuptools-67.3.0.1.tar.gz", hash = "sha256:1a26d373036c720e566823b6edd664a2db4d138b6eeba856721ec1254203474f"},
{file = "types_setuptools-67.3.0.1-py3-none-any.whl", hash = "sha256:a7e0f0816b5b449f5bcdc0efa43da91ff81dbe6941f293a6490d68a450e130a1"},
]
[package.dependencies]
@ -3028,4 +3030,4 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
python-versions = "^3.7.1"
content-hash = "2673ef0530a42dae1df998bacfcaf88a563529b39461003a980743a97f02996f"
content-hash = "e12077711e5ff83f3c6038ea44c37bd49773799ec8245035b01094b7800c5c92"

View File

@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.77.0rc2"
version = "1.78.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@ -154,7 +154,9 @@ python = "^3.7.1"
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
jsonschema = ">=3.0.0"
# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
frozendict = ">=1,!=2.1.2"
# We cannot test our wheels against the 2.3.5 release in CI. Putting in an upper bound for this
# because frozendict has been more trouble than it's worth; we would like to move to immutabledict.
frozendict = ">=1,!=2.1.2,<2.3.5"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
# We require 1.5.0 to work around an issue when running against the C implementation of
@ -311,7 +313,7 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.230"
ruff = "0.0.237"
# Typechecking
mypy = "*"
@ -346,6 +348,9 @@ twine = "*"
# Towncrier min version comes from #3425. Rationale unclear.
towncrier = ">=18.6.0rc1"
# Used for checking the Poetry lockfile
tomli = ">=1.2.3"
[build-system]
# The upper bounds here are defensive, intended to prevent situations like
# #13849 and #14079 where we see buildtime or runtime errors caused by build

View File

@ -24,7 +24,7 @@ anyhow = "1.0.63"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.17.1", features = ["macros", "anyhow", "abi3", "abi3-py37"] }
pyo3-log = "0.7.0"
pyo3-log = "0.8.1"
pythonize = "0.17.0"
regex = "1.6.0"
serde = { version = "1.0.144", features = ["derive"] }

View File

@ -15,7 +15,8 @@
#![feature(test)]
use std::collections::BTreeSet;
use synapse::push::{
evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules,
evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
PushRules, SimpleJsonValue,
};
use test::Bencher;
@ -24,9 +25,18 @@ extern crate test;
#[bench]
fn bench_match_exact(b: &mut Bencher) {
let flattened_keys = [
("type".to_string(), "m.text".to_string()),
("room_id".to_string(), "!room:server".to_string()),
("content.body".to_string(), "test message".to_string()),
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
.collect();
@ -35,7 +45,6 @@ fn bench_match_exact(b: &mut Bencher) {
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
Default::default(),
@ -43,6 +52,8 @@ fn bench_match_exact(b: &mut Bencher) {
true,
vec![],
false,
false,
false,
)
.unwrap();
@ -63,9 +74,18 @@ fn bench_match_exact(b: &mut Bencher) {
#[bench]
fn bench_match_word(b: &mut Bencher) {
let flattened_keys = [
("type".to_string(), "m.text".to_string()),
("room_id".to_string(), "!room:server".to_string()),
("content.body".to_string(), "test message".to_string()),
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
.collect();
@ -74,7 +94,6 @@ fn bench_match_word(b: &mut Bencher) {
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
Default::default(),
@ -82,6 +101,8 @@ fn bench_match_word(b: &mut Bencher) {
true,
vec![],
false,
false,
false,
)
.unwrap();
@ -102,9 +123,18 @@ fn bench_match_word(b: &mut Bencher) {
#[bench]
fn bench_match_word_miss(b: &mut Bencher) {
let flattened_keys = [
("type".to_string(), "m.text".to_string()),
("room_id".to_string(), "!room:server".to_string()),
("content.body".to_string(), "test message".to_string()),
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
.collect();
@ -113,7 +143,6 @@ fn bench_match_word_miss(b: &mut Bencher) {
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
Default::default(),
@ -121,6 +150,8 @@ fn bench_match_word_miss(b: &mut Bencher) {
true,
vec![],
false,
false,
false,
)
.unwrap();
@ -141,9 +172,18 @@ fn bench_match_word_miss(b: &mut Bencher) {
#[bench]
fn bench_eval_message(b: &mut Bencher) {
let flattened_keys = [
("type".to_string(), "m.text".to_string()),
("room_id".to_string(), "!room:server".to_string()),
("content.body".to_string(), "test message".to_string()),
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
.collect();
@ -152,7 +192,6 @@ fn bench_eval_message(b: &mut Bencher) {
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
Default::default(),
@ -160,6 +199,8 @@ fn bench_eval_message(b: &mut Bencher) {
true,
vec![],
false,
false,
false,
)
.unwrap();

View File

@ -21,13 +21,13 @@ use lazy_static::lazy_static;
use serde_json::Value;
use super::KnownCondition;
use crate::push::Action;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
use crate::push::RelatedEventMatchCondition;
use crate::push::SetTweak;
use crate::push::TweakValue;
use crate::push::{Action, ExactEventMatchCondition, SimpleJsonValue};
const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("highlight"),
@ -168,7 +168,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::IsRoomMention),
Condition::Known(KnownCondition::ExactEventMatch(ExactEventMatchCondition {
key: Cow::Borrowed("content.org.matrix.msc3952.mentions.room"),
value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
})),
Condition::Known(KnownCondition::SenderNotificationPermission {
key: Cow::Borrowed("room"),
}),
@ -223,7 +226,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::DontNotify]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},

View File

@ -14,6 +14,7 @@
use std::collections::{BTreeMap, BTreeSet};
use crate::push::JsonValue;
use anyhow::{Context, Error};
use lazy_static::lazy_static;
use log::warn;
@ -22,8 +23,8 @@ use regex::Regex;
use super::{
utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition,
RelatedEventMatchCondition,
Action, Condition, EventMatchCondition, ExactEventMatchCondition, FilteredPushRules,
KnownCondition, RelatedEventMatchCondition, SimpleJsonValue,
};
lazy_static! {
@ -61,9 +62,9 @@ impl RoomVersionFeatures {
/// Allows running a set of push rules against a particular event.
#[pyclass]
pub struct PushRuleEvaluator {
/// A mapping of "flattened" keys to string values in the event, e.g.
/// A mapping of "flattened" keys to simple JSON values in the event, e.g.
/// includes things like "type" and "content.msgtype".
flattened_keys: BTreeMap<String, String>,
flattened_keys: BTreeMap<String, JsonValue>,
/// The "content.body", if any.
body: String,
@ -72,8 +73,6 @@ pub struct PushRuleEvaluator {
has_mentions: bool,
/// The user mentions that were part of the message.
user_mentions: BTreeSet<String>,
/// True if the message is a room message.
room_mention: bool,
/// The number of users in the room.
room_member_count: u64,
@ -87,7 +86,7 @@ pub struct PushRuleEvaluator {
/// The related events, indexed by relation type. Flattened in the same manner as
/// `flattened_keys`.
related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
related_events_flattened: BTreeMap<String, BTreeMap<String, JsonValue>>,
/// If msc3664, push rules for related events, is enabled.
related_event_match_enabled: bool,
@ -98,6 +97,12 @@ pub struct PushRuleEvaluator {
/// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
/// flag as MSC1767 (extensible events core).
msc3931_enabled: bool,
/// If MSC3758 (exact_event_match push rule condition) is enabled.
msc3758_exact_event_match: bool,
/// If MSC3966 (exact_event_property_contains push rule condition) is enabled.
msc3966_exact_event_property_contains: bool,
}
#[pymethods]
@ -106,29 +111,29 @@ impl PushRuleEvaluator {
#[allow(clippy::too_many_arguments)]
#[new]
pub fn py_new(
flattened_keys: BTreeMap<String, String>,
flattened_keys: BTreeMap<String, JsonValue>,
has_mentions: bool,
user_mentions: BTreeSet<String>,
room_mention: bool,
room_member_count: u64,
sender_power_level: Option<i64>,
notification_power_levels: BTreeMap<String, i64>,
related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
related_events_flattened: BTreeMap<String, BTreeMap<String, JsonValue>>,
related_event_match_enabled: bool,
room_version_feature_flags: Vec<String>,
msc3931_enabled: bool,
msc3758_exact_event_match: bool,
msc3966_exact_event_property_contains: bool,
) -> Result<Self, Error> {
let body = flattened_keys
.get("content.body")
.cloned()
.unwrap_or_default();
let body = match flattened_keys.get("content.body") {
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(),
_ => String::new(),
};
Ok(PushRuleEvaluator {
flattened_keys,
body,
has_mentions,
user_mentions,
room_mention,
room_member_count,
notification_power_levels,
sender_power_level,
@ -136,6 +141,8 @@ impl PushRuleEvaluator {
related_event_match_enabled,
room_version_feature_flags,
msc3931_enabled,
msc3758_exact_event_match,
msc3966_exact_event_property_contains,
})
}
@ -252,9 +259,15 @@ impl PushRuleEvaluator {
KnownCondition::EventMatch(event_match) => {
self.match_event_match(event_match, user_id)?
}
KnownCondition::ExactEventMatch(exact_event_match) => {
self.match_exact_event_match(exact_event_match)?
}
KnownCondition::RelatedEventMatch(event_match) => {
self.match_related_event_match(event_match, user_id)?
}
KnownCondition::ExactEventPropertyContains(exact_event_match) => {
self.match_exact_event_property_contains(exact_event_match)?
}
KnownCondition::IsUserMention => {
if let Some(uid) = user_id {
self.user_mentions.contains(uid)
@ -262,7 +275,6 @@ impl PushRuleEvaluator {
false
}
}
KnownCondition::IsRoomMention => self.room_mention,
KnownCondition::ContainsDisplayName => {
if let Some(dn) = display_name {
if !dn.is_empty() {
@ -337,7 +349,9 @@ impl PushRuleEvaluator {
return Ok(false);
};
let haystack = if let Some(haystack) = self.flattened_keys.get(&*event_match.key) {
let haystack = if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) =
self.flattened_keys.get(&*event_match.key)
{
haystack
} else {
return Ok(false);
@ -355,6 +369,29 @@ impl PushRuleEvaluator {
compiled_pattern.is_match(haystack)
}
/// Evaluates an `exact_event_match` condition. (MSC3758)
fn match_exact_event_match(
&self,
exact_event_match: &ExactEventMatchCondition,
) -> Result<bool, Error> {
// First check if the feature is enabled.
if !self.msc3758_exact_event_match {
return Ok(false);
}
let value = &exact_event_match.value;
let haystack = if let Some(JsonValue::Value(haystack)) =
self.flattened_keys.get(&*exact_event_match.key)
{
haystack
} else {
return Ok(false);
};
Ok(haystack == &**value)
}
/// Evaluates a `related_event_match` condition. (MSC3664)
fn match_related_event_match(
&self,
@ -410,11 +447,12 @@ impl PushRuleEvaluator {
return Ok(false);
};
let haystack = if let Some(haystack) = event.get(&**key) {
haystack
} else {
return Ok(false);
};
let haystack =
if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) = event.get(&**key) {
haystack
} else {
return Ok(false);
};
// For the content.body we match against "words", but for everything
// else we match against the entire value.
@ -428,6 +466,29 @@ impl PushRuleEvaluator {
compiled_pattern.is_match(haystack)
}
/// Evaluates an `exact_event_property_contains` condition. (MSC3966)
fn match_exact_event_property_contains(
&self,
exact_event_match: &ExactEventMatchCondition,
) -> Result<bool, Error> {
// First check if the feature is enabled.
if !self.msc3966_exact_event_property_contains {
return Ok(false);
}
let value = &exact_event_match.value;
let haystack = if let Some(JsonValue::Array(haystack)) =
self.flattened_keys.get(&*exact_event_match.key)
{
haystack
} else {
return Ok(false);
};
Ok(haystack.contains(&**value))
}
/// Match the member count against an 'is' condition
/// The `is` condition can be things like '>2', '==3' or even just '4'.
fn match_member_count(&self, is: &str) -> Result<bool, Error> {
@ -455,12 +516,14 @@ impl PushRuleEvaluator {
#[test]
fn push_rule_evaluator() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
flattened_keys.insert(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
);
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
BTreeMap::new(),
@ -468,6 +531,8 @@ fn push_rule_evaluator() {
true,
vec![],
true,
true,
true,
)
.unwrap();
@ -482,13 +547,15 @@ fn test_requires_room_version_supports_condition() {
use crate::push::{PushRule, PushRules};
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
flattened_keys.insert(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
);
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
BTreeMap::new(),
@ -496,6 +563,8 @@ fn test_requires_room_version_supports_condition() {
false,
flags,
true,
true,
true,
)
.unwrap();

View File

@ -56,7 +56,9 @@ use std::collections::{BTreeMap, HashMap, HashSet};
use anyhow::{Context, Error};
use log::warn;
use pyo3::exceptions::PyTypeError;
use pyo3::prelude::*;
use pyo3::types::{PyBool, PyList, PyLong, PyString};
use pythonize::{depythonize, pythonize};
use serde::de::Error as _;
use serde::{Deserialize, Serialize};
@ -248,6 +250,65 @@ impl<'de> Deserialize<'de> for Action {
}
}
/// A simple JSON value (string, int, boolean, or null).
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum SimpleJsonValue {
Str(String),
Int(i64),
Bool(bool),
Null,
}
impl<'source> FromPyObject<'source> for SimpleJsonValue {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
if let Ok(s) = <PyString as pyo3::PyTryFrom>::try_from(ob) {
Ok(SimpleJsonValue::Str(s.to_string()))
// A bool *is* an int, ensure we try bool first.
} else if let Ok(b) = <PyBool as pyo3::PyTryFrom>::try_from(ob) {
Ok(SimpleJsonValue::Bool(b.extract()?))
} else if let Ok(i) = <PyLong as pyo3::PyTryFrom>::try_from(ob) {
Ok(SimpleJsonValue::Int(i.extract()?))
} else if ob.is_none() {
Ok(SimpleJsonValue::Null)
} else {
Err(PyTypeError::new_err(format!(
"Can't convert from {} to SimpleJsonValue",
ob.get_type().name()?
)))
}
}
}
/// A JSON value (list, string, int, boolean, or null).
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum JsonValue {
Array(Vec<SimpleJsonValue>),
Value(SimpleJsonValue),
}
impl<'source> FromPyObject<'source> for JsonValue {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
if let Ok(l) = <PyList as pyo3::PyTryFrom>::try_from(ob) {
match l.iter().map(SimpleJsonValue::extract).collect() {
Ok(a) => Ok(JsonValue::Array(a)),
Err(e) => Err(PyTypeError::new_err(format!(
"Can't convert to JsonValue::Array: {}",
e
))),
}
} else if let Ok(v) = SimpleJsonValue::extract(ob) {
Ok(JsonValue::Value(v))
} else {
Err(PyTypeError::new_err(format!(
"Can't convert from {} to JsonValue",
ob.get_type().name()?
)))
}
}
}
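On the Python side of the pyo3 boundary, these impls mean a flattened event key can map to any simple scalar, or a list of scalars, rather than only a string. A minimal sketch of values that would extract successfully (the event data here is hypothetical):
# Strings, bools, ints and None become JsonValue::Value(SimpleJsonValue::...);
# lists of those become JsonValue::Array. Note bools are tried before ints,
# since a Python bool is also an int.
flattened_keys = {
    "type": "m.room.message",                                   # Str
    "content.body": "hello",                                    # Str
    "content.org.matrix.msc3952.mentions.room": True,           # Bool
    "content.example_count": 14,                                # Int (hypothetical key)
    "content.example_missing": None,                            # Null (hypothetical key)
    "content.org.matrix.msc3966.values": ["a", 1, True, None],  # Array
}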
/// A condition used in push rules to match against an event.
///
/// We need this split as `serde` doesn't give us the ability to have a
@ -267,12 +328,14 @@ pub enum Condition {
#[serde(tag = "kind")]
pub enum KnownCondition {
EventMatch(EventMatchCondition),
#[serde(rename = "com.beeper.msc3758.exact_event_match")]
ExactEventMatch(ExactEventMatchCondition),
#[serde(rename = "im.nheko.msc3664.related_event_match")]
RelatedEventMatch(RelatedEventMatchCondition),
#[serde(rename = "org.matrix.msc3966.exact_event_property_contains")]
ExactEventPropertyContains(ExactEventMatchCondition),
#[serde(rename = "org.matrix.msc3952.is_user_mention")]
IsUserMention,
#[serde(rename = "org.matrix.msc3952.is_room_mention")]
IsRoomMention,
ContainsDisplayName,
RoomMemberCount {
#[serde(skip_serializing_if = "Option::is_none")]
@ -309,6 +372,13 @@ pub struct EventMatchCondition {
pub pattern_type: Option<Cow<'static, str>>,
}
/// The body of a [`Condition::ExactEventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExactEventMatchCondition {
pub key: Cow<'static, str>,
pub value: Cow<'static, SimpleJsonValue>,
}
/// The body of a [`Condition::RelatedEventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RelatedEventMatchCondition {
@ -542,6 +612,48 @@ fn test_deserialize_unstable_msc3931_condition() {
));
}
#[test]
fn test_deserialize_unstable_msc3758_condition() {
// A string condition should work.
let json =
r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":"foo"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::ExactEventMatch(_))
));
// A boolean condition should work.
let json =
r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":true}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::ExactEventMatch(_))
));
// An integer condition should work.
let json = r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":1}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::ExactEventMatch(_))
));
// A null condition should work
let json =
r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":null}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::ExactEventMatch(_))
));
}
#[test]
fn test_deserialize_unstable_msc3952_user_condition() {
let json = r#"{"kind":"org.matrix.msc3952.is_user_mention"}"#;
@ -553,17 +665,6 @@ fn test_deserialize_unstable_msc3952_user_condition() {
));
}
#[test]
fn test_deserialize_unstable_msc3952_room_condition() {
let json = r#"{"kind":"org.matrix.msc3952.is_room_mention"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::IsRoomMention)
));
}
#[test]
fn test_deserialize_custom_condition() {
let json = r#"{"kind":"custom_tag"}"#;

View File

@ -0,0 +1,58 @@
#! /usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pathlib import Path
from typing import Dict, List
import tomli
def main() -> None:
lockfile_path = Path(__file__).parent.parent.joinpath("poetry.lock")
with open(lockfile_path, "rb") as lockfile:
lockfile_content = tomli.load(lockfile)
# Poetry 1.3+ lockfile format:
# There's a `files` inline table in each [[package]]
packages_to_assets: Dict[str, List[Dict[str, str]]] = {
package["name"]: package["files"] for package in lockfile_content["package"]
}
success = True
for package_name, assets in packages_to_assets.items():
has_sdist = any(asset["file"].endswith(".tar.gz") for asset in assets)
if not has_sdist:
success = False
print(
f"Locked package {package_name!r} does not have a source distribution!",
file=sys.stderr,
)
if not success:
print(
"\nThere were some problems with the Poetry lockfile (poetry.lock).",
file=sys.stderr,
)
sys.exit(1)
print(
f"Poetry lockfile OK. {len(packages_to_assets)} locked packages checked.",
file=sys.stderr,
)
if __name__ == "__main__":
main()
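As a rough illustration of the Poetry 1.3+ lockfile shape this script expects (a sketch with a hypothetical package, not part of the script itself):
import tomli

sample = '''
[[package]]
name = "example"
version = "1.0.0"
files = [
    {file = "example-1.0.0-py3-none-any.whl", hash = "sha256:0000"},
    {file = "example-1.0.0.tar.gz", hash = "sha256:1111"},
]
'''
content = tomli.loads(sample)
files = content["package"][0]["files"]
# The check above passes because one asset ends in .tar.gz (an sdist).
assert any(f["file"].endswith(".tar.gz") for f in files)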

View File

@ -19,7 +19,8 @@ usage() {
echo "-c"
echo " CI mode. Prints every command that the script runs."
echo "-o <path>"
echo " Directory to output full schema files to."
echo " Directory to output full schema files to. You probably want to use"
echo " '-o synapse/storage/schema'"
echo "-n <schema number>"
echo " Schema number for the new snapshot. Used to set the location of files within "
echo " the output directory, mimicking that of synapse/storage/schemas."
@ -27,6 +28,11 @@ usage() {
echo "-h"
echo " Display this help text."
echo ""
echo ""
echo "You probably want to invoke this with something like"
echo " docker run --rm -e POSTGRES_PASSWORD=postgres -e POSTGRES_USER=postgres -e POSTGRES_DB=synapse -p 5432:5432 postgres:11-alpine"
echo " echo postgres | scripts-dev/make_full_schema.sh -p postgres -n MY_SCHEMA_NUMBER -o synapse/storage/schema"
echo ""
echo " NB: make sure to run this against the *oldest* supported version of postgres,"
echo " or else pg_dump might output non-backwards-compatible syntax."
}
@ -189,7 +195,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
echo "Running db background jobs..."
synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates
poetry run python synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates
# Create the PostgreSQL database.
echo "Creating postgres databases..."
@ -198,7 +204,7 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_MAIN_DB_NAM
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_STATE_DB_NAME"
echo "Running db background jobs..."
synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates
poetry run python synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates
echo "Dropping unwanted db tables..."
@ -293,4 +299,12 @@ pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owne
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
if [[ "$OUTPUT_DIR" == *synapse/storage/schema ]]; then
echo "Updating contrib/datagrip symlinks..."
ln -sf "../../synapse/storage/schema/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" "contrib/datagrip/common.sql"
ln -sf "../../synapse/storage/schema/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" "contrib/datagrip/main.sql"
ln -sf "../../synapse/storage/schema/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" "contrib/datagrip/state.sql"
else
echo "Not updating contrib/datagrip symlinks (unknown output directory)"
fi
echo "Done! Files dumped to: $OUTPUT_DIR"

View File

@ -14,7 +14,7 @@
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
from synapse.types import JsonDict
from synapse.types import JsonDict, JsonValue
class PushRule:
@property
@ -56,17 +56,18 @@ def get_base_rule_ids() -> Collection[str]: ...
class PushRuleEvaluator:
def __init__(
self,
flattened_keys: Mapping[str, str],
flattened_keys: Mapping[str, JsonValue],
has_mentions: bool,
user_mentions: Set[str],
room_mention: bool,
room_member_count: int,
sender_power_level: Optional[int],
notification_power_levels: Mapping[str, int],
related_events_flattened: Mapping[str, Mapping[str, str]],
related_events_flattened: Mapping[str, Mapping[str, JsonValue]],
related_event_match_enabled: bool,
room_version_feature_flags: Tuple[str, ...],
msc3931_enabled: bool,
msc3758_exact_event_match: bool,
msc3966_exact_event_property_contains: bool,
): ...
def run(
self,

View File

@ -94,61 +94,80 @@ reactor = cast(ISynapseReactor, reactor_)
logger = logging.getLogger("synapse_port_db")
# SQLite doesn't have a dedicated boolean type (it stores True/False as 1/0). This means
# portdb will read sqlite bools as integers, then try to insert them into postgres
# boolean columns---which fails. Lacking some Python-parseable metaschema, we must
# specify which integer columns should be inserted as booleans into postgres.
BOOLEAN_COLUMNS = {
"events": ["processed", "outlier", "contains_url"],
"rooms": ["is_public", "has_auth_chain_index"],
"access_tokens": ["used"],
"account_validity": ["email_sent"],
"device_lists_changes_in_room": ["converted_to_destinations"],
"device_lists_outbound_pokes": ["sent"],
"devices": ["hidden"],
"e2e_fallback_keys_json": ["used"],
"e2e_room_keys": ["is_verified"],
"event_edges": ["is_state"],
"events": ["processed", "outlier", "contains_url"],
"local_media_repository": ["safe_from_quarantine"],
"presence_list": ["accepted"],
"presence_stream": ["currently_active"],
"public_room_list_stream": ["visibility"],
"devices": ["hidden"],
"device_lists_outbound_pokes": ["sent"],
"users_who_share_rooms": ["share_private"],
"e2e_room_keys": ["is_verified"],
"account_validity": ["email_sent"],
"pushers": ["enabled"],
"redactions": ["have_censored"],
"room_stats_state": ["is_federatable"],
"local_media_repository": ["safe_from_quarantine"],
"rooms": ["is_public", "has_auth_chain_index"],
"users": ["shadow_banned", "approved"],
"e2e_fallback_keys_json": ["used"],
"access_tokens": ["used"],
"device_lists_changes_in_room": ["converted_to_destinations"],
"pushers": ["enabled"],
"un_partial_stated_event_stream": ["rejection_status_changed"],
"users_who_share_rooms": ["share_private"],
}
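To illustrate why this mapping is needed (a hedged sketch only; portdb's actual row conversion lives elsewhere in this script):
def to_postgres_value(table: str, column: str, value: object) -> object:
    # sqlite hands back 0/1 for boolean columns, but postgres boolean
    # columns reject integers, so coerce the columns listed above.
    if column in BOOLEAN_COLUMNS.get(table, ()):
        return bool(value)
    return value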
# These tables are never deleted from in normal operation [*], so we can resume porting
# over rows from a previous attempt rather than starting from scratch.
#
# [*]: We do delete from many of these tables when purging a room, and
# presumably when purging old events. So we might e.g.
#
# 1. Run portdb and port half of some table.
# 2. Stop portdb.
# 3. Purge something, deleting some of the rows we've ported over.
# 4. Restart portdb. The rows deleted from sqlite are still present in postgres.
#
# But this isn't the end of the world: we should be able to repeat the purge
# on the postgres DB when porting completes.
APPEND_ONLY_TABLES = [
"event_reference_hashes",
"events",
"cache_invalidation_stream_by_instance",
"event_auth",
"event_edges",
"event_json",
"state_events",
"room_memberships",
"topics",
"room_names",
"rooms",
"event_reference_hashes",
"event_search",
"event_to_state_groups",
"events",
"ex_outlier_stream",
"local_media_repository",
"local_media_repository_thumbnails",
"presence_stream",
"public_room_list_stream",
"push_rules_stream",
"received_transactions",
"redactions",
"rejections",
"remote_media_cache",
"remote_media_cache_thumbnails",
"redactions",
"event_edges",
"event_auth",
"received_transactions",
"room_memberships",
"room_names",
"rooms",
"sent_transactions",
"transaction_id_to_pdu",
"users",
"state_events",
"state_group_edges",
"state_groups",
"state_groups_state",
"event_to_state_groups",
"rejections",
"event_search",
"presence_stream",
"push_rules_stream",
"ex_outlier_stream",
"cache_invalidation_stream_by_instance",
"public_room_list_stream",
"state_group_edges",
"stream_ordering_to_exterm",
"topics",
"transaction_id_to_pdu",
"un_partial_stated_event_stream",
"users",
]

View File

@ -32,7 +32,6 @@ from synapse.appservice import ApplicationService
from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import (
SynapseTags,
active_span,
force_tracing,
start_active_span,
@ -162,12 +161,6 @@ class Auth:
parent_span.set_tag(
"authenticated_entity", requester.authenticated_entity
)
# We tag the Synapse instance name so that it's an easy jumping
# off point into the logs. Can also be used to filter for an
# instance that is under load.
parent_span.set_tag(
SynapseTags.INSTANCE_NAME, self.hs.get_instance_name()
)
parent_span.set_tag("user_id", requester.user.to_string())
if requester.device_id is not None:
parent_span.set_tag("device_id", requester.device_id)

View File

@ -108,6 +108,10 @@ class Codes(str, Enum):
USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"
# Attempt to send a second annotation with the same event type & annotation key
# MSC2677
DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"
class CodeMessageException(RuntimeError):
"""An exception with integer code and message string attributes.
@ -751,3 +755,25 @@ class ModuleFailedException(Exception):
Raised when a module API callback fails, for example because it raised an
exception.
"""
class PartialStateConflictError(SynapseError):
"""An internal error raised when attempting to persist an event with partial state
after the room containing the event has been un-partial stated.
This error should be handled by recomputing the event context and trying again.
This error has an HTTP status code so that it can be transported over replication.
It should not be exposed to clients.
"""
@staticmethod
def message() -> str:
return "Cannot persist partial state event in un-partial stated room"
def __init__(self) -> None:
super().__init__(
HTTPStatus.CONFLICT,
msg=PartialStateConflictError.message(),
errcode=Codes.UNKNOWN,
)
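A minimal sketch of the retry pattern the docstring describes (the helper names here are hypothetical):
async def persist_with_retry(compute_context, persist_event, event):
    try:
        return await persist_event(event, await compute_context(event))
    except PartialStateConflictError:
        # The room was un-partial stated under us, so the event context is
        # stale: recompute it and try once more.
        return await persist_event(event, await compute_context(event))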

View File

@ -219,9 +219,13 @@ class FilterCollection:
self._room_timeline_filter = Filter(hs, room_filter_json.get("timeline", {}))
self._room_state_filter = Filter(hs, room_filter_json.get("state", {}))
self._room_ephemeral_filter = Filter(hs, room_filter_json.get("ephemeral", {}))
self._room_account_data = Filter(hs, room_filter_json.get("account_data", {}))
self._room_account_data_filter = Filter(
hs, room_filter_json.get("account_data", {})
)
self._presence_filter = Filter(hs, filter_json.get("presence", {}))
self._account_data = Filter(hs, filter_json.get("account_data", {}))
self._global_account_data_filter = Filter(
hs, filter_json.get("account_data", {})
)
self.include_leave = filter_json.get("room", {}).get("include_leave", False)
self.event_fields = filter_json.get("event_fields", [])
@ -256,8 +260,10 @@ class FilterCollection:
) -> List[UserPresenceState]:
return await self._presence_filter.filter(presence_states)
async def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
return await self._account_data.filter(events)
async def filter_global_account_data(
self, events: Iterable[JsonDict]
) -> List[JsonDict]:
return await self._global_account_data_filter.filter(events)
async def filter_room_state(self, events: Iterable[EventBase]) -> List[EventBase]:
return await self._room_state_filter.filter(
@ -279,7 +285,7 @@ class FilterCollection:
async def filter_room_account_data(
self, events: Iterable[JsonDict]
) -> List[JsonDict]:
return await self._room_account_data.filter(
return await self._room_account_data_filter.filter(
await self._room_filter.filter(events)
)
@ -292,6 +298,13 @@ class FilterCollection:
or self._presence_filter.filters_all_senders()
)
def blocks_all_global_account_data(self) -> bool:
"""True if all global acount data will be filtered out."""
return (
self._global_account_data_filter.filters_all_types()
or self._global_account_data_filter.filters_all_senders()
)
def blocks_all_room_ephemeral(self) -> bool:
return (
self._room_ephemeral_filter.filters_all_types()
@ -299,6 +312,13 @@ class FilterCollection:
or self._room_ephemeral_filter.filters_all_rooms()
)
def blocks_all_room_account_data(self) -> bool:
return (
self._room_account_data_filter.filters_all_types()
or self._room_account_data_filter.filters_all_senders()
or self._room_account_data_filter.filters_all_rooms()
)
def blocks_all_room_timeline(self) -> bool:
return (
self._room_timeline_filter.filters_all_types()

View File

@ -17,7 +17,7 @@ import logging
import os
import sys
import tempfile
from typing import List, Optional
from typing import List, Mapping, Optional
from twisted.internet import defer, task
@ -222,6 +222,19 @@ class FileExfiltrationWriter(ExfiltrationWriter):
with open(connection_file, "a") as f:
print(json.dumps(connection), file=f)
def write_account_data(
self, file_name: str, account_data: Mapping[str, JsonDict]
) -> None:
account_data_directory = os.path.join(
self.base_directory, "user_data", "account_data"
)
os.makedirs(account_data_directory, exist_ok=True)
account_data_file = os.path.join(account_data_directory, file_name)
with open(account_data_file, "a") as f:
print(json.dumps(account_data), file=f)
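A brief usage sketch, assuming the writer is constructed with a user ID as elsewhere in this module (the arguments here are hypothetical):
writer = FileExfiltrationWriter("@alice:example.org")
# Appends JSON to <base_directory>/user_data/account_data/global.
writer.write_account_data("global", {"m.push_rules": {"global": {}}})
print(writer.finished())  # the base directory holding the export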
def finished(self) -> str:
return self.base_directory

View File

@ -15,7 +15,7 @@ import logging
import math
import resource
import sys
from typing import TYPE_CHECKING, List, Sized, Tuple
from typing import TYPE_CHECKING, List, Mapping, Sized, Tuple
from prometheus_client import Gauge
@ -194,7 +194,7 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
@wrap_as_background_process("generate_monthly_active_users")
async def generate_monthly_active_users() -> None:
current_mau_count = 0
current_mau_count_by_service = {}
current_mau_count_by_service: Mapping[str, int] = {}
reserved_users: Sized = ()
store = hs.get_datastores().main
if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only:

View File

@ -169,12 +169,28 @@ class ExperimentalConfig(Config):
# MSC3925: do not replace events with their edits
self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False)
# MSC3952: Intentional mentions
self.msc3952_intentional_mentions = experimental.get(
"msc3952_intentional_mentions", False
# MSC3758: exact_event_match push rule condition
self.msc3758_exact_event_match = experimental.get(
"msc3758_exact_event_match", False
)
# MSC3873: Disambiguate event_match keys.
self.msc3783_escape_event_match_key = experimental.get(
"msc3783_escape_event_match_key", False
)
# MSC3952: Intentional mentions, this depends on MSC3758.
self.msc3952_intentional_mentions = (
experimental.get("msc3952_intentional_mentions", False)
and self.msc3758_exact_event_match
)
# MSC3958: Do not generate notifications for edits.
self.msc3958_supress_edit_notifs = experimental.get(
"msc3958_supress_edit_notifs", False
)
# MSC3966: exact_event_property_contains push rule condition.
self.msc3966_exact_event_property_contains = experimental.get(
"msc3966_exact_event_property_contains", False
)
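For example (a sketch mirroring the lookups above), msc3952_intentional_mentions stays off unless msc3758_exact_event_match is also enabled:
experimental = {"msc3952_intentional_mentions": True}
msc3758 = experimental.get("msc3758_exact_event_match", False)
msc3952 = experimental.get("msc3952_intentional_mentions", False) and msc3758
assert msc3952 is False  # enabling msc3758_exact_event_match would flip this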

View File

@ -33,4 +33,5 @@ class RedisConfig(Config):
self.redis_host = redis_config.get("host", "localhost")
self.redis_port = redis_config.get("port", 6379)
self.redis_dbid = redis_config.get("dbid", None)
self.redis_password = redis_config.get("password")

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List
from typing import Any, Collection
from matrix_common.regex import glob_to_regex
@ -70,7 +70,7 @@ class RoomDirectoryConfig(Config):
return False
def is_publishing_room_allowed(
self, user_id: str, room_id: str, aliases: List[str]
self, user_id: str, room_id: str, aliases: Collection[str]
) -> bool:
"""Checks if the given user is allowed to publish the room
@ -122,7 +122,7 @@ class _RoomDirectoryRule:
except Exception as e:
raise ConfigError("Failed to parse glob into regex") from e
def matches(self, user_id: str, room_id: str, aliases: List[str]) -> bool:
def matches(self, user_id: str, room_id: str, aliases: Collection[str]) -> bool:
"""Tests if this rule matches the given user_id, room_id and aliases.
Args:

View File

@ -177,6 +177,7 @@ KNOWN_RESOURCES = {
"client",
"consent",
"federation",
"health",
"keys",
"media",
"metrics",

View File

@ -16,18 +16,7 @@
import collections.abc
import logging
import typing
from typing import (
Any,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@ -56,7 +45,13 @@ from synapse.api.room_versions import (
RoomVersions,
)
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import MutableStateMap, StateMap, UserID, get_domain_from_id
from synapse.types import (
MutableStateMap,
StateMap,
StrCollection,
UserID,
get_domain_from_id,
)
if typing.TYPE_CHECKING:
# conditional imports to avoid import cycle
@ -69,7 +64,7 @@ logger = logging.getLogger(__name__)
class _EventSourceStore(Protocol):
async def get_events(
self,
event_ids: Collection[str],
event_ids: StrCollection,
redact_behaviour: EventRedactBehaviour,
get_prev_content: bool = False,
allow_rejected: bool = False,

View File

@ -39,7 +39,7 @@ from unpaddedbase64 import encode_base64
from synapse.api.constants import RelationTypes
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
from synapse.types import JsonDict, RoomStreamToken
from synapse.types import JsonDict, RoomStreamToken, StrCollection
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
from synapse.util.stringutils import strtobool
@ -413,7 +413,7 @@ class EventBase(metaclass=abc.ABCMeta):
"""
return [e for e, _ in self._dict["prev_events"]]
def auth_event_ids(self) -> Sequence[str]:
def auth_event_ids(self) -> StrCollection:
"""Returns the list of auth event IDs. The order matches the order
specified in the event, though there is no meaning to it.
@ -558,7 +558,7 @@ class FrozenEventV2(EventBase):
"""
return self._dict["prev_events"]
def auth_event_ids(self) -> Sequence[str]:
def auth_event_ids(self) -> StrCollection:
"""Returns the list of auth event IDs. The order matches the order
specified in the event, though there is no meaning to it.

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple, Union
import attr
from signedjson.types import SigningKey
@ -103,7 +103,7 @@ class EventBuilder:
async def build(
self,
prev_event_ids: List[str],
prev_event_ids: Collection[str],
auth_event_ids: Optional[List[str]],
depth: Optional[int] = None,
) -> EventBase:
@ -136,7 +136,7 @@ class EventBuilder:
format_version = self.room_version.event_format
# The types of auth/prev events changes between event versions.
prev_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
prev_events: Union[Collection[str], List[Tuple[str, Dict[str, str]]]]
auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
if format_version == EventFormatVersions.ROOM_V1_V2:
auth_events = await self._store.add_event_hashes(auth_event_ids)

View File

@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional, Tuple
import attr
@ -26,8 +27,51 @@ if TYPE_CHECKING:
from synapse.types.state import StateFilter
class UnpersistedEventContextBase(ABC):
"""
This is a base class for EventContext and UnpersistedEventContext, objects which
hold information relevant to storing an associated event. Note that an
UnpersistedEventContext must be converted into an EventContext before it is
suitable to send to the db with its associated event.
Attributes:
_storage: storage controllers for interfacing with the database
app_service: If the associated event is being sent by a (local) application service, that
app service.
"""
def __init__(self, storage_controller: "StorageControllers"):
self._storage: "StorageControllers" = storage_controller
self.app_service: Optional[ApplicationService] = None
@abstractmethod
async def persist(
self,
event: EventBase,
) -> "EventContext":
"""
A method to convert an UnpersistedEventContext to an EventContext, suitable for
sending to the database with the associated event.
"""
pass
@abstractmethod
async def get_prev_state_ids(
self, state_filter: Optional["StateFilter"] = None
) -> StateMap[str]:
"""
Gets the room state at the event (ie not including the event if the event is a
state event).
Args:
state_filter: specifies the type of state event to fetch from DB, example:
EventTypes.JoinRules
"""
pass
@attr.s(slots=True, auto_attribs=True)
class EventContext:
class EventContext(UnpersistedEventContextBase):
"""
Holds information relevant to persisting an event
@ -77,9 +121,6 @@ class EventContext:
delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group``
and ``state_group``.
app_service: If this event is being sent by a (local) application service, that
app service.
partial_state: if True, we may be storing this event with a temporary,
incomplete state.
"""
@ -122,6 +163,9 @@ class EventContext:
"""Return an EventContext instance suitable for persisting an outlier event"""
return EventContext(storage=storage)
async def persist(self, event: EventBase) -> "EventContext":
return self
async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict:
"""Converts self to a type that can be serialized as JSON, and then
deserialized by `deserialize`
@ -254,6 +298,128 @@ class EventContext:
)
@attr.s(slots=True, auto_attribs=True)
class UnpersistedEventContext(UnpersistedEventContextBase):
"""
The event context holds information about the state groups for an event. It is important
to remember that an event technically has two state groups: the state group before the
event, and the state group after the event. If the event is not a state event, the state
group will not change (ie the state group before the event will be the same as the state
group after the event), but if it is a state event the state group before the event
will differ from the state group after the event.
This is a version of an EventContext before the new state group (if any) has been
computed and stored. It contains information about the state before the event (which
also may be the information after the event, if the event is not a state event). The
UnpersistedEventContext must be converted into an EventContext by calling the method
'persist' on it before it is suitable to be sent to the DB for processing.
state_group_after_event:
The state group after the event. This will always be None until it is persisted.
If the event is not a state event, this will be the same as
state_group_before_event.
state_group_before_event:
The ID of the state group representing the state of the room before this event.
state_delta_due_to_event:
If the event is a state event, then this is the delta of the state between
`state_group` and `state_group_before_event`
prev_group_for_state_group_before_event:
If it is known, ``state_group_before_event``'s previous state group.
delta_ids_to_state_group_before_event:
If ``prev_group_for_state_group_before_event`` is not None, the state delta
between ``prev_group_for_state_group_before_event`` and ``state_group_before_event``.
partial_state:
Whether the event has partial state.
state_map_before_event:
A map of the state before the event, i.e. the state at `state_group_before_event`
"""
_storage: "StorageControllers"
state_group_before_event: Optional[int]
state_group_after_event: Optional[int]
state_delta_due_to_event: Optional[dict]
prev_group_for_state_group_before_event: Optional[int]
delta_ids_to_state_group_before_event: Optional[StateMap[str]]
partial_state: bool
state_map_before_event: Optional[StateMap[str]] = None
async def get_prev_state_ids(
self, state_filter: Optional["StateFilter"] = None
) -> StateMap[str]:
"""
Gets the room state map, excluding this event.
Args:
state_filter: specifies the type of state event to fetch from DB
Returns:
Maps a (type, state_key) to the event ID of the state event matching
this tuple.
"""
if self.state_map_before_event:
return self.state_map_before_event
assert self.state_group_before_event is not None
return await self._storage.state.get_state_ids_for_group(
self.state_group_before_event, state_filter
)
async def persist(self, event: EventBase) -> EventContext:
"""
Creates a full `EventContext` for the event, persisting any referenced state that
has not yet been persisted.
Args:
event: event that the EventContext is associated with.
Returns: An EventContext suitable for sending to the database with the event
for persisting
"""
assert self.partial_state is not None
# If we have a full set of state for before the event but don't have a state
# group for that state, we need to get one
if self.state_group_before_event is None:
assert self.state_map_before_event
state_group_before_event = await self._storage.state.store_state_group(
event.event_id,
event.room_id,
prev_group=self.prev_group_for_state_group_before_event,
delta_ids=self.delta_ids_to_state_group_before_event,
current_state_ids=self.state_map_before_event,
)
self.state_group_before_event = state_group_before_event
# if the event isn't a state event the state group doesn't change
if not self.state_delta_due_to_event:
state_group_after_event = self.state_group_before_event
# otherwise if it is a state event we need to get a state group for it
else:
state_group_after_event = await self._storage.state.store_state_group(
event.event_id,
event.room_id,
prev_group=self.state_group_before_event,
delta_ids=self.state_delta_due_to_event,
current_state_ids=None,
)
return EventContext.with_state(
storage=self._storage,
state_group=state_group_after_event,
state_group_before_event=self.state_group_before_event,
state_delta_due_to_event=self.state_delta_due_to_event,
partial_state=self.partial_state,
prev_group=self.state_group_before_event,
delta_ids=self.state_delta_due_to_event,
)
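A minimal sketch of the intended flow, assuming a persistence helper along the lines of Synapse's storage controllers (names hedged):
async def store_new_event(storage, event, unpersisted):
    # Materialise any missing state groups, turning the unpersisted context
    # into a full EventContext, then hand both to the persistence layer.
    context = await unpersisted.persist(event)
    await storage.persistence.persist_event(event, context)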
def _encode_state_dict(
state_dict: Optional[StateMap[str]],
) -> Optional[List[Tuple[str, str, str]]]:

View File

@ -18,7 +18,7 @@ from twisted.internet.defer import CancelledError
from synapse.api.errors import ModuleFailedException, SynapseError
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.snapshot import UnpersistedEventContextBase
from synapse.storage.roommember import ProfileInfo
from synapse.types import Requester, StateMap
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
@ -231,7 +231,9 @@ class ThirdPartyEventRules:
self._on_threepid_bind_callbacks.append(on_threepid_bind)
async def check_event_allowed(
self, event: EventBase, context: EventContext
self,
event: EventBase,
context: UnpersistedEventContextBase,
) -> Tuple[bool, Optional[dict]]:
"""Check if a provided event should be allowed in the given context.

View File

@ -884,7 +884,7 @@ class FederationClient(FederationBase):
if 500 <= e.code < 600:
failover = True
elif e.code == 400 and synapse_error.errcode in failover_errcodes:
elif 400 <= e.code < 500 and synapse_error.errcode in failover_errcodes:
failover = True
elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
@ -999,14 +999,13 @@ class FederationClient(FederationBase):
return destination, ev, room_version
failover_errcodes = {Codes.NOT_FOUND}
# MSC3083 defines additional error codes for room joins. Unfortunately
# we do not yet know the room version, assume these will only be returned
# by valid room versions.
failover_errcodes = (
(Codes.UNABLE_AUTHORISE_JOIN, Codes.UNABLE_TO_GRANT_JOIN)
if membership == Membership.JOIN
else None
)
if membership == Membership.JOIN:
failover_errcodes.add(Codes.UNABLE_AUTHORISE_JOIN)
failover_errcodes.add(Codes.UNABLE_TO_GRANT_JOIN)
return await self._try_destination_list(
"make_" + membership,

View File

@ -23,6 +23,7 @@ from typing import (
Collection,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
@ -47,6 +48,7 @@ from synapse.api.errors import (
FederationError,
IncompatibleRoomVersionError,
NotFoundError,
PartialStateConflictError,
SynapseError,
UnsupportedRoomVersionError,
)
@ -80,7 +82,6 @@ from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
ReplicationGetQueryRestServlet,
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.lock import Lock
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
@ -1512,7 +1513,7 @@ class FederationHandlerRegistry:
def _get_event_ids_for_partial_state_join(
join_event: EventBase,
prev_state_ids: StateMap[str],
summary: Dict[str, MemberSummary],
summary: Mapping[str, MemberSummary],
) -> Collection[str]:
"""Calculate state to be returned in a partial_state send_join

View File

@ -343,10 +343,12 @@ class AccountDataEventSource(EventSource[int, JsonDict]):
}
)
(
account_data,
room_account_data,
) = await self.store.get_updated_account_data_for_user(user_id, last_stream_id)
account_data = await self.store.get_updated_global_account_data_for_user(
user_id, last_stream_id
)
room_account_data = await self.store.get_updated_room_account_data_for_user(
user_id, last_stream_id
)
for account_data_type, content in account_data.items():
results.append({"type": account_data_type, "content": content})

View File

@ -14,7 +14,7 @@
import abc
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Set
from synapse.api.constants import Direction, Membership
from synapse.events import EventBase
@ -29,7 +29,7 @@ logger = logging.getLogger(__name__)
class AdminHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._store = hs.get_datastores().main
self._device_handler = hs.get_device_handler()
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
@ -38,7 +38,7 @@ class AdminHandler:
async def get_whois(self, user: UserID) -> JsonDict:
connections = []
sessions = await self.store.get_user_ip_and_agents(user)
sessions = await self._store.get_user_ip_and_agents(user)
for session in sessions:
connections.append(
{
@ -57,7 +57,7 @@ class AdminHandler:
async def get_user(self, user: UserID) -> Optional[JsonDict]:
"""Function to get user details"""
user_info_dict = await self.store.get_user_by_id(user.to_string())
user_info_dict = await self._store.get_user_by_id(user.to_string())
if user_info_dict is None:
return None
@ -89,11 +89,11 @@ class AdminHandler:
}
# Add additional user metadata
profile = await self.store.get_profileinfo(user.localpart)
threepids = await self.store.user_get_threepids(user.to_string())
profile = await self._store.get_profileinfo(user.localpart)
threepids = await self._store.user_get_threepids(user.to_string())
external_ids = [
({"auth_provider": auth_provider, "external_id": external_id})
for auth_provider, external_id in await self.store.get_external_ids_by_user(
for auth_provider, external_id in await self._store.get_external_ids_by_user(
user.to_string()
)
]
@ -101,7 +101,7 @@ class AdminHandler:
user_info_dict["avatar_url"] = profile.avatar_url
user_info_dict["threepids"] = threepids
user_info_dict["external_ids"] = external_ids
user_info_dict["erased"] = await self.store.is_user_erased(user.to_string())
user_info_dict["erased"] = await self._store.is_user_erased(user.to_string())
return user_info_dict
@ -117,7 +117,7 @@ class AdminHandler:
The returned value is that returned by `writer.finished()`.
"""
# Get all rooms the user is in or has been in
rooms = await self.store.get_rooms_for_local_user_where_membership_is(
rooms = await self._store.get_rooms_for_local_user_where_membership_is(
user_id,
membership_list=(
Membership.JOIN,
@ -131,7 +131,7 @@ class AdminHandler:
# We only try and fetch events for rooms the user has been in. If
# they've been e.g. invited to a room without joining then we handle
# those separately.
rooms_user_has_been_in = await self.store.get_rooms_user_has_been_in(user_id)
rooms_user_has_been_in = await self._store.get_rooms_user_has_been_in(user_id)
for index, room in enumerate(rooms):
room_id = room.room_id
@ -140,7 +140,7 @@ class AdminHandler:
"[%s] Handling room %s, %d/%d", user_id, room_id, index + 1, len(rooms)
)
forgotten = await self.store.did_forget(user_id, room_id)
forgotten = await self._store.did_forget(user_id, room_id)
if forgotten:
logger.info("[%s] User forgot room %d, ignoring", user_id, room_id)
continue
@ -152,14 +152,14 @@ class AdminHandler:
if room.membership == Membership.INVITE:
event_id = room.event_id
invite = await self.store.get_event(event_id, allow_none=True)
invite = await self._store.get_event(event_id, allow_none=True)
if invite:
invited_state = invite.unsigned["invite_room_state"]
writer.write_invite(room_id, invite, invited_state)
if room.membership == Membership.KNOCK:
event_id = room.event_id
knock = await self.store.get_event(event_id, allow_none=True)
knock = await self._store.get_event(event_id, allow_none=True)
if knock:
knock_state = knock.unsigned["knock_room_state"]
writer.write_knock(room_id, knock, knock_state)
@ -170,7 +170,7 @@ class AdminHandler:
# were joined. We estimate that point by looking at the
# stream_ordering of the last membership if it wasn't a join.
if room.membership == Membership.JOIN:
stream_ordering = self.store.get_room_max_stream_ordering()
stream_ordering = self._store.get_room_max_stream_ordering()
else:
stream_ordering = room.stream_ordering
@ -197,7 +197,7 @@ class AdminHandler:
# events that we have and then filtering; this isn't the most
# efficient method, perhaps, but it does guarantee we get everything.
while True:
events, _ = await self.store.paginate_room_events(
events, _ = await self._store.paginate_room_events(
room_id, from_key, to_key, limit=100, direction=Direction.FORWARDS
)
if not events:
@ -263,6 +263,13 @@ class AdminHandler:
connections["devices"][""]["sessions"][0]["connections"]
)
# Get all account data the user has global and in rooms
global_data = await self._store.get_global_account_data_for_user(user_id)
by_room_data = await self._store.get_room_account_data_for_user(user_id)
writer.write_account_data("global", global_data)
for room_id in by_room_data:
writer.write_account_data(room_id, by_room_data[room_id])
return writer.finished()
@ -340,6 +347,18 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta):
"""
raise NotImplementedError()
@abc.abstractmethod
def write_account_data(
self, file_name: str, account_data: Mapping[str, JsonDict]
) -> None:
"""Write the account data of a user.
Args:
file_name: file name to write the data to
account_data: mapping of account data type to content (global or per-room)
"""
raise NotImplementedError()
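A minimal sketch of a concrete writer satisfying the new abstract method, assuming a hypothetical file-based exfiltration writer that serialises each scope ("global" or a room ID) to its own JSON file; the class name and directory layout here are illustrative, not part of this diff:

import json
import os
from typing import Mapping

class FileExfiltrationWriter(ExfiltrationWriter):  # hypothetical subclass
    def __init__(self, base_directory: str):
        self.base_directory = base_directory

    def write_account_data(
        self, file_name: str, account_data: Mapping[str, JsonDict]
    ) -> None:
        # One file per scope, mirroring how get_user_data calls this method:
        # once with "global", then once per room ID. (A real implementation
        # would sanitise room IDs before using them as file names.)
        account_dir = os.path.join(self.base_directory, "account_data")
        os.makedirs(account_dir, exist_ok=True)
        with open(os.path.join(account_dir, file_name + ".json"), "w") as f:
            json.dump(account_data, f)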
@abc.abstractmethod
def finished(self) -> Any:
"""Called when all data has successfully been exported and written.

View File

@ -201,7 +201,7 @@ class AuthHandler:
for auth_checker_class in INTERACTIVE_AUTH_CHECKERS:
inst = auth_checker_class(hs)
if inst.is_enabled():
self.checkers[inst.AUTH_TYPE] = inst # type: ignore
self.checkers[inst.AUTH_TYPE] = inst
self.bcrypt_rounds = hs.config.registration.bcrypt_rounds
@ -1593,9 +1593,8 @@ class AuthHandler:
if medium == "email":
address = canonicalise_email(address)
identity_handler = self.hs.get_identity_handler()
result = await identity_handler.try_unbind_threepid(
user_id, {"medium": medium, "address": address, "id_server": id_server}
result = await self.hs.get_identity_handler().try_unbind_threepid(
user_id, medium, address, id_server
)
await self.store.user_delete_threepid(user_id, medium, address)

View File

@ -106,12 +106,7 @@ class DeactivateAccountHandler:
for threepid in threepids:
try:
result = await self._identity_handler.try_unbind_threepid(
user_id,
{
"medium": threepid["medium"],
"address": threepid["address"],
"id_server": id_server,
},
user_id, threepid["medium"], threepid["address"], id_server
)
identity_server_supports_unbinding &= result
except Exception:

View File

@ -14,7 +14,7 @@
import logging
import string
from typing import TYPE_CHECKING, Iterable, List, Optional
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence
from typing_extensions import Literal
@ -485,7 +485,8 @@ class DirectoryHandler:
)
)
if canonical_alias:
room_aliases.append(canonical_alias)
# Ensure we do not mutate room_aliases.
room_aliases = list(room_aliases) + [canonical_alias]
if not self.config.roomdirectory.is_publishing_room_allowed(
user_id, room_id, room_aliases
@ -528,7 +529,7 @@ class DirectoryHandler:
async def get_aliases_for_room(
self, requester: Requester, room_id: str
) -> List[str]:
) -> Sequence[str]:
"""
Get a list of the aliases that currently point to this room on this server
"""

View File

@ -159,19 +159,22 @@ class E2eKeysHandler:
# A map of destination -> user ID -> device IDs.
remote_queries_not_in_cache: Dict[str, Dict[str, Iterable[str]]] = {}
if remote_queries:
query_list: List[Tuple[str, Optional[str]]] = []
user_ids = set()
user_and_device_ids: List[Tuple[str, str]] = []
for user_id, device_ids in remote_queries.items():
if device_ids:
query_list.extend(
user_and_device_ids.extend(
(user_id, device_id) for device_id in device_ids
)
else:
query_list.append((user_id, None))
user_ids.add(user_id)
(
user_ids_not_in_cache,
remote_results,
) = await self.store.get_user_devices_from_cache(query_list)
) = await self.store.get_user_devices_from_cache(
user_ids, user_and_device_ids
)
# Check that the homeserver still shares a room with all cached users.
# Note that this check may be slightly racy when a remote user leaves a

View File

@ -202,7 +202,7 @@ class EventAuthHandler:
state_ids: StateMap[str],
room_version: RoomVersion,
user_id: str,
prev_member_event: Optional[EventBase],
prev_membership: Optional[str],
) -> None:
"""
Check whether a user can join a room without an invite due to restricted join rules.
@ -214,15 +214,14 @@ class EventAuthHandler:
state_ids: The state of the room as it currently is.
room_version: The room version of the room being joined.
user_id: The user joining the room.
prev_member_event: The current membership event for this user.
prev_membership: The current membership state for this user. `None` if the
user has never joined the room (equivalent to "leave").
Raises:
AuthError if the user cannot join the room.
"""
# If the member is invited or currently joined, then nothing to do.
if prev_member_event and (
prev_member_event.membership in (Membership.JOIN, Membership.INVITE)
):
if prev_membership in (Membership.JOIN, Membership.INVITE):
return
# This is not a room with a restricted join rule, so we don't need to do the
@ -255,13 +254,14 @@ class EventAuthHandler:
)
async def has_restricted_join_rules(
self, state_ids: StateMap[str], room_version: RoomVersion
self, partial_state_ids: StateMap[str], room_version: RoomVersion
) -> bool:
"""
Return whether the room has the proper join rules set for access via rooms.
Args:
state_ids: The state of the room as it currently is.
state_ids: The state of the room as it currently is. May be full or partial
state.
room_version: The room version of the room to query.
Returns:
@ -272,7 +272,7 @@ class EventAuthHandler:
return False
# If there's no join rule, then it defaults to invite (so this doesn't apply).
join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
join_rules_event_id = partial_state_ids.get((EventTypes.JoinRules, ""), None)
if not join_rules_event_id:
return False
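Putting the two halves together, a caller is now expected to derive a plain membership string (rather than passing the whole member event) before invoking the restricted-join check. A hedged sketch, with store, state_ids and user_id as stand-ins:

prev_membership = None
prev_member_event_id = state_ids.get((EventTypes.Member, user_id), None)
if prev_member_event_id:
    prev_member_event = await store.get_event(prev_member_event_id)
    prev_membership = prev_member_event.membership

# Raises AuthError if the user may not join under the restricted join rule;
# returns immediately if prev_membership is JOIN or INVITE.
await event_auth_handler.check_restricted_join_rules(
    state_ids, room_version, user_id, prev_membership
)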

View File

@ -49,6 +49,7 @@ from synapse.api.errors import (
FederationPullAttemptBackoffError,
HttpResponseException,
NotFoundError,
PartialStateConflictError,
RequestSendFailed,
SynapseError,
)
@ -56,7 +57,7 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.crypto.event_signing import compute_event_signature
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
from synapse.events.validator import EventValidator
from synapse.federation.federation_client import InvalidResponseError
from synapse.http.servlet import assert_params_in_dict
@ -68,7 +69,6 @@ from synapse.replication.http.federation import (
ReplicationCleanRoomRestServlet,
ReplicationStoreRoomOnOutlierMembershipRestServlet,
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import JsonDict, StrCollection, get_domain_from_id
from synapse.types.state import StateFilter
@ -952,7 +952,20 @@ class FederationHandler:
#
# Note that this requires the /send_join request to come back to the
# same server.
prev_event_ids = None
if room_version.msc3083_join_rules:
# Note that the room's state can change out from under us and render our
# nice join rules-conformant event non-conformant by the time we build the
# event. When this happens, our validation at the end fails and we respond
# to the requesting server with a 403, which is misleading: it indicates
# that the user is not allowed to join the room and that the joining server
# should not bother retrying via this homeserver or any others, when
# in fact we've just messed up building the event.
#
# To reduce the likelihood of this race, we capture the forward extremities
# of the room (prev_event_ids) just before fetching the current state, and
# hope that the state we fetch corresponds to the prev events we chose.
prev_event_ids = await self.store.get_prev_events_for_room(room_id)
state_ids = await self._state_storage_controller.get_current_state_ids(
room_id
)
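The ordering is the whole point of the fix. Distilled (editor's sketch; build_join_event is a stand-in for the event-builder plumbing below):

# 1. Pin the forward extremities first ...
prev_event_ids = await store.get_prev_events_for_room(room_id)
# 2. ... then read the current state, hoping it corresponds to those extremities.
state_ids = await state_storage_controller.get_current_state_ids(room_id)
# 3. Build the join event against that (prev_events, state) pair.
event = await build_join_event(prev_event_ids, state_ids)  # hypothetical helper

Capturing the extremities immediately before the state read keeps the two snapshots as close together as possible, which is what shrinks the race window.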
@ -990,15 +1003,21 @@ class FederationHandler:
)
try:
event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
(
event,
unpersisted_context,
) = await self.event_creation_handler.create_new_client_event(
builder=builder,
prev_event_ids=prev_event_ids,
)
except SynapseError as e:
logger.warning("Failed to create join to %s because %s", room_id, e)
raise
# Ensure the user can even join the room.
await self._federation_event_handler.check_join_restrictions(context, event)
await self._federation_event_handler.check_join_restrictions(
unpersisted_context, event
)
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
@ -1178,7 +1197,7 @@ class FederationHandler:
},
)
event, context = await self.event_creation_handler.create_new_client_event(
event, _ = await self.event_creation_handler.create_new_client_event(
builder=builder
)
@ -1228,12 +1247,13 @@ class FederationHandler:
},
)
event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
)
(
event,
unpersisted_context,
) = await self.event_creation_handler.create_new_client_event(builder=builder)
event_allowed, _ = await self.third_party_event_rules.check_event_allowed(
event, context
event, unpersisted_context
)
if not event_allowed:
logger.warning("Creation of knock %s forbidden by third-party rules", event)
@ -1406,15 +1426,20 @@ class FederationHandler:
try:
(
event,
context,
unpersisted_context,
) = await self.event_creation_handler.create_new_client_event(
builder=builder
)
event, context = await self.add_display_name_to_third_party_invite(
room_version_obj, event_dict, event, context
(
event,
unpersisted_context,
) = await self.add_display_name_to_third_party_invite(
room_version_obj, event_dict, event, unpersisted_context
)
context = await unpersisted_context.persist(event)
EventValidator().validate_new(event, self.config)
# We need to tell the transaction queue to send this out, even
@ -1483,14 +1508,19 @@ class FederationHandler:
try:
(
event,
context,
unpersisted_context,
) = await self.event_creation_handler.create_new_client_event(
builder=builder
)
event, context = await self.add_display_name_to_third_party_invite(
room_version_obj, event_dict, event, context
(
event,
unpersisted_context,
) = await self.add_display_name_to_third_party_invite(
room_version_obj, event_dict, event, unpersisted_context
)
context = await unpersisted_context.persist(event)
try:
validate_event_for_room_version(event)
await self._event_auth_handler.check_auth_rules_from_context(event)
@ -1522,8 +1552,8 @@ class FederationHandler:
room_version_obj: RoomVersion,
event_dict: JsonDict,
event: EventBase,
context: EventContext,
) -> Tuple[EventBase, EventContext]:
context: UnpersistedEventContextBase,
) -> Tuple[EventBase, UnpersistedEventContextBase]:
key = (
EventTypes.ThirdPartyInvite,
event.content["third_party_invite"]["signed"]["token"],
@ -1557,11 +1587,14 @@ class FederationHandler:
room_version_obj, event_dict
)
EventValidator().validate_builder(builder)
event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
)
(
event,
unpersisted_context,
) = await self.event_creation_handler.create_new_client_event(builder=builder)
EventValidator().validate_new(event, self.config)
return event, context
return event, unpersisted_context
async def _check_signature(self, event: EventBase, context: EventContext) -> None:
"""
@ -1861,6 +1894,11 @@ class FederationHandler:
logger.info("Updating current state for %s", room_id)
# TODO(faster_joins): notify workers in notify_room_un_partial_stated
# https://github.com/matrix-org/synapse/issues/12994
#
# NB: there's a potential race here. If the room is purged just before we
# call this, we _might_ end up inserting rows into current_state_events.
# (The logic is hard to chase through.) We think this is fine, but if
# not, the HS admin should purge the room again.
await self.state_handler.update_current_state(room_id)
logger.info("Handling any pending device list updates")

View File

@ -47,6 +47,7 @@ from synapse.api.errors import (
FederationError,
FederationPullAttemptBackoffError,
HttpResponseException,
PartialStateConflictError,
RequestSendFailed,
SynapseError,
)
@ -58,7 +59,7 @@ from synapse.event_auth import (
validate_event_for_room_version,
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import (
@ -74,7 +75,6 @@ from synapse.replication.http.federation import (
ReplicationFederationSendEventsRestServlet,
)
from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
PersistedEventPosition,
@ -426,7 +426,9 @@ class FederationEventHandler:
return event, context
async def check_join_restrictions(
self, context: EventContext, event: EventBase
self,
context: UnpersistedEventContextBase,
event: EventBase,
) -> None:
"""Check that restrictions in restricted join rules are matched
@ -439,16 +441,17 @@ class FederationEventHandler:
# Check if the user is already in the room or invited to the room.
user_id = event.state_key
prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
prev_member_event = None
prev_membership = None
if prev_member_event_id:
prev_member_event = await self._store.get_event(prev_member_event_id)
prev_membership = prev_member_event.membership
# Check if the member should be allowed access via membership in a space.
await self._event_auth_handler.check_restricted_join_rules(
prev_state_ids,
event.room_version,
user_id,
prev_member_event,
prev_membership,
)
@trace
@ -524,11 +527,57 @@ class FederationEventHandler:
"Peristing join-via-remote %s (partial_state: %s)", event, partial_state
)
with nested_logging_context(suffix=event.event_id):
if partial_state:
# When handling a second partial state join into a partial state room,
# the returned state will exclude the membership from the first join. To
# preserve prior memberships, we try to compute the partial state before
# the event ourselves if we know about any of the prev events.
#
# When we don't know about any of the prev events, it's fine to just use
# the returned state, since the new join will create a new forward
# extremity, and leave the forward extremity containing our prior
# memberships alone.
prev_event_ids = set(event.prev_event_ids())
seen_event_ids = await self._store.have_events_in_timeline(
prev_event_ids
)
missing_event_ids = prev_event_ids - seen_event_ids
state_maps_to_resolve: List[StateMap[str]] = []
# Fetch the state after the prev events that we know about.
state_maps_to_resolve.extend(
(
await self._state_storage_controller.get_state_groups_ids(
room_id, seen_event_ids, await_full_state=False
)
).values()
)
# When there are prev events we do not have the state for, we state
# resolve with the state returned by the remote homeserver.
if missing_event_ids or len(state_maps_to_resolve) == 0:
state_maps_to_resolve.append(
{(e.type, e.state_key): e.event_id for e in state}
)
state_ids_before_event = (
await self._state_resolution_handler.resolve_events_with_store(
event.room_id,
room_version.identifier,
state_maps_to_resolve,
event_map=None,
state_res_store=StateResolutionStore(self._store),
)
)
else:
state_ids_before_event = {
(e.type, e.state_key): e.event_id for e in state
}
context = await self._state_handler.compute_event_context(
event,
state_ids_before_event={
(e.type, e.state_key): e.event_id for e in state
},
state_ids_before_event=state_ids_before_event,
partial_state=partial_state,
)
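Compressed to its essentials, the new branch resolves one state map per locally-known prev event, mixing in the remote's claimed state only when something is missing (editor's sketch; state_after is a stand-in, resolve_events_with_store is the real entry point):

maps: List[StateMap[str]] = [await state_after(e) for e in seen_event_ids]  # hypothetical helper
if missing_event_ids or not maps:
    # Fall back to (or mix in) the state the remote server returned.
    maps.append({(e.type, e.state_key): e.event_id for e in state})

state_ids_before_event = await state_resolution_handler.resolve_events_with_store(
    room_id,
    room_version.identifier,
    maps,
    event_map=None,
    state_res_store=StateResolutionStore(store),
)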

View File

@ -219,28 +219,31 @@ class IdentityHandler:
data = json_decoder.decode(e.msg) # XXX WAT?
return data
async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool:
"""Attempt to remove a 3PID from an identity server, or if one is not provided, all
identity servers we're aware the binding is present on
async def try_unbind_threepid(
self, mxid: str, medium: str, address: str, id_server: Optional[str]
) -> bool:
"""Attempt to remove a 3PID from one or more identity servers.
Args:
mxid: Matrix user ID of binding to be removed
threepid: Dict with medium & address of binding to be
removed, and an optional id_server.
medium: The medium of the third-party ID.
address: The address of the third-party ID.
id_server: An identity server to attempt to unbind from. If None,
attempt to remove the association from all identity servers
known to potentially have it.
Raises:
SynapseError: If we failed to contact the identity server
SynapseError: If we failed to contact one or more identity servers.
Returns:
True on success, otherwise False if the identity
server doesn't support unbinding (or no identity server found to
contact).
True on success, otherwise False if the identity server doesn't
support unbinding (or no identity server to contact was found).
"""
if threepid.get("id_server"):
id_servers = [threepid["id_server"]]
if id_server:
id_servers = [id_server]
else:
id_servers = await self.store.get_id_servers_user_bound(
user_id=mxid, medium=threepid["medium"], address=threepid["address"]
mxid, medium, address
)
# We don't know where to unbind, so we don't have a choice but to return
@ -249,20 +252,21 @@ class IdentityHandler:
changed = True
for id_server in id_servers:
changed &= await self.try_unbind_threepid_with_id_server(
mxid, threepid, id_server
changed &= await self._try_unbind_threepid_with_id_server(
mxid, medium, address, id_server
)
return changed
async def try_unbind_threepid_with_id_server(
self, mxid: str, threepid: dict, id_server: str
async def _try_unbind_threepid_with_id_server(
self, mxid: str, medium: str, address: str, id_server: str
) -> bool:
"""Removes a binding from an identity server
Args:
mxid: Matrix user ID of binding to be removed
threepid: Dict with medium & address of binding to be removed
medium: The medium of the third-party ID
address: The address of the third-party ID
id_server: Identity server to unbind from
Raises:
@ -286,7 +290,7 @@ class IdentityHandler:
content = {
"mxid": mxid,
"threepid": {"medium": threepid["medium"], "address": threepid["address"]},
"threepid": {"medium": medium, "address": address},
}
# we abuse the federation http client to sign the request, but we have to send it
@ -319,12 +323,7 @@ class IdentityHandler:
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
await self.store.remove_user_bound_threepid(
user_id=mxid,
medium=threepid["medium"],
address=threepid["address"],
id_server=id_server,
)
await self.store.remove_user_bound_threepid(mxid, medium, address, id_server)
return changed
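Call sites now pass the three components positionally instead of packing them into a dict. For example (hedged sketch, handler being an IdentityHandler):

# Unbind from one specific identity server:
changed = await handler.try_unbind_threepid(
    "@alice:example.com", "email", "alice@example.com", "id.example.com"
)

# Unbind from every identity server the binding is known to be on:
changed = await handler.try_unbind_threepid(
    "@alice:example.com", "email", "alice@example.com", None
)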

View File

@ -154,9 +154,8 @@ class InitialSyncHandler:
tags_by_room = await self.store.get_tags_for_user(user_id)
account_data, account_data_by_room = await self.store.get_account_data_for_user(
user_id
)
account_data = await self.store.get_global_account_data_for_user(user_id)
account_data_by_room = await self.store.get_room_account_data_for_user(user_id)
public_room_ids = await self.store.get_public_room_ids()

View File

@ -38,6 +38,7 @@ from synapse.api.errors import (
Codes,
ConsentNotGivenError,
NotFoundError,
PartialStateConflictError,
ShadowBanError,
SynapseError,
UnstableSpecAuthError,
@ -48,7 +49,7 @@ from synapse.api.urls import ConsentURIBuilder
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase, relation_from_event
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
from synapse.events.utils import maybe_upsert_event_field
from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler
@ -57,7 +58,6 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
MutableStateMap,
@ -499,9 +499,9 @@ class EventCreationHandler:
self.request_ratelimiter = hs.get_request_ratelimiter()
# We arbitrarily limit concurrent event creation for a room to 5.
# This is to stop us from diverging history *too* much.
self.limiter = Linearizer(max_count=5, name="room_event_creation_limit")
# We limit concurrent event creation for a room to 1. This prevents state resolution
# from occurring when sending bursts of events to a local room
self.limiter = Linearizer(max_count=1, name="room_event_creation_limit")
self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
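With max_count=1 the Linearizer degenerates into a per-room mutex: event creation for a given room is strictly serialised, where previously up to five creations could run concurrently and fork the room's history. Call sites are unchanged; roughly:

async with self.limiter.queue(room_id):
    # Only one task per room_id gets here at a time now, so consecutive
    # events chain off each other instead of creating competing extremities.
    event, unpersisted_context = await self.create_new_client_event(builder=builder)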
@ -708,7 +708,7 @@ class EventCreationHandler:
builder.internal_metadata.historical = historical
event, context = await self.create_new_client_event(
event, unpersisted_context = await self.create_new_client_event(
builder=builder,
requester=requester,
allow_no_prev_events=allow_no_prev_events,
@ -721,6 +721,8 @@ class EventCreationHandler:
current_state_group=current_state_group,
)
context = await unpersisted_context.persist(event)
# In an ideal world we wouldn't need the second part of this condition. However,
# this behaviour isn't spec'd yet, meaning we should be able to deactivate this
# behaviour. Another reason is that this code is also evaluated each time a new
@ -1083,13 +1085,14 @@ class EventCreationHandler:
state_map: Optional[StateMap[str]] = None,
for_batch: bool = False,
current_state_group: Optional[int] = None,
) -> Tuple[EventBase, EventContext]:
) -> Tuple[EventBase, UnpersistedEventContextBase]:
"""Create a new event for a local client. If bool for_batch is true, will
create an event using the prev_event_ids, and will create an event context for
the event using the parameters state_map and current_state_group, thus these parameters
must be provided in this case if for_batch is True. The subsequently created event
and context are suitable for being batched up and bulk persisted to the database
with other similarly created events.
with other similarly created events. Note that this returns an UnpersistedEventContext,
which must be converted to an EventContext before it can be sent to the DB.
Args:
builder:
@ -1131,7 +1134,7 @@ class EventCreationHandler:
batch persisting
Returns:
Tuple of created event, context
Tuple of created event, UnpersistedEventContext
"""
# Strip down the state_event_ids to only what we need to auth the event.
# For example, we don't need extra m.room.member that don't match event.sender
@ -1192,9 +1195,16 @@ class EventCreationHandler:
event = await builder.build(
prev_event_ids=prev_event_ids, auth_event_ids=auth_ids, depth=depth
)
context = await self.state.compute_event_context_for_batched(
event, state_map, current_state_group
context: UnpersistedEventContextBase = (
await self.state.calculate_context_info(
event,
state_ids_before_event=state_map,
partial_state=False,
state_group_before_event=current_state_group,
)
)
else:
event = await builder.build(
prev_event_ids=prev_event_ids,
@ -1244,16 +1254,17 @@ class EventCreationHandler:
state_map_for_event[(data.event_type, data.state_key)] = state_id
context = await self.state.compute_event_context(
# TODO(faster_joins): check how MSC2716 works and whether we can have
# partial state here
# https://github.com/matrix-org/synapse/issues/13003
context = await self.state.calculate_context_info(
event,
state_ids_before_event=state_map_for_event,
# TODO(faster_joins): check how MSC2716 works and whether we can have
# partial state here
# https://github.com/matrix-org/synapse/issues/13003
partial_state=False,
)
else:
context = await self.state.compute_event_context(event)
context = await self.state.calculate_context_info(event)
if requester:
context.app_service = requester.app_service
@ -1326,7 +1337,11 @@ class EventCreationHandler:
relation.parent_id, event.type, aggregation_key, event.sender
)
if already_exists:
raise SynapseError(400, "Can't send same reaction twice")
raise SynapseError(
400,
"Can't send same reaction twice",
errcode=Codes.DUPLICATE_ANNOTATION,
)
# Don't attempt to start a thread if the parent event is a relation.
elif relation.rel_type == RelationTypes.THREAD:
@ -2082,9 +2097,9 @@ class EventCreationHandler:
async def _rebuild_event_after_third_party_rules(
self, third_party_result: dict, original_event: EventBase
) -> Tuple[EventBase, EventContext]:
) -> Tuple[EventBase, UnpersistedEventContextBase]:
# the third_party_event_rules want to replace the event.
# we do some basic checks, and then return the replacement event and context.
# we do some basic checks, and then return the replacement event.
# Construct a new EventBuilder and validate it, which helps with the
# rest of these checks.
@ -2138,5 +2153,6 @@ class EventCreationHandler:
# we rebuild the event context, to be on the safe side. If nothing else,
# delta_ids might need an update.
context = await self.state.compute_event_context(event)
context = await self.state.calculate_context_info(event)
return event, context
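The pattern this file now follows throughout is a two-step create-then-persist: create_new_client_event hands back an UnpersistedEventContextBase, and only callers that actually need a database-ready context convert it. Schematically (editor's sketch):

event, unpersisted_context = await event_creation_handler.create_new_client_event(
    builder=builder
)
# ... run any checks that only need the event / unpersisted context ...
context = await unpersisted_context.persist(event)  # now usable as an EventContext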

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Tuple
from synapse.api.constants import EduTypes, ReceiptTypes
from synapse.appservice import ApplicationService
@ -189,7 +189,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
@staticmethod
def filter_out_private_receipts(
rooms: List[JsonDict], user_id: str
rooms: Sequence[JsonDict], user_id: str
) -> List[JsonDict]:
"""
Filters a list of serialized receipts (as returned by /sync and /initialSync)

View File

@ -43,6 +43,7 @@ from synapse.api.errors import (
Codes,
LimitExceededError,
NotFoundError,
PartialStateConflictError,
StoreError,
SynapseError,
)
@ -54,7 +55,6 @@ from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.streams import EventSource
from synapse.types import (
JsonDict,
@ -1076,7 +1076,7 @@ class RoomCreationHandler:
state_map: MutableStateMap[str] = {}
# current_state_group of last event created. Used for computing event context of
# events to be batched
current_state_group = None
current_state_group: Optional[int] = None
def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
e = {"type": etype, "content": content}
@ -1928,6 +1928,6 @@ class RoomShutdownHandler:
return {
"kicked_users": kicked_users,
"failed_to_kick_users": failed_to_kick_users,
"local_aliases": aliases_for_room,
"local_aliases": list(aliases_for_room),
"new_room_id": new_room_id,
}

View File

@ -26,7 +26,13 @@ from synapse.api.constants import (
GuestAccess,
Membership,
)
from synapse.api.errors import AuthError, Codes, ShadowBanError, SynapseError
from synapse.api.errors import (
AuthError,
Codes,
PartialStateConflictError,
ShadowBanError,
SynapseError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
@ -34,7 +40,6 @@ from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.logging import opentracing
from synapse.module_api import NOT_SPAM
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.types import (
JsonDict,
Requester,
@ -56,6 +61,13 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
class NoKnownServersError(SynapseError):
"""No server already resident to the room was provided to the join/knock operation."""
def __init__(self, msg: str = "No known servers"):
super().__init__(404, msg)
class RoomMemberHandler(metaclass=abc.ABCMeta):
# TODO(paul): This handler currently contains a messy conflation of
# low-level API that works on UserID objects and so on, and REST-level
@ -185,6 +197,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
room_id: Room that we are trying to join
user: User who is trying to join
content: A dict that should be used as the content of the join event.
Raises:
NoKnownServersError: if remote_room_hosts does not contain a server joined to
the room.
"""
raise NotImplementedError()
@ -484,7 +500,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
user_id: The user's ID.
"""
# Retrieve user account data for predecessor room
user_account_data, _ = await self.store.get_account_data_for_user(user_id)
user_account_data = await self.store.get_global_account_data_for_user(user_id)
# Copy direct message state if applicable
direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {})
@ -837,14 +853,19 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
state_before_join = await self.state_handler.compute_state_after_events(
room_id, latest_event_ids
is_partial_state_room = await self.store.is_partial_state_room(room_id)
partial_state_before_join = await self.state_handler.compute_state_after_events(
room_id, latest_event_ids, await_full_state=False
)
# `is_partial_state_room` also indicates whether `partial_state_before_join` is
# partial.
# TODO: Refactor into dictionary of explicitly allowed transitions
# between old and new state, with specific error messages for some
# transitions and generic otherwise
old_state_id = state_before_join.get((EventTypes.Member, target.to_string()))
old_state_id = partial_state_before_join.get(
(EventTypes.Member, target.to_string())
)
if old_state_id:
old_state = await self.store.get_event(old_state_id, allow_none=True)
old_membership = old_state.content.get("membership") if old_state else None
@ -895,11 +916,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if action == "kick":
raise AuthError(403, "The target user is not in the room")
is_host_in_room = await self._is_host_in_room(state_before_join)
is_host_in_room = await self._is_host_in_room(partial_state_before_join)
if effective_membership_state == Membership.JOIN:
if requester.is_guest:
guest_can_join = await self._can_guest_join(state_before_join)
guest_can_join = await self._can_guest_join(partial_state_before_join)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
@ -941,8 +962,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
room_id,
remote_room_hosts,
content,
is_partial_state_room,
is_host_in_room,
state_before_join,
partial_state_before_join,
)
if remote_join:
if ratelimit:
@ -1087,8 +1109,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
room_id: str,
remote_room_hosts: List[str],
content: JsonDict,
is_partial_state_room: bool,
is_host_in_room: bool,
state_before_join: StateMap[str],
partial_state_before_join: StateMap[str],
) -> Tuple[bool, List[str]]:
"""
Check whether the server should do a remote join (as opposed to a local
@ -1107,9 +1130,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
remote_room_hosts: A list of remote room hosts.
content: The content to use as the event body of the join. This may
be modified.
is_host_in_room: True if the host is in the room.
state_before_join: The state before the join event (i.e. the resolution of
the states after its parent events).
is_partial_state_room: `True` if the server currently doesn't hold the full
state of the room.
is_host_in_room: `True` if the host is in the room.
partial_state_before_join: The state before the join event (i.e. the
resolution of the states after its parent events). May be full or
partial state, depending on `is_partial_state_room`.
Returns:
A tuple of:
@ -1123,6 +1149,23 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if not is_host_in_room:
return True, remote_room_hosts
prev_member_event_id = partial_state_before_join.get(
(EventTypes.Member, user_id), None
)
previous_membership = None
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
previous_membership = prev_member_event.membership
# If we are not fully joined yet, and the target is not already in the room,
# let's do a remote join so another server with the full state can validate
# that the user has not been banned for example.
# We could just accept the join and wait for state res to resolve it later,
# but in the meantime we would leak room history to this user, which is
# pretty bad.
if is_partial_state_room and previous_membership != Membership.JOIN:
return True, remote_room_hosts
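# Worked example (editor's illustration): suppose this server is midway
# through a faster (partial state) join of a room, and a local user who was
# previously only *invited* now tries to join. previous_membership != JOIN,
# so we return (True, remote_room_hosts) and let a server with the full
# state validate the join; it might know about a ban we have not synced
# yet. Had the user already been joined, a local join is safe, since they
# could already see the room's history.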
# If the host is in the room, but not one of the authorised hosts
# for restricted join rules, a remote join must be used.
room_version = await self.store.get_room_version(room_id)
@ -1130,21 +1173,19 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# If restricted join rules are not being used, a local join can always
# be used.
if not await self.event_auth_handler.has_restricted_join_rules(
state_before_join, room_version
partial_state_before_join, room_version
):
return False, []
# If the user is invited to the room or already joined, the join
# event can always be issued locally.
prev_member_event_id = state_before_join.get((EventTypes.Member, user_id), None)
prev_member_event = None
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership in (
Membership.JOIN,
Membership.INVITE,
):
return False, []
if previous_membership in (Membership.JOIN, Membership.INVITE):
return False, []
# All the partial state cases are covered above. We have been given the full
# state of the room.
assert not is_partial_state_room
state_before_join = partial_state_before_join
# If the local host has a user who can issue invites, then a local
# join can be done.
@ -1168,7 +1209,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# Ensure the member should be allowed access via membership in a room.
await self.event_auth_handler.check_restricted_join_rules(
state_before_join, room_version, user_id, prev_member_event
state_before_join, room_version, user_id, previous_membership
)
# If this is going to be a local join, additional information must
@ -1318,11 +1359,17 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if prev_member_event.membership == Membership.JOIN:
await self._user_left_room(target_user, room_id)
async def _can_guest_join(self, current_state_ids: StateMap[str]) -> bool:
async def _can_guest_join(self, partial_current_state_ids: StateMap[str]) -> bool:
"""
Returns whether a guest can join a room based on its current state.
Args:
partial_current_state_ids: The current state of the room. May be full or
partial state.
"""
guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None)
guest_access_id = partial_current_state_ids.get(
(EventTypes.GuestAccess, ""), None
)
if not guest_access_id:
return False
@ -1648,19 +1695,25 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
)
return event, stream_id
async def _is_host_in_room(self, current_state_ids: StateMap[str]) -> bool:
async def _is_host_in_room(self, partial_current_state_ids: StateMap[str]) -> bool:
"""Returns whether the homeserver is in the room based on its current state.
Args:
partial_current_state_ids: The current state of the room. May be full or
partial state.
"""
# Have we just created the room, and is this about to be the very
# first member event?
create_event_id = current_state_ids.get(("m.room.create", ""))
if len(current_state_ids) == 1 and create_event_id:
create_event_id = partial_current_state_ids.get(("m.room.create", ""))
if len(partial_current_state_ids) == 1 and create_event_id:
# We can only get here if we're in the process of creating the room
return True
for etype, state_key in current_state_ids:
for etype, state_key in partial_current_state_ids:
if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):
continue
event_id = current_state_ids[(etype, state_key)]
event_id = partial_current_state_ids[(etype, state_key)]
event = await self.store.get_event(event_id, allow_none=True)
if not event:
continue
@ -1729,8 +1782,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
]
if len(remote_room_hosts) == 0:
raise SynapseError(
404,
raise NoKnownServersError(
"Can't join remote room because no servers "
"that are in the room have been provided.",
)
@ -1961,7 +2013,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
]
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
raise NoKnownServersError()
return await self.federation_handler.do_knock(
remote_room_hosts, room_id, user.to_string(), content=content

View File

@ -15,8 +15,7 @@
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
from synapse.api.errors import SynapseError
from synapse.handlers.room_member import RoomMemberHandler
from synapse.handlers.room_member import NoKnownServersError, RoomMemberHandler
from synapse.replication.http.membership import (
ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
ReplicationRemoteKnockRestServlet as ReplRemoteKnock,
@ -52,7 +51,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
) -> Tuple[str, int]:
"""Implements RoomMemberHandler._remote_join"""
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
raise NoKnownServersError()
ret = await self._remote_join_client(
requester=requester,

View File

@ -521,8 +521,8 @@ class RoomSummaryHandler:
It should return true if:
* The requester is joined or can join the room (per MSC3173).
* The origin server has any user that is joined or can join the room.
* The requesting user is joined or can join the room (per MSC3173); or
* The origin server has any user that is joined or can join the room; or
* The history visibility is set to world readable.
Args:

View File

@ -269,6 +269,8 @@ class SyncHandler:
self._state_storage_controller = self._storage_controllers.state
self._device_handler = hs.get_device_handler()
self.should_calculate_push_rules = hs.config.push.enable_push
# TODO: flush cache entries on subsequent sync request.
# Once we get the next /sync request (ie, one with the same access token
# that sets 'since' to 'next_batch'), we know that device won't need a
@ -1288,6 +1290,12 @@ class SyncHandler:
async def unread_notifs_for_room_id(
self, room_id: str, sync_config: SyncConfig
) -> RoomNotifCounts:
if not self.should_calculate_push_rules:
# If push rules have been universally disabled then we know we won't
# have any unread counts in the DB, so we may as well skip asking
# the DB.
return RoomNotifCounts.empty()
with Measure(self.clock, "unread_notifs_for_room_id"):
return await self.store.get_unread_event_push_actions_by_room_for_user(
@ -1391,6 +1399,11 @@ class SyncHandler:
for room_id, is_partial_state in results.items()
if is_partial_state
)
membership_change_events = [
event
for event in membership_change_events
if not results.get(event.room_id, False)
]
# Incremental eager syncs should additionally include rooms that
# - we are joined to
@ -1444,9 +1457,9 @@ class SyncHandler:
logger.debug("Fetching account data")
account_data_by_room = await self._generate_sync_entry_for_account_data(
sync_result_builder
)
# Global account data is included if it is not filtered out.
if not sync_config.filter_collection.blocks_all_global_account_data():
await self._generate_sync_entry_for_account_data(sync_result_builder)
# Presence data is included if the server has it enabled and not filtered out.
include_presence_data = bool(
@ -1472,9 +1485,7 @@ class SyncHandler:
(
newly_joined_rooms,
newly_left_rooms,
) = await self._generate_sync_entry_for_rooms(
sync_result_builder, account_data_by_room
)
) = await self._generate_sync_entry_for_rooms(sync_result_builder)
# Work out which users have joined or left rooms we're in. We use this
# to build the presence and device_list parts of the sync response in
@ -1521,7 +1532,7 @@ class SyncHandler:
one_time_keys_count = await self.store.count_e2e_one_time_keys(
user_id, device_id
)
unused_fallback_key_types = (
unused_fallback_key_types = list(
await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
)
@ -1717,35 +1728,29 @@ class SyncHandler:
async def _generate_sync_entry_for_account_data(
self, sync_result_builder: "SyncResultBuilder"
) -> Dict[str, Dict[str, JsonDict]]:
"""Generates the account data portion of the sync response.
) -> None:
"""Generates the global account data portion of the sync response.
Account data (called "Client Config" in the spec) can be set either globally
or for a specific room. Account data consists of a list of events which
accumulate state, much like a room.
This function retrieves global and per-room account data. The former is written
to the given `sync_result_builder`. The latter is returned directly, to be
later written to the `sync_result_builder` on a room-by-room basis.
This function retrieves global account data and writes it to the given
`sync_result_builder`. See `_generate_sync_entry_for_rooms` for handling
of per-room account data.
Args:
sync_result_builder
Returns:
A dictionary whose keys (room ids) map to the per room account data for that
room.
"""
sync_config = sync_result_builder.sync_config
user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
if since_token and not sync_result_builder.full_state:
# TODO Do not fetch room account data if it will be unused.
(
global_account_data,
account_data_by_room,
) = await self.store.get_updated_account_data_for_user(
user_id, since_token.account_data_key
global_account_data = (
await self.store.get_updated_global_account_data_for_user(
user_id, since_token.account_data_key
)
)
push_rules_changed = await self.store.have_push_rules_changed_for_user(
@ -1753,31 +1758,31 @@ class SyncHandler:
)
if push_rules_changed:
global_account_data = dict(global_account_data)
global_account_data["m.push_rules"] = await self.push_rules_for_user(
sync_config.user
)
else:
# TODO Do not fetch room account data if it will be unused.
(
global_account_data,
account_data_by_room,
) = await self.store.get_account_data_for_user(sync_config.user.to_string())
all_global_account_data = await self.store.get_global_account_data_for_user(
user_id
)
global_account_data = dict(all_global_account_data)
global_account_data["m.push_rules"] = await self.push_rules_for_user(
sync_config.user
)
account_data_for_user = await sync_config.filter_collection.filter_account_data(
[
{"type": account_data_type, "content": content}
for account_data_type, content in global_account_data.items()
]
account_data_for_user = (
await sync_config.filter_collection.filter_global_account_data(
[
{"type": account_data_type, "content": content}
for account_data_type, content in global_account_data.items()
]
)
)
sync_result_builder.account_data = account_data_for_user
return account_data_by_room
async def _generate_sync_entry_for_presence(
self,
sync_result_builder: "SyncResultBuilder",
@ -1837,9 +1842,7 @@ class SyncHandler:
sync_result_builder.presence = presence
async def _generate_sync_entry_for_rooms(
self,
sync_result_builder: "SyncResultBuilder",
account_data_by_room: Dict[str, Dict[str, JsonDict]],
self, sync_result_builder: "SyncResultBuilder"
) -> Tuple[AbstractSet[str], AbstractSet[str]]:
"""Generates the rooms portion of the sync response. Populates the
`sync_result_builder` with the result.
@ -1850,7 +1853,6 @@ class SyncHandler:
Args:
sync_result_builder
account_data_by_room: Dictionary of per room account data
Returns:
Returns a 2-tuple describing rooms the user has joined or left.
@ -1863,9 +1865,30 @@ class SyncHandler:
since_token = sync_result_builder.since_token
user_id = sync_result_builder.sync_config.user.to_string()
blocks_all_rooms = (
sync_result_builder.sync_config.filter_collection.blocks_all_rooms()
)
# 0. Start by fetching room account data (if required).
if (
blocks_all_rooms
or sync_result_builder.sync_config.filter_collection.blocks_all_room_account_data()
):
account_data_by_room: Mapping[str, Mapping[str, JsonDict]] = {}
elif since_token and not sync_result_builder.full_state:
account_data_by_room = (
await self.store.get_updated_room_account_data_for_user(
user_id, since_token.account_data_key
)
)
else:
account_data_by_room = await self.store.get_room_account_data_for_user(
user_id
)
# 1. Start by fetching all ephemeral events in rooms we've joined (if required).
block_all_room_ephemeral = (
sync_result_builder.sync_config.filter_collection.blocks_all_rooms()
blocks_all_rooms
or sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral()
)
if block_all_room_ephemeral:
@ -2291,8 +2314,8 @@ class SyncHandler:
sync_result_builder: "SyncResultBuilder",
room_builder: "RoomSyncResultBuilder",
ephemeral: List[JsonDict],
tags: Optional[Dict[str, Dict[str, Any]]],
account_data: Dict[str, JsonDict],
tags: Optional[Mapping[str, Mapping[str, Any]]],
account_data: Mapping[str, JsonDict],
always_include: bool = False,
) -> None:
"""Populates the `joined` and `archived` section of `sync_result_builder`

View File

@ -13,7 +13,8 @@
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, ClassVar, Sequence, Type
from twisted.web.client import PartialDownloadError
@ -27,19 +28,28 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
class UserInteractiveAuthChecker:
class UserInteractiveAuthChecker(ABC):
"""Abstract base class for an interactive auth checker"""
def __init__(self, hs: "HomeServer"):
# This should really be an "abstract class property", i.e. it should
# be an error to instantiate a subclass that doesn't specify an AUTH_TYPE.
# But calling this a `ClassVar` is simpler than a decorator stack of
# @property @abstractmethod and @classmethod (if that's even the right order).
AUTH_TYPE: ClassVar[str]
def __init__(self, hs: "HomeServer"): # noqa: B027
pass
@abstractmethod
def is_enabled(self) -> bool:
"""Check if the configuration of the homeserver allows this checker to work
Returns:
True if this login type is enabled.
"""
raise NotImplementedError()
@abstractmethod
async def check_auth(self, authdict: dict, clientip: str) -> Any:
"""Given the authentication dict from the client, attempt to check this step
@ -304,7 +314,7 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker):
)
INTERACTIVE_AUTH_CHECKERS = [
INTERACTIVE_AUTH_CHECKERS: Sequence[Type[UserInteractiveAuthChecker]] = [
DummyAuthChecker,
TermsAuthChecker,
RecaptchaAuthChecker,

View File

@ -1267,7 +1267,7 @@ class MatrixFederationHttpClient:
def _flatten_response_never_received(e: BaseException) -> str:
if hasattr(e, "reasons"):
reasons = ", ".join(
_flatten_response_never_received(f.value) for f in e.reasons # type: ignore[attr-defined]
_flatten_response_never_received(f.value) for f in e.reasons
)
return "%s:[%s]" % (type(e).__name__, reasons)

View File

@ -30,7 +30,6 @@ from typing import (
Iterable,
Iterator,
List,
NoReturn,
Optional,
Pattern,
Tuple,
@ -340,7 +339,8 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
return callback_return
return _unrecognised_request_handler(request)
# A request with an unknown method (for a known endpoint) was received.
raise UnrecognizedRequestError(code=405)
@abc.abstractmethod
def _send_response(
@ -396,7 +396,6 @@ class DirectServeJsonResource(_AsyncResource):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _PathEntry:
pattern: Pattern
callback: ServletCallback
servlet_classname: str
@ -425,13 +424,14 @@ class JsonResource(DirectServeJsonResource):
):
super().__init__(canonical_json, extract_context)
self.clock = hs.get_clock()
self.path_regexs: Dict[bytes, List[_PathEntry]] = {}
# Map of path regex -> method -> callback.
self._routes: Dict[Pattern[str], Dict[bytes, _PathEntry]] = {}
self.hs = hs
def register_paths(
self,
method: str,
path_patterns: Iterable[Pattern],
path_patterns: Iterable[Pattern[str]],
callback: ServletCallback,
servlet_classname: str,
) -> None:
@ -455,8 +455,8 @@ class JsonResource(DirectServeJsonResource):
for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method_bytes, []).append(
_PathEntry(path_pattern, callback, servlet_classname)
self._routes.setdefault(path_pattern, {})[method_bytes] = _PathEntry(
callback, servlet_classname
)
def _get_handler_for_request(
@ -478,14 +478,17 @@ class JsonResource(DirectServeJsonResource):
# Loop through all the registered callbacks to check if the method
# and path regex match
for path_entry in self.path_regexs.get(request_method, []):
m = path_entry.pattern.match(request_path)
for path_pattern, methods in self._routes.items():
m = path_pattern.match(request_path)
if m:
# We found a match!
# We found a matching path!
path_entry = methods.get(request_method)
if not path_entry:
raise UnrecognizedRequestError(code=405)
return path_entry.callback, path_entry.servlet_classname, m.groupdict()
# Huh. No one wanted to handle that? Fiiiiiine. Send 400.
return _unrecognised_request_handler, "unrecognised_request_handler", {}
# Huh. No one wanted to handle that? Fiiiiiine.
raise UnrecognizedRequestError(code=404)
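The observable effect of the two-level map, in brief: an unknown path is now a 404, while a known path hit with an unsupported verb is a 405 rather than the old blanket response. Roughly:

# routes: Dict[Pattern[str], Dict[bytes, _PathEntry]], e.g.
#   re"/rooms/(?P<room_id>[^/]+)"  ->  {b"GET": entry, b"PUT": entry}
#
# GET  /rooms/!abc  -> pattern matches, b"GET" registered  -> handler runs
# POST /rooms/!abc  -> pattern matches, b"POST" missing    -> 405
# GET  /nonsense    -> no pattern matches                  -> 404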
async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]:
callback, servlet_classname, group_dict = self._get_handler_for_request(request)
@ -567,19 +570,6 @@ class StaticResource(File):
return super().render_GET(request)
def _unrecognised_request_handler(request: Request) -> NoReturn:
"""Request handler for unrecognised requests
This is a request handler suitable for return from
_get_handler_for_request. It actually just raises an
UnrecognizedRequestError.
Args:
request: Unused, but passed in to match the signature of ServletCallback.
"""
raise UnrecognizedRequestError(code=404)
class UnrecognizedRequestResource(resource.Resource):
"""
Similar to twisted.web.resource.NoResource, but returns a JSON 404 with an

View File

@ -188,7 +188,7 @@ from typing import (
)
import attr
from typing_extensions import ParamSpec
from typing_extensions import Concatenate, ParamSpec
from twisted.internet import defer
from twisted.web.http import Request
@ -445,7 +445,7 @@ def init_tracer(hs: "HomeServer") -> None:
opentracing = None # type: ignore[assignment]
return
if not opentracing or not JaegerConfig:
if opentracing is None or JaegerConfig is None:
raise ConfigError(
"The server has been configured to use opentracing but opentracing is not "
"installed."
@ -466,8 +466,16 @@ def init_tracer(hs: "HomeServer") -> None:
STRIP_INSTANCE_NUMBER_SUFFIX_REGEX, "", hs.get_instance_name()
)
jaeger_config = hs.config.tracing.jaeger_config
tags = jaeger_config.setdefault("tags", {})
# Tag the Synapse instance name so that it's an easy jumping-off
# point into the logs. Can also be used to filter for an
# instance that is under load.
tags[SynapseTags.INSTANCE_NAME] = hs.get_instance_name()
config = JaegerConfig(
config=hs.config.tracing.jaeger_config,
config=jaeger_config,
service_name=f"{hs.config.server.server_name} {instance_name_by_type}",
scope_manager=LogContextScopeManager(),
metrics_factory=PrometheusMetricsFactory(),
@ -864,7 +872,7 @@ def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanConte
def _custom_sync_async_decorator(
func: Callable[P, R],
wrapping_logic: Callable[[Callable[P, R], Any, Any], ContextManager[None]],
wrapping_logic: Callable[Concatenate[Callable[P, R], P], ContextManager[None]],
) -> Callable[P, R]:
"""
Decorates a function that is sync or async (coroutines), or that returns a Twisted
@ -894,10 +902,14 @@ def _custom_sync_async_decorator(
"""
if inspect.iscoroutinefunction(func):
# In this branch, R = Awaitable[RInner], for some other type RInner
@wraps(func)
async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
async def _wrapper(
*args: P.args, **kwargs: P.kwargs
) -> Any: # Return type is RInner
with wrapping_logic(func, *args, **kwargs):
# type-ignore: func() returns R, but mypy doesn't know that R is
# Awaitable here.
return await func(*args, **kwargs) # type: ignore[misc]
else:
@ -964,7 +976,11 @@ def trace_with_opname(
if not opentracing:
return func
return _custom_sync_async_decorator(func, _wrapping_logic)
# type-ignore: mypy seems to be confused by the ParamSpecs here.
# I think the problem is https://github.com/python/mypy/issues/12909
return _custom_sync_async_decorator(
func, _wrapping_logic # type: ignore[arg-type]
)
return _decorator
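The decorator's behaviour is unchanged by the typing fix; usage remains (sketch):

@trace_with_opname("persist_events")
async def persist_events(events: List[EventBase]) -> None:
    # Runs inside an opentracing span named "persist_events" when tracing is
    # configured; if opentracing is not installed, the function is returned
    # unwrapped.
    ...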
@ -1010,7 +1026,9 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]:
set_tag(SynapseTags.FUNC_KWARGS, str(kwargs))
yield
return _custom_sync_async_decorator(func, _wrapping_logic)
# type-ignore: mypy seems to be confused by the ParamSpecs here.
# I think the problem is https://github.com/python/mypy/issues/12909
return _custom_sync_async_decorator(func, _wrapping_logic) # type: ignore[arg-type]
@contextlib.contextmanager

View File

@ -22,6 +22,7 @@ from typing import (
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
@ -43,6 +44,7 @@ from synapse.events.snapshot import EventContext
from synapse.state import POWER_KEY
from synapse.storage.databases.main.roommember import EventIdMembership
from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator
from synapse.types import JsonValue
from synapse.types.state import StateFilter
from synapse.util.caches import register_cache
from synapse.util.metrics import measure_func
@ -148,7 +150,7 @@ class BulkPushRuleEvaluator:
# little, we can skip fetching a huge number of push rules in large rooms.
# This helps make joins and leaves faster.
if event.type == EventTypes.Member:
local_users = []
local_users: Sequence[str] = []
# We never notify a user about their own actions. This is enforced in
# `_action_for_event_by_user` in the loop over `rules_by_user`, but we
# do the same check here to avoid unnecessary DB queries.
@ -183,7 +185,6 @@ class BulkPushRuleEvaluator:
if event.type == EventTypes.Member and event.membership == Membership.INVITE:
invited = event.state_key
if invited and self.hs.is_mine_id(invited) and invited not in local_users:
local_users = list(local_users)
local_users.append(invited)
if not local_users:
@ -256,13 +257,15 @@ class BulkPushRuleEvaluator:
return pl_event.content if pl_event else {}, sender_level
async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]:
async def _related_events(
self, event: EventBase
) -> Dict[str, Dict[str, JsonValue]]:
"""Fetches the related events for 'event'. Sets the im.vector.is_falling_back key if the event is from a fallback relation
Returns:
Mapping of relation type to flattened events.
"""
related_events: Dict[str, Dict[str, str]] = {}
related_events: Dict[str, Dict[str, JsonValue]] = {}
if self._related_event_match_enabled:
related_event_id = event.content.get("m.relates_to", {}).get("event_id")
relation_type = event.content.get("m.relates_to", {}).get("rel_type")
@ -271,7 +274,10 @@ class BulkPushRuleEvaluator:
related_event_id, allow_none=True
)
if related_event is not None:
related_events[relation_type] = _flatten_dict(related_event)
related_events[relation_type] = _flatten_dict(
related_event,
msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key,
)
reply_event_id = (
event.content.get("m.relates_to", {})
@ -286,7 +292,10 @@ class BulkPushRuleEvaluator:
)
if related_event is not None:
related_events["m.in_reply_to"] = _flatten_dict(related_event)
related_events["m.in_reply_to"] = _flatten_dict(
related_event,
msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key,
)
# indicate that this is from a fallback relation.
if relation_type == "m.thread" and event.content.get(
@ -391,7 +400,6 @@ class BulkPushRuleEvaluator:
mentions = event.content.get(EventContentFields.MSC3952_MENTIONS)
has_mentions = self._intentional_mentions_enabled and isinstance(mentions, dict)
user_mentions: Set[str] = set()
room_mention = False
if has_mentions:
# mypy seems to have lost the type even though it must be a dict here.
assert isinstance(mentions, dict)
@ -401,14 +409,14 @@ class BulkPushRuleEvaluator:
user_mentions = set(
filter(lambda item: isinstance(item, str), user_mentions_raw)
)
# Room mention is only true if the value is exactly true.
room_mention = mentions.get("room") is True
evaluator = PushRuleEvaluator(
_flatten_dict(event),
_flatten_dict(
event,
msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key,
),
has_mentions,
user_mentions,
room_mention,
room_member_count,
sender_power_level,
notification_levels,
@ -416,6 +424,8 @@ class BulkPushRuleEvaluator:
self._related_event_match_enabled,
event.room_version.msc3931_push_features,
self.hs.config.experimental.msc1767_enabled, # MSC3931 flag
self.hs.config.experimental.msc3758_exact_event_match,
self.hs.config.experimental.msc3966_exact_event_property_contains,
)
users = rules_by_user.keys()
@ -489,16 +499,22 @@ RulesByUser = Dict[str, List[Rule]]
StateGroup = Union[object, int]
def _is_simple_value(value: Any) -> bool:
return isinstance(value, (bool, str)) or type(value) is int or value is None
def _flatten_dict(
d: Union[EventBase, Mapping[str, Any]],
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, str]] = None,
) -> Dict[str, str]:
result: Optional[Dict[str, JsonValue]] = None,
*,
msc3783_escape_event_match_key: bool = False,
) -> Dict[str, JsonValue]:
"""
Given a JSON dictionary (or event) which might contain sub dictionaries,
flatten it into a single layer dictionary by combining the keys & sub-keys.
Any (non-dictionary), non-string value is dropped.
String, integer, boolean, null or lists of those values are kept. All others are dropped.
Transforms:
@ -521,11 +537,24 @@ def _flatten_dict(
if result is None:
result = {}
for key, value in d.items():
if isinstance(value, str):
result[".".join(prefix + [key])] = value.lower()
if msc3783_escape_event_match_key:
# Escape periods in the key with a backslash (and backslashes with an
# extra backslash). This is because a period is used as the separator
# between nested fields.
key = key.replace("\\", "\\\\").replace(".", "\\.")
if _is_simple_value(value):
result[".".join(prefix + [key])] = value
elif isinstance(value, (list, tuple)):
result[".".join(prefix + [key])] = [v for v in value if _is_simple_value(v)]
elif isinstance(value, Mapping):
# do not set `room_version` due to recursion considerations below
_flatten_dict(value, prefix=(prefix + [key]), result=result)
_flatten_dict(
value,
prefix=(prefix + [key]),
result=result,
msc3783_escape_event_match_key=msc3783_escape_event_match_key,
)
# `room_version` should only ever be set when looking at the top level of an event
if (
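
For illustration, a standalone sketch of the flattening and key-escaping behaviour described above (simplified: it omits the event-object and `room_version` handling of the real function):

from typing import Any, Dict, List, Optional


def _is_simple_value(value: Any) -> bool:
    return isinstance(value, (bool, str)) or type(value) is int or value is None


def flatten(
    d: Dict[str, Any],
    prefix: Optional[List[str]] = None,
    result: Optional[Dict[str, Any]] = None,
    *,
    escape_keys: bool = False,
) -> Dict[str, Any]:
    if prefix is None:
        prefix = []
    if result is None:
        result = {}
    for key, value in d.items():
        if escape_keys:
            # A period separates nesting levels, so escape literal periods
            # (and backslashes) that appear inside keys.
            key = key.replace("\\", "\\\\").replace(".", "\\.")
        if _is_simple_value(value):
            result[".".join(prefix + [key])] = value
        elif isinstance(value, (list, tuple)):
            result[".".join(prefix + [key])] = [v for v in value if _is_simple_value(v)]
        elif isinstance(value, dict):
            flatten(value, prefix + [key], result, escape_keys=escape_keys)
    return result


# Prints {'m\\.relates_to.rel_type': 'm.thread'}: the period inside the key is
# escaped, while the separator period is not.
print(flatten({"m.relates_to": {"rel_type": "m.thread"}}, escape_keys=True))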

View File

@ -370,15 +370,23 @@ class ReplicationDataHandler:
# We measure here to get in flight counts and average waiting time.
with Measure(self._clock, "repl.wait_for_stream_position"):
logger.info(
"Waiting for repl stream %r to reach %s (%s)",
"Waiting for repl stream %r to reach %s (%s); currently at: %s",
stream_name,
position,
instance_name,
current_position,
)
try:
await make_deferred_yieldable(deferred)
except defer.TimeoutError:
logger.error("Timed out waiting for stream %s", stream_name)
logger.error(
"Timed out waiting for repl stream %r to reach %s (%s)"
"; currently at: %s",
stream_name,
position,
instance_name,
self._streams[stream_name].current_token(instance_name),
)
return
logger.info(

View File

@ -15,7 +15,7 @@
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
from typing import TYPE_CHECKING, Optional, Tuple
from synapse.api.constants import Direction
from synapse.api.errors import Codes, NotFoundError, SynapseError
@ -285,7 +285,12 @@ class DeleteMediaByDateSize(RestServlet):
timestamp and size.
"""
PATTERNS = admin_patterns("/media/(?P<server_name>[^/]*)/delete$")
PATTERNS = [
*admin_patterns("/media/delete$"),
# This URL is kept around for legacy reasons; it is undesirable since it
# overlaps with the DeleteMediaByID servlet.
*admin_patterns("/media/(?P<server_name>[^/]*)/delete$"),
]
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
@ -294,7 +299,7 @@ class DeleteMediaByDateSize(RestServlet):
self.media_repository = hs.get_media_repository()
async def on_POST(
self, request: SynapseRequest, server_name: str
self, request: SynapseRequest, server_name: Optional[str] = None
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
@ -322,7 +327,8 @@ class DeleteMediaByDateSize(RestServlet):
errcode=Codes.INVALID_PARAM,
)
if self.server_name != server_name:
# This check is useless; we keep it only for the legacy endpoint.
if server_name is not None and self.server_name != server_name:
raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local media")
logging.info(
@ -489,6 +495,8 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer)
ProtectMediaByID(hs).register(http_server)
UnprotectMediaByID(hs).register(http_server)
ListMediaInRoom(hs).register(http_server)
DeleteMediaByID(hs).register(http_server)
# XXX DeleteMediaByDateSize must be registered before DeleteMediaByID as
# their URL routes overlap.
DeleteMediaByDateSize(hs).register(http_server)
DeleteMediaByID(hs).register(http_server)
UserMediaRestServlet(hs).register(http_server)
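
The ordering comment exists because dispatch goes to whichever registered pattern matches first, and the legacy delete-by-date-size path is also a plausible match for a delete-by-ID pattern. A quick regex illustration (the exact DeleteMediaByID pattern is an assumption here, and the admin URL prefix is elided):

import re

BY_DATE_SIZE = re.compile(r"/media/(?P<server_name>[^/]*)/delete$")
BY_ID = re.compile(r"/media/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$")

path = "/media/example.org/delete"
assert BY_DATE_SIZE.search(path)
# The by-ID pattern matches too, capturing media_id == "delete" -- so the
# date/size servlet must be registered first to win the dispatch.
m = BY_ID.search(path)
assert m and m.group("media_id") == "delete"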

View File

@ -1192,7 +1192,8 @@ class AccountDataRestServlet(RestServlet):
if not await self._store.get_user_by_id(user_id):
raise NotFoundError("User not found")
global_data, by_room_data = await self._store.get_account_data_for_user(user_id)
global_data = await self._store.get_global_account_data_for_user(user_id)
by_room_data = await self._store.get_room_account_data_for_user(user_id)
return HTTPStatus.OK, {
"account_data": {
"global": global_data,

View File

@ -737,12 +737,7 @@ class ThreepidUnbindRestServlet(RestServlet):
# Attempt to unbind the threepid from an identity server. If id_server is None, try to
# unbind from all identity servers this threepid has been added to in the past
result = await self.identity_handler.try_unbind_threepid(
requester.user.to_string(),
{
"address": body.address,
"medium": body.medium,
"id_server": body.id_server,
},
requester.user.to_string(), body.medium, body.address, body.id_server
)
return 200, {"id_server_unbind_result": "success" if result else "no-support"}

View File

@ -16,7 +16,7 @@ import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import Codes, SynapseError
from synapse.api.errors import Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
@ -39,6 +39,7 @@ class ReportEventRestServlet(RestServlet):
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
self._event_handler = self.hs.get_event_handler()
async def on_POST(
self, request: SynapseRequest, room_id: str, event_id: str
@ -61,6 +62,14 @@ class ReportEventRestServlet(RestServlet):
Codes.BAD_JSON,
)
event = await self._event_handler.get_event(
requester.user, room_id, event_id, show_redacted=False
)
if event is None:
raise NotFoundError(
"Unable to report event: it does not exist or you aren't able to see it."
)
await self.store.add_event_report(
room_id=room_id,
event_id=event_id,

View File

@ -259,6 +259,32 @@ class RoomKeysNewVersionServlet(RestServlet):
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""
Retrieve the version information about the most current backup version (if any)
It takes out an exclusive lock on this user's room_key backups, to ensure
clients only upload to the current backup.
Returns 404 if the given version does not exist.
GET /room_keys/version HTTP/1.1
{
"version": "12345",
"algorithm": "m.megolm_backup.v1",
"auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
}
"""
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
try:
info = await self.e2e_room_keys_handler.get_version_info(user_id)
except SynapseError as e:
if e.code == 404:
raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
return 200, info
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""
Create a new backup version for this user's room_keys with the given
@ -301,7 +327,7 @@ class RoomKeysNewVersionServlet(RestServlet):
class RoomKeysVersionServlet(RestServlet):
PATTERNS = client_patterns("/room_keys/version(/(?P<version>[^/]+))?$")
PATTERNS = client_patterns("/room_keys/version/(?P<version>[^/]+)$")
def __init__(self, hs: "HomeServer"):
super().__init__()
@ -309,12 +335,11 @@ class RoomKeysVersionServlet(RestServlet):
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
async def on_GET(
self, request: SynapseRequest, version: Optional[str]
self, request: SynapseRequest, version: str
) -> Tuple[int, JsonDict]:
"""
Retrieve the version information about a given version of the user's
room_keys backup. If the version part is missing, returns info about the
most current backup version (if any)
room_keys backup.
It takes out an exclusive lock on this user's room_key backups, to ensure
clients only upload to the current backup.
@ -339,20 +364,16 @@ class RoomKeysVersionServlet(RestServlet):
return 200, info
async def on_DELETE(
self, request: SynapseRequest, version: Optional[str]
self, request: SynapseRequest, version: str
) -> Tuple[int, JsonDict]:
"""
Delete the information about a given version of the user's
room_keys backup. If the version part is missing, deletes the most
current backup version (if any). Doesn't delete the actual room data.
room_keys backup. Doesn't delete the actual room data.
DELETE /room_keys/version/12345 HTTP/1.1
HTTP/1.1 200 OK
{}
"""
if version is None:
raise SynapseError(400, "No version specified to delete", Codes.NOT_FOUND)
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
@ -360,7 +381,7 @@ class RoomKeysVersionServlet(RestServlet):
return 200, {}
async def on_PUT(
self, request: SynapseRequest, version: Optional[str]
self, request: SynapseRequest, version: str
) -> Tuple[int, JsonDict]:
"""
Update the information about a given version of the user's room_keys backup.
@ -386,11 +407,6 @@ class RoomKeysVersionServlet(RestServlet):
user_id = requester.user.to_string()
info = parse_json_object_from_request(request)
if version is None:
raise SynapseError(
400, "No version specified to update", Codes.MISSING_PARAM
)
await self.e2e_room_keys_handler.update_version(user_id, version, info)
return 200, {}

View File

@ -34,7 +34,9 @@ class TagListServlet(RestServlet):
GET /user/{user_id}/rooms/{room_id}/tags HTTP/1.1
"""
PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags")
PATTERNS = client_patterns(
"/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags$"
)
def __init__(self, hs: "HomeServer"):
super().__init__()

View File

@ -16,6 +16,7 @@
import logging
import os
import urllib
from abc import ABC, abstractmethod
from types import TracebackType
from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
@ -284,13 +285,14 @@ async def respond_with_responder(
finish_request(request)
class Responder:
class Responder(ABC):
"""Represents a response that can be streamed to the requester.
Responder is a context manager which *must* be used, so that any resources
held can be cleaned up.
"""
@abstractmethod
def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
"""Stream response into consumer
@ -300,11 +302,12 @@ class Responder:
Returns:
Resolves once the response has finished being written
"""
raise NotImplementedError()
def __enter__(self) -> None:
def __enter__(self) -> None: # noqa: B027
pass
def __exit__(
def __exit__( # noqa: B027
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
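
The `noqa: B027` markers suppress flake8-bugbear's warning about empty, non-abstract methods on an ABC: the no-op `__enter__`/`__exit__` are deliberate defaults, and only `write_to_consumer` is abstract. A hypothetical concrete responder, for illustration only:

from twisted.internet.interfaces import IConsumer


class BytesResponder(Responder):  # `Responder` as defined above
    """Illustrative subclass that streams a fixed byte string."""

    def __init__(self, data: bytes) -> None:
        self._data = data

    async def write_to_consumer(self, consumer: IConsumer) -> None:
        # Hand the whole payload to the consumer in one write.
        consumer.write(self._data)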

View File

@ -46,10 +46,9 @@ from ._base import FileInfo, Responder
from .filepath import MediaFilePaths
if TYPE_CHECKING:
from synapse.rest.media.v1.storage_provider import StorageProvider
from synapse.server import HomeServer
from .storage_provider import StorageProviderWrapper
logger = logging.getLogger(__name__)
@ -68,7 +67,7 @@ class MediaStorage:
hs: "HomeServer",
local_media_directory: str,
filepaths: MediaFilePaths,
storage_providers: Sequence["StorageProviderWrapper"],
storage_providers: Sequence["StorageProvider"],
):
self.hs = hs
self.reactor = hs.get_reactor()
@ -360,7 +359,7 @@ class ReadableFileWrapper:
clock: Clock
path: str
async def write_chunks_to(self, callback: Callable[[bytes], None]) -> None:
async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None:
"""Reads the file in chunks and calls the callback with each chunk."""
with open(self.path, "rb") as file:

View File

@ -21,7 +21,7 @@
import abc
import functools
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar, cast
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, TypeVar, cast
from twisted.internet.interfaces import IOpenSSLContextFactory
from twisted.internet.tcp import Port
@ -144,10 +144,10 @@ if TYPE_CHECKING:
from synapse.handlers.saml import SamlHandler
T = TypeVar("T", bound=Callable[..., Any])
T = TypeVar("T")
def cache_in_self(builder: T) -> T:
def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer"], T]:
"""Wraps a function called e.g. `get_foo`, checking if `self.foo` exists and
returning if so. If not, calls the given function and sets `self.foo` to it.
@ -166,7 +166,7 @@ def cache_in_self(builder: T) -> T:
building = [False]
@functools.wraps(builder)
def _get(self):
def _get(self: "HomeServer") -> T:
try:
return getattr(self, depname)
except AttributeError:
@ -185,9 +185,7 @@ def cache_in_self(builder: T) -> T:
return dep
# We cast here as we need to tell mypy that `_get` has the same signature as
# `builder`.
return cast(T, _get)
return _get
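
With `builder` typed as `Callable[["HomeServer"], T]`, the inner `_get` now carries the same signature, so the `cast` is no longer needed. A stripped-down sketch of the memoisation (toy class names, and without the re-entrancy guard the real code keeps in `building`):

import functools
from typing import Callable, TypeVar

T = TypeVar("T")


def cache_in_self(builder: Callable[["Server"], T]) -> Callable[["Server"], T]:
    # `get_foo` caches its result on `self._foo`.
    depname = "_" + builder.__name__[len("get_") :]

    @functools.wraps(builder)
    def _get(self: "Server") -> T:
        try:
            return getattr(self, depname)
        except AttributeError:
            dep = builder(self)
            setattr(self, depname, dep)
            return dep

    return _get


class Server:
    @cache_in_self
    def get_answer(self) -> int:
        print("building")
        return 42


s = Server()
s.get_answer()  # prints "building" and caches the result on self._answer
s.get_answer()  # served from the cached attribute, no rebuild
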
class HomeServer(metaclass=abc.ABCMeta):
@ -829,6 +827,7 @@ class HomeServer(metaclass=abc.ABCMeta):
hs=self,
host=self.config.redis.redis_host,
port=self.config.redis.redis_port,
dbid=self.config.redis.redis_dbid,
password=self.config.redis.redis_password,
reconnect=True,
)

View File

@ -39,7 +39,11 @@ from prometheus_client import Counter, Histogram
from synapse.api.constants import EventTypes
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.snapshot import (
EventContext,
UnpersistedEventContext,
UnpersistedEventContextBase,
)
from synapse.logging.context import ContextResourceUsage
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2
@ -222,7 +226,7 @@ class StateHandler:
return await ret.get_state(self._state_storage_controller, state_filter)
async def get_current_user_ids_in_room(
self, room_id: str, latest_event_ids: List[str]
self, room_id: str, latest_event_ids: Collection[str]
) -> Set[str]:
"""
Get the user IDs who are currently in a room.
@ -262,31 +266,31 @@ class StateHandler:
state = await entry.get_state(self._state_storage_controller, StateFilter.all())
return await self.store.get_joined_hosts(room_id, state, entry)
async def compute_event_context(
async def calculate_context_info(
self,
event: EventBase,
state_ids_before_event: Optional[StateMap[str]] = None,
partial_state: Optional[bool] = None,
) -> EventContext:
"""Build an EventContext structure for a non-outlier event.
state_group_before_event: Optional[int] = None,
) -> UnpersistedEventContextBase:
"""
Calculates the contents of an unpersisted event context, other than the current
state group (which is either provided or calculated when the event context is persisted)
(for an outlier, call EventContext.for_outlier directly)
This works out what the current state should be for the event, and
generates a new state group if necessary.
Args:
event:
state_ids_before_event: The event ids of the state before the event if
it can't be calculated from existing events. This is normally
only specified when receiving an event from federation where we
don't have the prev events, e.g. when backfilling.
partial_state:
`True` if `state_ids_before_event` is partial and omits non-critical
membership events.
`False` if `state_ids_before_event` is the full state.
`None` when `state_ids_before_event` is not provided. In this case, the
flag will be calculated based on `event`'s prev events.
state_ids_before_event:
The event ids of the full state before the event if
it can't be calculated from existing events. This is normally
only specified when receiving an event from federation where we
don't have the prev events, e.g. when backfilling or when the event
is being created for batch persisting.
partial_state:
`True` if `state_ids_before_event` is partial and omits non-critical
membership events.
`False` if `state_ids_before_event` is the full state.
`None` when `state_ids_before_event` is not provided. In this case, the
flag will be calculated based on `event`'s prev events.
state_group_before_event:
the current state group at the time of event, if known
Returns:
The event context.
@ -294,7 +298,6 @@ class StateHandler:
RuntimeError if `state_ids_before_event` is not provided and one or more
prev events are missing or outliers.
"""
assert not event.internal_metadata.is_outlier()
#
@ -306,17 +309,6 @@ class StateHandler:
state_group_before_event_prev_group = None
deltas_to_state_group_before_event = None
# .. though we need to get a state group for it.
state_group_before_event = (
await self._state_storage_controller.store_state_group(
event.event_id,
event.room_id,
prev_group=None,
delta_ids=None,
current_state_ids=state_ids_before_event,
)
)
# the partial_state flag must be provided
assert partial_state is not None
else:
@ -345,6 +337,7 @@ class StateHandler:
logger.debug("calling resolve_state_groups from compute_event_context")
# we've already taken into account partial state, so no need to wait for
# complete state here.
entry = await self.resolve_state_groups_for_events(
event.room_id,
event.prev_event_ids(),
@ -383,18 +376,19 @@ class StateHandler:
#
if not event.is_state():
return EventContext.with_state(
return UnpersistedEventContext(
storage=self._storage_controllers,
state_group_before_event=state_group_before_event,
state_group=state_group_before_event,
state_group_after_event=state_group_before_event,
state_delta_due_to_event={},
prev_group=state_group_before_event_prev_group,
delta_ids=deltas_to_state_group_before_event,
prev_group_for_state_group_before_event=state_group_before_event_prev_group,
delta_ids_to_state_group_before_event=deltas_to_state_group_before_event,
partial_state=partial_state,
state_map_before_event=state_ids_before_event,
)
#
# otherwise, we'll need to create a new state group for after the event
# otherwise, we'll need to set up the creation of a new state group for after the event
#
key = (event.type, event.state_key)
@ -412,88 +406,60 @@ class StateHandler:
delta_ids = {key: event.event_id}
state_group_after_event = (
await self._state_storage_controller.store_state_group(
event.event_id,
event.room_id,
prev_group=state_group_before_event,
delta_ids=delta_ids,
current_state_ids=None,
)
return UnpersistedEventContext(
storage=self._storage_controllers,
state_group_before_event=state_group_before_event,
state_group_after_event=None,
state_delta_due_to_event=delta_ids,
prev_group_for_state_group_before_event=state_group_before_event_prev_group,
delta_ids_to_state_group_before_event=deltas_to_state_group_before_event,
partial_state=partial_state,
state_map_before_event=state_ids_before_event,
)
return EventContext.with_state(
storage=self._storage_controllers,
state_group=state_group_after_event,
state_group_before_event=state_group_before_event,
state_delta_due_to_event=delta_ids,
prev_group=state_group_before_event,
delta_ids=delta_ids,
async def compute_event_context(
self,
event: EventBase,
state_ids_before_event: Optional[StateMap[str]] = None,
partial_state: Optional[bool] = None,
) -> EventContext:
"""Build an EventContext structure for a non-outlier event.
(for an outlier, call EventContext.for_outlier directly)
This works out what the current state should be for the event, and
generates a new state group if necessary.
Args:
event:
state_ids_before_event: The event ids of the state before the event if
it can't be calculated from existing events. This is normally
only specified when receiving an event from federation where we
don't have the prev events, e.g. when backfilling.
partial_state:
`True` if `state_ids_before_event` is partial and omits non-critical
membership events.
`False` if `state_ids_before_event` is the full state.
`None` when `state_ids_before_event` is not provided. In this case, the
flag will be calculated based on `event`'s prev events.
entry:
A state cache entry for the resolved state across the prev events. We may
have already calculated this, so if it's available pass it in
Returns:
The event context.
Raises:
RuntimeError if `state_ids_before_event` is not provided and one or more
prev events are missing or outliers.
"""
unpersisted_context = await self.calculate_context_info(
event=event,
state_ids_before_event=state_ids_before_event,
partial_state=partial_state,
)
async def compute_event_context_for_batched(
self,
event: EventBase,
state_ids_before_event: StateMap[str],
current_state_group: int,
) -> EventContext:
"""
Generate an event context for an event that has not yet been persisted to the
database. Intended for use with events that are created to be persisted in a batch.
Args:
event: the event the context is being computed for
state_ids_before_event: a state map consisting of the state ids of the events
created prior to this event.
current_state_group: the current state group before the event.
"""
state_group_before_event_prev_group = None
deltas_to_state_group_before_event = None
state_group_before_event = current_state_group
# if the event is not state, we are set
if not event.is_state():
return EventContext.with_state(
storage=self._storage_controllers,
state_group_before_event=state_group_before_event,
state_group=state_group_before_event,
state_delta_due_to_event={},
prev_group=state_group_before_event_prev_group,
delta_ids=deltas_to_state_group_before_event,
partial_state=False,
)
# otherwise, we'll need to create a new state group for after the event
key = (event.type, event.state_key)
if state_ids_before_event is not None:
replaces = state_ids_before_event.get(key)
if replaces and replaces != event.event_id:
event.unsigned["replaces_state"] = replaces
delta_ids = {key: event.event_id}
state_group_after_event = (
await self._state_storage_controller.store_state_group(
event.event_id,
event.room_id,
prev_group=state_group_before_event,
delta_ids=delta_ids,
current_state_ids=None,
)
)
return EventContext.with_state(
storage=self._storage_controllers,
state_group=state_group_after_event,
state_group_before_event=state_group_before_event,
state_delta_due_to_event=delta_ids,
prev_group=state_group_before_event,
delta_ids=delta_ids,
partial_state=False,
)
return await unpersisted_context.persist(event)
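
So `compute_event_context` is now a thin wrapper: it computes the unpersisted context and immediately persists it, while batch-creation paths can hold the `UnpersistedEventContextBase` and defer state-group creation. A hedged usage sketch (names taken from the diff; the handler and event objects are assumed to exist):

from synapse.events import EventBase
from synapse.state import StateHandler


async def _example(state_handler: StateHandler, event: EventBase) -> None:
    # One-shot, as before: the state group is stored straight away.
    context = await state_handler.compute_event_context(event)

    # Deferred: compute now, store the state group only at persist time,
    # e.g. when creating several events to persist as a batch.
    unpersisted = await state_handler.calculate_context_info(event)
    persisted = await unpersisted.persist(event)
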
@measure_func()
async def resolve_state_groups_for_events(

View File

@ -37,6 +37,8 @@ class SQLBaseStore(metaclass=ABCMeta):
per data store (and not one per physical database).
"""
db_pool: DatabasePool
def __init__(
self,
database: DatabasePool,

View File

@ -14,6 +14,7 @@
import logging
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Awaitable,
Callable,
@ -23,7 +24,6 @@ from typing import (
List,
Mapping,
Optional,
Set,
Tuple,
)
@ -527,7 +527,7 @@ class StateStorageController:
)
return state_map.get(key)
async def get_current_hosts_in_room(self, room_id: str) -> Set[str]:
async def get_current_hosts_in_room(self, room_id: str) -> AbstractSet[str]:
"""Get current hosts in room based on current state.
Blocks until we have full state for the given room. This only happens for rooms
@ -584,7 +584,7 @@ class StateStorageController:
async def get_users_in_room_with_profiles(
self, room_id: str
) -> Dict[str, ProfileInfo]:
) -> Mapping[str, ProfileInfo]:
"""
Get the current users in the room with their profiles.
If the room is currently partial-stated, this will block until the room has

View File

@ -499,6 +499,7 @@ class DatabasePool:
"""
_TXN_ID = 0
engine: BaseDatabaseEngine
def __init__(
self,

View File

@ -21,6 +21,7 @@ from typing import (
FrozenSet,
Iterable,
List,
Mapping,
Optional,
Tuple,
cast,
@ -122,25 +123,25 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
return self._account_data_id_gen.get_current_token()
@cached()
async def get_account_data_for_user(
async def get_global_account_data_for_user(
self, user_id: str
) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]:
) -> Mapping[str, JsonDict]:
"""
Get all the client account_data for a user.
Get all the global client account_data for a user.
If experimental MSC3391 support is enabled, any entries with an empty
content body are excluded, as this means they have been deleted.
Args:
user_id: The user to get the account_data for.
Returns:
A 2-tuple of a dict of global account_data and a dict mapping from
room_id string to per room account_data dicts.
The global account_data.
"""
def get_account_data_for_user_txn(
def get_global_account_data_for_user(
txn: LoggingTransaction,
) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]:
) -> Dict[str, JsonDict]:
# The 'content != '{}' condition below prevents us from using
# `simple_select_list_txn` here, as it doesn't support conditions
# other than 'equals'.
@ -158,10 +159,34 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
txn.execute(sql, (user_id,))
rows = self.db_pool.cursor_to_dict(txn)
global_account_data = {
return {
row["account_data_type"]: db_to_json(row["content"]) for row in rows
}
return await self.db_pool.runInteraction(
"get_global_account_data_for_user", get_global_account_data_for_user
)
@cached()
async def get_room_account_data_for_user(
self, user_id: str
) -> Mapping[str, Mapping[str, JsonDict]]:
"""
Get all of the per-room client account_data for a user.
If experimental MSC3391 support is enabled, any entries with an empty
content body are excluded, as this means they have been deleted.
Args:
user_id: The user to get the account_data for.
Returns:
A dict mapping from room_id string to per-room account_data dicts.
"""
def get_room_account_data_for_user_txn(
txn: LoggingTransaction,
) -> Dict[str, Dict[str, JsonDict]]:
# The 'content != '{}' condition below prevents us from using
# `simple_select_list_txn` here, as it doesn't support conditions
# other than 'equals'.
@ -185,10 +210,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
room_data[row["account_data_type"]] = db_to_json(row["content"])
return global_account_data, by_room
return by_room
return await self.db_pool.runInteraction(
"get_account_data_for_user", get_account_data_for_user_txn
"get_room_account_data_for_user_txn", get_room_account_data_for_user_txn
)
@cached(num_args=2, max_entries=5000, tree=True)
@ -215,7 +240,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
@cached(num_args=2, tree=True)
async def get_account_data_for_room(
self, user_id: str, room_id: str
) -> Dict[str, JsonDict]:
) -> Mapping[str, JsonDict]:
"""Get all the client account_data for a user for a room.
Args:
@ -342,36 +367,61 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
"get_updated_room_account_data", get_updated_room_account_data_txn
)
async def get_updated_account_data_for_user(
async def get_updated_global_account_data_for_user(
self, user_id: str, stream_id: int
) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]:
"""Get all the client account_data for a that's changed for a user
) -> Dict[str, JsonDict]:
"""Get all the global account_data that's changed for a user.
Args:
user_id: The user to get the account_data for.
stream_id: The point in the stream since which to get updates
Returns:
A deferred pair of a dict of global account_data and a dict
mapping from room_id string to per room account_data dicts.
A dict of global account_data.
"""
def get_updated_account_data_for_user_txn(
def get_updated_global_account_data_for_user(
txn: LoggingTransaction,
) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]:
sql = (
"SELECT account_data_type, content FROM account_data"
" WHERE user_id = ? AND stream_id > ?"
)
) -> Dict[str, JsonDict]:
sql = """
SELECT account_data_type, content FROM account_data
WHERE user_id = ? AND stream_id > ?
"""
txn.execute(sql, (user_id, stream_id))
global_account_data = {row[0]: db_to_json(row[1]) for row in txn}
return {row[0]: db_to_json(row[1]) for row in txn}
sql = (
"SELECT room_id, account_data_type, content FROM room_account_data"
" WHERE user_id = ? AND stream_id > ?"
)
changed = self._account_data_stream_cache.has_entity_changed(
user_id, int(stream_id)
)
if not changed:
return {}
return await self.db_pool.runInteraction(
"get_updated_global_account_data_for_user",
get_updated_global_account_data_for_user,
)
async def get_updated_room_account_data_for_user(
self, user_id: str, stream_id: int
) -> Dict[str, Dict[str, JsonDict]]:
"""Get all the room account_data that's changed for a user.
Args:
user_id: The user to get the account_data for.
stream_id: The point in the stream since which to get updates
Returns:
A dict mapping from room_id string to per room account_data dicts.
"""
def get_updated_room_account_data_for_user_txn(
txn: LoggingTransaction,
) -> Dict[str, Dict[str, JsonDict]]:
sql = """
SELECT room_id, account_data_type, content FROM room_account_data
WHERE user_id = ? AND stream_id > ?
"""
txn.execute(sql, (user_id, stream_id))
account_data_by_room: Dict[str, Dict[str, JsonDict]] = {}
@ -379,16 +429,17 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
room_account_data = account_data_by_room.setdefault(row[0], {})
room_account_data[row[1]] = db_to_json(row[2])
return global_account_data, account_data_by_room
return account_data_by_room
changed = self._account_data_stream_cache.has_entity_changed(
user_id, int(stream_id)
)
if not changed:
return {}, {}
return {}
return await self.db_pool.runInteraction(
"get_updated_account_data_for_user", get_updated_account_data_for_user_txn
"get_updated_room_account_data_for_user",
get_updated_room_account_data_for_user_txn,
)
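
Callers that previously unpacked a 2-tuple from the combined getters now make two calls, one per scope. A hedged sketch against this store:

async def _example(
    store: "AccountDataWorkerStore", user_id: str, stream_id: int
) -> None:
    # Previously: global_data, by_room = await store.get_account_data_for_user(user_id)
    global_data = await store.get_global_account_data_for_user(user_id)
    by_room = await store.get_room_account_data_for_user(user_id)

    # Likewise for the "changed since stream_id" variants:
    changed_global = await store.get_updated_global_account_data_for_user(
        user_id, stream_id
    )
    changed_rooms = await store.get_updated_room_account_data_for_user(
        user_id, stream_id
    )
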
@cached(max_entries=5000, iterable=True)
@ -444,7 +495,8 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
self.get_global_account_data_by_type_for_user.invalidate(
(row.user_id, row.data_type)
)
self.get_account_data_for_user.invalidate((row.user_id,))
self.get_global_account_data_for_user.invalidate((row.user_id,))
self.get_room_account_data_for_user.invalidate((row.user_id,))
self.get_account_data_for_room.invalidate((row.user_id, row.room_id))
self.get_account_data_for_room_and_type.invalidate(
(row.user_id, row.room_id, row.data_type)
@ -492,7 +544,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
)
self._account_data_stream_cache.entity_has_changed(user_id, next_id)
self.get_account_data_for_user.invalidate((user_id,))
self.get_room_account_data_for_user.invalidate((user_id,))
self.get_account_data_for_room.invalidate((user_id, room_id))
self.get_account_data_for_room_and_type.prefill(
(user_id, room_id, account_data_type), content
@ -558,7 +610,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
return None
self._account_data_stream_cache.entity_has_changed(user_id, next_id)
self.get_account_data_for_user.invalidate((user_id,))
self.get_room_account_data_for_user.invalidate((user_id,))
self.get_account_data_for_room.invalidate((user_id, room_id))
self.get_account_data_for_room_and_type.prefill(
(user_id, room_id, account_data_type), {}
@ -593,7 +645,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
)
self._account_data_stream_cache.entity_has_changed(user_id, next_id)
self.get_account_data_for_user.invalidate((user_id,))
self.get_global_account_data_for_user.invalidate((user_id,))
self.get_global_account_data_by_type_for_user.invalidate(
(user_id, account_data_type)
)
@ -761,7 +813,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
return None
self._account_data_stream_cache.entity_has_changed(user_id, next_id)
self.get_account_data_for_user.invalidate((user_id,))
self.get_global_account_data_for_user.invalidate((user_id,))
self.get_global_account_data_by_type_for_user.prefill(
(user_id, account_data_type), {}
)
@ -822,7 +874,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
txn, self.get_account_data_for_room_and_type, (user_id,)
)
self._invalidate_cache_and_stream(
txn, self.get_account_data_for_user, (user_id,)
txn, self.get_global_account_data_for_user, (user_id,)
)
self._invalidate_cache_and_stream(
txn, self.get_room_account_data_for_user, (user_id,)
)
self._invalidate_cache_and_stream(
txn, self.get_global_account_data_by_type_for_user, (user_id,)

View File

@ -166,7 +166,7 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore):
room_id: str,
app_service: "ApplicationService",
cache_context: _CacheContext,
) -> List[str]:
) -> Sequence[str]:
"""
Get all users in a room that the appservice controls.

View File

@ -21,6 +21,7 @@ from typing import (
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
@ -100,6 +101,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
("device_lists_outbound_pokes", "stream_id"),
("device_lists_changes_in_room", "stream_id"),
("device_lists_remote_pending", "stream_id"),
("device_lists_changes_converted_stream_position", "stream_id"),
],
is_writer=hs.config.worker.worker_app is None,
)
@ -201,7 +203,9 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
def get_device_stream_token(self) -> int:
return self._device_list_id_gen.get_current_token()
async def count_devices_by_users(self, user_ids: Optional[List[str]] = None) -> int:
async def count_devices_by_users(
self, user_ids: Optional[Collection[str]] = None
) -> int:
"""Retrieve number of all devices of given users.
Only returns number of devices that are not marked as hidden.
@ -212,7 +216,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
"""
def count_devices_by_users_txn(
txn: LoggingTransaction, user_ids: List[str]
txn: LoggingTransaction, user_ids: Collection[str]
) -> int:
sql = """
SELECT count(*)
@ -745,42 +749,47 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
@trace
@cancellable
async def get_user_devices_from_cache(
self, query_list: List[Tuple[str, Optional[str]]]
) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]:
self, user_ids: Set[str], user_and_device_ids: List[Tuple[str, str]]
) -> Tuple[Set[str], Dict[str, Mapping[str, JsonDict]]]:
"""Get the devices (and keys if any) for remote users from the cache.
Args:
query_list: List of (user_id, device_ids), if device_ids is
falsey then return all device ids for that user.
user_ids: users which should have all device IDs returned
user_and_device_ids: List of (user_id, device_ids)
Returns:
A tuple of (user_ids_not_in_cache, results_map), where
user_ids_not_in_cache is a set of user_ids and results_map is a
mapping of user_id -> device_id -> device_info.
"""
user_ids = {user_id for user_id, _ in query_list}
user_map = await self.get_device_list_last_stream_id_for_remotes(list(user_ids))
unique_user_ids = user_ids | {user_id for user_id, _ in user_and_device_ids}
user_map = await self.get_device_list_last_stream_id_for_remotes(
list(unique_user_ids)
)
# We go and check if any of the users need to have their device lists
# resynced. If they do then we remove them from the cached list.
users_needing_resync = await self.get_user_ids_requiring_device_list_resync(
user_ids
unique_user_ids
)
user_ids_in_cache = {
user_id for user_id, stream_id in user_map.items() if stream_id
} - users_needing_resync
user_ids_not_in_cache = user_ids - user_ids_in_cache
user_ids_not_in_cache = unique_user_ids - user_ids_in_cache
results: Dict[str, Dict[str, JsonDict]] = {}
for user_id, device_id in query_list:
if user_id not in user_ids_in_cache:
continue
if device_id:
device = await self._get_cached_user_device(user_id, device_id)
results.setdefault(user_id, {})[device_id] = device
else:
# First fetch all the users for which all devices are to be returned.
results: Dict[str, Mapping[str, JsonDict]] = {}
for user_id in user_ids:
if user_id in user_ids_in_cache:
results[user_id] = await self.get_cached_devices_for_user(user_id)
# Then fetch all device-specific requests, but skip users we've already
# fetched all devices for.
device_specific_results: Dict[str, Dict[str, JsonDict]] = {}
for user_id, device_id in user_and_device_ids:
if user_id in user_ids_in_cache and user_id not in user_ids:
device = await self._get_cached_user_device(user_id, device_id)
device_specific_results.setdefault(user_id, {})[device_id] = device
results.update(device_specific_results)
set_tag("in_cache", str(results))
set_tag("not_in_cache", str(user_ids_not_in_cache))
@ -798,7 +807,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
return db_to_json(content)
@cached()
async def get_cached_devices_for_user(self, user_id: str) -> Dict[str, JsonDict]:
async def get_cached_devices_for_user(self, user_id: str) -> Mapping[str, JsonDict]:
devices = await self.db_pool.simple_select_list(
table="device_lists_remote_cache",
keyvalues={"user_id": user_id},

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, List, Optional, Tuple
from typing import Iterable, List, Optional, Sequence, Tuple
import attr
@ -74,7 +74,7 @@ class DirectoryWorkerStore(CacheInvalidationWorkerStore):
)
@cached(max_entries=5000)
async def get_aliases_for_room(self, room_id: str) -> List[str]:
async def get_aliases_for_room(self, room_id: str) -> Sequence[str]:
return await self.db_pool.simple_select_onecol(
"room_aliases",
{"room_id": room_id},

View File

@ -20,7 +20,9 @@ from typing import (
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
@ -260,7 +262,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
for batch in batch_iter(signature_query, 50):
cross_sigs_result = await self.db_pool.runInteraction(
"get_e2e_cross_signing_signatures",
"get_e2e_cross_signing_signatures_for_devices",
self._get_e2e_cross_signing_signatures_for_devices_txn,
batch,
)
@ -691,7 +693,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
@cached(max_entries=10000)
async def get_e2e_unused_fallback_key_types(
self, user_id: str, device_id: str
) -> List[str]:
) -> Sequence[str]:
"""Returns the fallback key types that have an unused key.
Args:
@ -731,7 +733,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
return user_keys.get(key_type)
@cached(num_args=1)
def _get_bare_e2e_cross_signing_keys(self, user_id: str) -> Dict[str, JsonDict]:
def _get_bare_e2e_cross_signing_keys(self, user_id: str) -> Mapping[str, JsonDict]:
"""Dummy function. Only used to make a cache for
_get_bare_e2e_cross_signing_keys_bulk.
"""
@ -744,7 +746,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
)
async def _get_bare_e2e_cross_signing_keys_bulk(
self, user_ids: Iterable[str]
) -> Dict[str, Optional[Dict[str, JsonDict]]]:
) -> Dict[str, Optional[Mapping[str, JsonDict]]]:
"""Returns the cross-signing keys for a set of users. The output of this
function should be passed to _get_e2e_cross_signing_signatures_txn if
the signatures for the calling user need to be fetched.
@ -765,7 +767,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
)
# The `Optional` comes from the `@cachedList` decorator.
return cast(Dict[str, Optional[Dict[str, JsonDict]]], result)
return cast(Dict[str, Optional[Mapping[str, JsonDict]]], result)
def _get_bare_e2e_cross_signing_keys_bulk_txn(
self,
@ -924,7 +926,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
@cancellable
async def get_e2e_cross_signing_keys_bulk(
self, user_ids: List[str], from_user_id: Optional[str] = None
) -> Dict[str, Optional[Dict[str, JsonDict]]]:
) -> Dict[str, Optional[Mapping[str, JsonDict]]]:
"""Returns the cross-signing keys for a set of users.
Args:
@ -940,11 +942,14 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
result = await self._get_bare_e2e_cross_signing_keys_bulk(user_ids)
if from_user_id:
result = await self.db_pool.runInteraction(
"get_e2e_cross_signing_signatures",
self._get_e2e_cross_signing_signatures_txn,
result,
from_user_id,
result = cast(
Dict[str, Optional[Mapping[str, JsonDict]]],
await self.db_pool.runInteraction(
"get_e2e_cross_signing_signatures",
self._get_e2e_cross_signing_signatures_txn,
result,
from_user_id,
),
)
return result

View File

@ -22,6 +22,7 @@ from typing import (
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
cast,
@ -1004,7 +1005,9 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
room_id,
)
async def get_max_depth_of(self, event_ids: List[str]) -> Tuple[Optional[str], int]:
async def get_max_depth_of(
self, event_ids: Collection[str]
) -> Tuple[Optional[str], int]:
"""Returns the event ID and depth for the event that has the max depth from a set of event IDs
Args:
@ -1141,7 +1144,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
)
@cached(max_entries=5000, iterable=True)
async def get_latest_event_ids_in_room(self, room_id: str) -> List[str]:
async def get_latest_event_ids_in_room(self, room_id: str) -> Sequence[str]:
return await self.db_pool.simple_select_onecol(
table="event_forward_extremities",
keyvalues={"room_id": room_id},
@ -1171,7 +1174,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
@cancellable
async def get_forward_extremities_for_room_at_stream_ordering(
self, room_id: str, stream_ordering: int
) -> List[str]:
) -> Sequence[str]:
"""For a given room_id and stream_ordering, return the forward
extremities of the room at that point in "time".
@ -1204,7 +1207,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
@cached(max_entries=5000, num_args=2)
async def _get_forward_extremeties_for_room(
self, room_id: str, stream_ordering: int
) -> List[str]:
) -> Sequence[str]:
"""For a given room_id and stream_ordering, return the forward
extremities of the room at that point in "time".

View File

@ -203,11 +203,18 @@ class RoomNotifCounts:
# Map of thread ID to the notification counts.
threads: Dict[str, NotifCounts]
@staticmethod
def empty() -> "RoomNotifCounts":
return _EMPTY_ROOM_NOTIF_COUNTS
def __len__(self) -> int:
# To properly account for the amount of space in any caches.
return len(self.threads) + 1
_EMPTY_ROOM_NOTIF_COUNTS = RoomNotifCounts(NotifCounts(), {})
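
`__len__` lets size-aware caches weigh an entry by its thread count (plus one for the room-level counts), and the shared `_EMPTY_ROOM_NOTIF_COUNTS` avoids allocating a fresh object for every room with nothing to report. For illustration:

counts = RoomNotifCounts.empty()
assert len(counts) == 1  # no threads, just the room-level counts
assert RoomNotifCounts.empty() is RoomNotifCounts.empty()  # shared instance
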
def _serialize_action(
actions: Collection[Union[Mapping, str]], is_highlight: bool
) -> str:

View File

@ -16,7 +16,6 @@
import itertools
import logging
from collections import OrderedDict
from http import HTTPStatus
from typing import (
TYPE_CHECKING,
Any,
@ -26,7 +25,6 @@ from typing import (
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
)
@ -36,7 +34,7 @@ from prometheus_client import Counter
import synapse.metrics
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.errors import PartialStateConflictError
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
@ -52,7 +50,7 @@ from synapse.storage.databases.main.search import SearchEntry
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import AbstractStreamIdGenerator
from synapse.storage.util.sequence import SequenceGenerator
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.types import JsonDict, StateMap, StrCollection, get_domain_from_id
from synapse.util import json_encoder
from synapse.util.iterutils import batch_iter, sorted_topologically
from synapse.util.stringutils import non_null_str_or_none
@ -72,24 +70,6 @@ event_counter = Counter(
)
class PartialStateConflictError(SynapseError):
"""An internal error raised when attempting to persist an event with partial state
after the room containing the event has been un-partial stated.
This error should be handled by recomputing the event context and trying again.
This error has an HTTP status code so that it can be transported over replication.
It should not be exposed to clients.
"""
def __init__(self) -> None:
super().__init__(
HTTPStatus.CONFLICT,
msg="Cannot persist partial state event in un-partial stated room",
errcode=Codes.UNKNOWN,
)
@attr.s(slots=True, auto_attribs=True)
class DeltaState:
"""Deltas to use to update the `current_state_events` table.
@ -306,7 +286,7 @@ class PersistEventsStore:
# The set of event_ids to return. This includes all soft-failed events
# and their prev events.
existing_prevs = set()
existing_prevs: Set[str] = set()
def _get_prevs_before_rejected_txn(
txn: LoggingTransaction, batch: Collection[str]
@ -571,7 +551,7 @@ class PersistEventsStore:
event_chain_id_gen: SequenceGenerator,
event_to_room_id: Dict[str, str],
event_to_types: Dict[str, Tuple[str, str]],
event_to_auth_chain: Dict[str, Sequence[str]],
event_to_auth_chain: Dict[str, StrCollection],
) -> None:
"""Calculate the chain cover index for the given events.
@ -865,7 +845,7 @@ class PersistEventsStore:
event_chain_id_gen: SequenceGenerator,
event_to_room_id: Dict[str, str],
event_to_types: Dict[str, Tuple[str, str]],
event_to_auth_chain: Dict[str, Sequence[str]],
event_to_auth_chain: Dict[str, StrCollection],
events_to_calc_chain_id_for: Set[str],
chain_map: Dict[str, Tuple[int, int]],
) -> Dict[str, Tuple[int, int]]:

View File

@ -13,7 +13,7 @@
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Set, Tuple, cast
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
import attr
@ -29,7 +29,7 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.events import PersistEventsStore
from synapse.storage.types import Cursor
from synapse.types import JsonDict
from synapse.types import JsonDict, StrCollection
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -1061,7 +1061,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
self.event_chain_id_gen, # type: ignore[attr-defined]
event_to_room_id,
event_to_types,
cast(Dict[str, Sequence[str]], event_to_auth_chain),
cast(Dict[str, StrCollection], event_to_auth_chain),
)
return _CalculateChainCover(

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast
from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, cast
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage.database import (
@ -95,7 +95,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
return await self.db_pool.runInteraction("count_users", _count_users)
@cached(num_args=0)
async def get_monthly_active_count_by_service(self) -> Dict[str, int]:
async def get_monthly_active_count_by_service(self) -> Mapping[str, int]:
"""Generates current count of monthly active users broken down by service.
A service is typically an appservice but also includes native matrix users.
Since the `monthly_active_users` table is populated from the `user_ips` table

View File

@ -420,12 +420,14 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"event_push_actions",
"event_search",
"event_failed_pull_attempts",
# Note: the partial state tables have foreign keys between each other, and to
# `events` and `rooms`. We need to delete from them in the right order.
"partial_state_events",
"partial_state_rooms_servers",
"partial_state_rooms",
"events",
"federation_inbound_events_staging",
"local_current_membership",
"partial_state_rooms_servers",
"partial_state_rooms",
"receipts_graph",
"receipts_linearized",
"room_aliases",

Some files were not shown because too many files have changed in this diff.