Merge branch 'release-v1.68' into matrix-org-hotfixes
commit f4419438c1
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-import psycopg2
-
-# a very simple replacment for `psql`, to make up for the lack of the postgres client
-# libraries in the synapse docker image.
-
-# We use "postgres" as a database because it's bound to exist and the "synapse" one
-# doesn't exist yet.
-db_conn = psycopg2.connect(
-    user="postgres", host="localhost", password="postgres", dbname="postgres"
-)
-db_conn.autocommit = True
-cur = db_conn.cursor()
-for c in sys.argv[1:]:
-    cur.execute(c)
@@ -32,7 +32,7 @@ else
fi

# Create the PostgreSQL database.
-poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+psql -c "CREATE DATABASE synapse"

# Port the SQLite databse to postgres so we can check command works against postgres
echo "+++ Port SQLite3 databse to postgres"
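With the `postgres_exec.py` helper deleted, the CI scripts shell out to a plain `psql -c`, relying on the standard libpq environment variables that the workflow now exports. A minimal sketch of the equivalent call from Python (illustrative only, using the same hardcoded CI credentials as the removed helper; not part of the change itself):

```python
import os
import subprocess

# The workflow sets PGHOST/PGUSER/PGPASSWORD/PGDATABASE (see the
# .github/workflows hunks below), so a bare `psql -c` needs no extra flags.
env = dict(
    os.environ,
    PGHOST="localhost",
    PGUSER="postgres",
    PGPASSWORD="postgres",
    PGDATABASE="postgres",
)
subprocess.run(["psql", "-c", "CREATE DATABASE synapse"], env=env, check=True)
```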
@@ -2,27 +2,27 @@
#
# Test script for 'synapse_port_db'.
#  - configures synapse and a postgres server.
-# - runs the port script on a prepopulated test sqlite db
-# - also runs it against an new sqlite db
+# - runs the port script on a prepopulated test sqlite db. Checks that the
+#   return code is zero.
+# - reruns the port script on the same sqlite db, targetting the same postgres db.
+#   Checks that the return code is zero.
+# - runs the port script against a new sqlite db. Checks the return code is zero.
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.

-set -xe
+set -xe -o pipefail
cd "$(dirname "$0")/../.."

echo "--- Generate the signing key"

# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml

echo "--- Prepare test database"

-# Make sure the SQLite3 database is using the latest schema and has no pending background update.
+# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

# Create the PostgreSQL database.
-poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+psql -c "CREATE DATABASE synapse"

echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
@@ -45,9 +45,23 @@ rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

# re-create the PostgreSQL database.
-poetry run .ci/scripts/postgres_exec.py \
-  "DROP DATABASE synapse" \
-  "CREATE DATABASE synapse"
+psql \
+  -c "DROP DATABASE synapse" \
+  -c "CREATE DATABASE synapse"

echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

+echo "--- Create a brand new postgres database from schema"
+cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml
+sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml
+psql -c "CREATE DATABASE synapse_unported"
+poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates
+
+echo "+++ Comparing ported schema with unported schema"
+# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
+psql synapse -c "DROP TABLE port_from_sqlite3;"
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
+# By default, `diff` returns zero if there are no changes and nonzero otherwise
+diff -u unported.sql ported.sql | tee schema_diff
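Because the script now runs with `set -o pipefail`, a non-empty schema diff fails the CI step even though the output is piped through `tee`. A rough Python equivalent of that comparison step (a sketch for illustration, not part of the change):

```python
import subprocess

# `diff` exits 0 when the dumps match and 1 when they differ; with pipefail
# that nonzero status propagates through `| tee schema_diff` and fails CI.
result = subprocess.run(
    ["diff", "-u", "unported.sql", "ported.sql"],
    capture_output=True,
    text=True,
)
with open("schema_diff", "w") as f:  # keep a copy for the artifact upload
    f.write(result.stdout)
if result.returncode != 0:
    raise SystemExit("ported and unported schemas differ")
```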
@@ -11,5 +11,6 @@
!build_rust.py

rust/target
+synapse/*.so

**/__pycache__
@@ -32,9 +32,11 @@ jobs:
    steps:
      - uses: actions/checkout@v2
-     - uses: actions/setup-python@v2
-     - run: pip install .
-     - run: scripts-dev/generate_sample_config.sh --check
-     - run: scripts-dev/config-lint.sh
+     - uses: matrix-org/setup-python-poetry@v1
+       with:
+         extras: "all"
+     - run: poetry run scripts-dev/generate_sample_config.sh --check
+     - run: poetry run scripts-dev/config-lint.sh

  check-schema-delta:
    runs-on: ubuntu-latest
@@ -76,7 +78,6 @@ jobs:
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
      - uses: matrix-org/setup-python-poetry@v1
        with:
          extras: "all"
@@ -361,18 +362,22 @@ jobs:
    steps:
      - uses: actions/checkout@v2
-     - run: sudo apt-get -qq install xmlsec1
+     - run: sudo apt-get -qq install xmlsec1 postgresql-client
      - uses: matrix-org/setup-python-poetry@v1
        with:
          extras: "postgres"
      - run: .ci/scripts/test_export_data_command.sh
+       env:
+         PGHOST: localhost
+         PGUSER: postgres
+         PGPASSWORD: postgres
+         PGDATABASE: postgres


  portdb:
    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
    runs-on: ubuntu-latest
    env:
      TOP: ${{ github.workspace }}
    strategy:
      matrix:
        include:
@@ -398,12 +403,27 @@ jobs:
    steps:
      - uses: actions/checkout@v2
-     - run: sudo apt-get -qq install xmlsec1
+     - run: sudo apt-get -qq install xmlsec1 postgresql-client
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          extras: "postgres"
      - run: .ci/scripts/test_synapse_port_db.sh
+       id: run_tester_script
+       env:
+         PGHOST: localhost
+         PGUSER: postgres
+         PGPASSWORD: postgres
+         PGDATABASE: postgres
+     - name: "Upload schema differences"
+       uses: actions/upload-artifact@v3
+       if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
+       with:
+         name: Schema dumps
+         path: |
+           unported.sql
+           ported.sql
+           schema_diff

  complement:
    if: "${{ !failure() && !cancelled() }}"
CHANGES.md
@@ -1,3 +1,92 @@
+Synapse 1.68.0rc1 (2022-09-20)
+==============================
+
+Please note that Synapse will now refuse to start if configured to use a version of SQLite earlier than 3.27.
+
+In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
+Those using packages will not be affected. On most platforms, installing with `pip install matrix-synapse` will not be affected.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1670).
+
+
+Features
+--------
+
+- Keep track of when we fail to process a pulled event over federation so we can intelligently back off in the future. ([\#13589](https://github.com/matrix-org/synapse/issues/13589), [\#13814](https://github.com/matrix-org/synapse/issues/13814))
+- Add an [admin API endpoint to fetch messages within a particular window of time](https://matrix-org.github.io/synapse/v1.68/admin_api/rooms.html#room-messages-api). ([\#13672](https://github.com/matrix-org/synapse/issues/13672))
+- Add an [admin API endpoint to find a user based on their external ID in an auth provider](https://matrix-org.github.io/synapse/v1.68/admin_api/user_admin_api.html#find-a-user-based-on-their-id-in-an-auth-provider). ([\#13810](https://github.com/matrix-org/synapse/issues/13810))
+- Cancel the processing of key query requests when they time out. ([\#13680](https://github.com/matrix-org/synapse/issues/13680))
+- Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken), [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status), [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). ([\#13687](https://github.com/matrix-org/synapse/issues/13687), [\#13736](https://github.com/matrix-org/synapse/issues/13736))
+- Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. ([\#13741](https://github.com/matrix-org/synapse/issues/13741))
+- Add a `listeners[x].request_id_header` configuration option to specify which request header to extract and use as the request ID in order to correlate requests from a reverse proxy. ([\#13801](https://github.com/matrix-org/synapse/issues/13801))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
+- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
+- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
+- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
+- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826))
+- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766))
+- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789))
+- Delete associated data from `event_failed_pull_attempts`, `insertion_events`, `insertion_event_extremities`, `insertion_event_extremities`, `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825))
+
+
+Improved Documentation
+----------------------
+
+- Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480))
+- Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse v1.22.0. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
+- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727))
+- Define Synapse's compatability policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728))
+- Add docs for common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
+- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794))
+
+
+Deprecations and Removals
+-------------------------
+
+- Synapse will now refuse to start if configured to use SQLite < 3.27. ([\#13760](https://github.com/matrix-org/synapse/issues/13760))
+- Don't include redundant `prev_state` in new events. Contributed by Denis Kariakin (@dakariakin). ([\#13791](https://github.com/matrix-org/synapse/issues/13791))
+
+
+Internal Changes
+----------------
+
+- Add a stub Rust crate. ([\#12595](https://github.com/matrix-org/synapse/issues/12595), [\#13734](https://github.com/matrix-org/synapse/issues/13734), [\#13735](https://github.com/matrix-org/synapse/issues/13735), [\#13743](https://github.com/matrix-org/synapse/issues/13743), [\#13763](https://github.com/matrix-org/synapse/issues/13763), [\#13769](https://github.com/matrix-org/synapse/issues/13769), [\#13778](https://github.com/matrix-org/synapse/issues/13778))
+- Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. ([\#13162](https://github.com/matrix-org/synapse/issues/13162))
+- Add and populate the `event_stream_ordering` column on the `receipts` table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). ([\#13703](https://github.com/matrix-org/synapse/issues/13703))
+- Rename the `EventFormatVersions` enum values so that they line up with room version numbers. ([\#13706](https://github.com/matrix-org/synapse/issues/13706))
+- Update trial old deps CI to use Poetry 1.2.0. ([\#13707](https://github.com/matrix-org/synapse/issues/13707), [\#13725](https://github.com/matrix-org/synapse/issues/13725))
+- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13714](https://github.com/matrix-org/synapse/issues/13714), [\#13717](https://github.com/matrix-org/synapse/issues/13717), [\#13718](https://github.com/matrix-org/synapse/issues/13718))
+- Fix typechecking with latest types-jsonschema. ([\#13724](https://github.com/matrix-org/synapse/issues/13724))
+- Strip number suffix from instance name to consolidate services that traces are spread over. ([\#13729](https://github.com/matrix-org/synapse/issues/13729))
+- Instrument `get_metadata_for_events` for understandable traces in Jaeger. ([\#13730](https://github.com/matrix-org/synapse/issues/13730))
+- Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). ([\#13745](https://github.com/matrix-org/synapse/issues/13745))
+- Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit. ([\#13748](https://github.com/matrix-org/synapse/issues/13748))
+- Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. ([\#13750](https://github.com/matrix-org/synapse/issues/13750))
+- Use an additional database query when persisting receipts. ([\#13752](https://github.com/matrix-org/synapse/issues/13752))
+- Preparatory work for storing thread IDs for notifications and receipts. ([\#13753](https://github.com/matrix-org/synapse/issues/13753))
+- Re-type hint some collections as read-only. ([\#13754](https://github.com/matrix-org/synapse/issues/13754))
+- Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. ([\#13756](https://github.com/matrix-org/synapse/issues/13756))
+- Add a check for editable installs if the Rust library needs rebuilding. ([\#13759](https://github.com/matrix-org/synapse/issues/13759))
+- Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. ([\#13761](https://github.com/matrix-org/synapse/issues/13761))
+- Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). ([\#13765](https://github.com/matrix-org/synapse/issues/13765))
+- Update the script which makes full schema dumps. ([\#13770](https://github.com/matrix-org/synapse/issues/13770))
+- Deduplicate `is_server_notices_room`. ([\#13780](https://github.com/matrix-org/synapse/issues/13780))
+- Simplify the dependency DAG in the tests workflow. ([\#13784](https://github.com/matrix-org/synapse/issues/13784))
+- Remove an old, incorrect migration file. ([\#13788](https://github.com/matrix-org/synapse/issues/13788))
+- Remove unused method in `synapse.api.auth.Auth`. ([\#13795](https://github.com/matrix-org/synapse/issues/13795))
+- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798))
+- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802))
+- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808))
+- Fix Docker build when Rust .so has been build locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
+- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819))
+- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822))
+- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827))
+
+
Synapse 1.67.0 (2022-09-13)
===========================

@@ -1 +0,0 @@
-Add a stub Rust crate.
@@ -1 +0,0 @@
-Note that `libpq` is required on ARM-based Macs.
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`).
@@ -1 +0,0 @@
-Add admin APIs to fetch messages within a particular window of time.
@@ -1 +0,0 @@
-Cancel the processing of key query requests when they time out.
@@ -1 +0,0 @@
-Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken) and [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status).
@@ -1 +0,0 @@
-Add & populate `event_stream_ordering` column on receipts table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar).
@@ -1 +0,0 @@
-Rename the `EventFormatVersions` enum values so that they line up with room version numbers.
@@ -1 +0,0 @@
-Update trial old deps CI to use poetry 1.2.0.
@@ -1 +0,0 @@
-Add experimental configuration option to allow disabling legacy Prometheus metric names.
@@ -1 +0,0 @@
-Add experimental configuration option to allow disabling legacy Prometheus metric names.
@@ -1 +0,0 @@
-Add experimental configuration option to allow disabling legacy Prometheus metric names.
@@ -1 +0,0 @@
-Fix typechecking with latest types-jsonschema.
@@ -1 +0,0 @@
-Update trial old deps CI to use poetry 1.2.0.
@@ -1 +0,0 @@
-Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse 1.22.
@@ -1 +0,0 @@
-Fix a typo in the documentation for the login ratelimiting configuration.
@@ -1 +0,0 @@
-Define Synapse's compatability policy for SQLite versions.
@@ -1 +0,0 @@
-Strip number suffix from instance name to consolidate services that traces are spread over.
@@ -1 +0,0 @@
-Instrument `get_metadata_for_events` for understandable traces in Jaeger.
@@ -1 +0,0 @@
-Add a stub Rust crate.
@@ -1 +0,0 @@
-Add a stub Rust crate.
@@ -1 +0,0 @@
-Fix a bug where Synapse fails to start if a signing key file contains an empty line.
@@ -1 +0,0 @@
-Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used.
@@ -1 +0,0 @@
-Add a stub Rust crate.
@@ -1 +0,0 @@
-Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar).
@@ -1 +0,0 @@
-Fix a long standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases.
@@ -1 +0,0 @@
-Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit.
@@ -1 +0,0 @@
-Fix a long standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver.
@@ -1 +0,0 @@
-Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state.
@@ -1 +0,0 @@
-User an additional database query when persisting receipts.
@@ -1 +0,0 @@
-Re-type hint some collections as read-only.
@@ -1 +0,0 @@
-Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used.
@@ -1 +0,0 @@
-Add a check for editable installs if the Rust library needs rebuilding.
@@ -1 +0,0 @@
-Synapse will now refuse to start if configured to use SQLite < 3.27.
@@ -1 +0,0 @@
-Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance.
@@ -1 +0,0 @@
-Add a stub Rust crate.
@@ -1 +0,0 @@
-Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar).
@@ -1 +0,0 @@
-Fix a long-standing bug where the `cache_invalidation_stream_seq` sequence would begin at 1 instead of 2.
@@ -1 +0,0 @@
-Add a stub Rust crate.
@@ -1 +0,0 @@
-Update the script which makes full schema dumps.
@@ -1 +0,0 @@
-Add a stub Rust crate.
@@ -1 +0,0 @@
-Simplify the dependency DAG in the tests workflow.
@@ -1 +0,0 @@
-Remove an old, incorrect migration file.
@@ -1 +0,0 @@
-Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests.
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.68.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.68.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Sep 2022 11:18:20 +0100
+
matrix-synapse-py3 (1.67.0) stable; urgency=medium

  * New Synapse release 1.67.0.
@@ -31,7 +31,9 @@ ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
-FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
+# We hardcode the use of Debian bullseye here because this could change upstream
+# and other Dockerfiles used for testing are expecting bullseye.
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements

# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -76,7 +78,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
-FROM docker.io/python:${PYTHON_VERSION}-slim as builder
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder

# install the OS build deps
RUN \
@@ -137,7 +139,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
### Stage 2: runtime
###

-FROM docker.io/python:${PYTHON_VERSION}-slim
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye

LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
@@ -17,7 +17,17 @@ ARG SYNAPSE_VERSION=latest
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
-FROM postgres:13-bullseye AS postgres_base
+# now build the final image, based on the Synapse image.
+
+FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+# copy the postgres installation over from the image we built above
+RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
+COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
+COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
+RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
+ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
+ENV PGDATA=/var/lib/postgresql/data
+
+# initialise the database cluster in /var/lib/postgresql
+RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password
@@ -25,18 +35,6 @@ FROM postgres:13-bullseye AS postgres_base
RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single
RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single

-# now build the final image, based on the Synapse image.
-
-FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
-# copy the postgres installation over from the image we built above
-RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
-COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql
-COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
-COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
-RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
-ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
-ENV PGDATA=/var/lib/postgresql/data
-
# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.
@@ -1155,3 +1155,41 @@ GET /_synapse/admin/v1/username_available?username=$localpart

The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
+
+### Find a user based on their ID in an auth provider
+
+The API is:
+
+```
+GET /_synapse/admin/v1/auth_providers/$provider/users/$external_id
+```
+
+When a user matched the given ID for the given provider, an HTTP code `200` with a response body like the following is returned:
+
+```json
+{
+    "user_id": "@hello:example.org"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `provider` - The ID of the authentication provider, as advertised by the [`GET /_matrix/client/v3/login`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login) API in the `m.login.sso` authentication method.
+- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers, or to the `uid` attestation for SAML2 providers.
+
+The `external_id` may have characters that are not URL-safe (typically `/`, `:` or `@`), so it is advised to URL-encode those parameters.
+
+**Errors**
+
+Returns a `404` HTTP status code if no user was found, with a response body like this:
+
+```json
+{
+    "errcode":"M_NOT_FOUND",
+    "error":"User not found"
+}
+```
+
+_Added in Synapse 1.68.0._
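For illustration, a call to the new endpoint might look like the following. This is a sketch: the homeserver URL, provider ID, external ID, and admin token are all hypothetical, and `requests` is just a convenient stand-in client.

```python
import requests
from urllib.parse import quote

# Hypothetical values; only the URL path shape comes from the docs above.
external_id = quote("alice@idp.example.com", safe="")  # URL-encode unsafe chars
resp = requests.get(
    "https://synapse.example.com/_synapse/admin/v1"
    f"/auth_providers/oidc/users/{external_id}",
    headers={"Authorization": "Bearer <admin access token>"},
)
# 200 -> {"user_id": "@hello:example.org"}; 404 -> M_NOT_FOUND
print(resp.status_code, resp.json())
```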
@@ -126,6 +126,23 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
poetry install --extras all --remove-untracked
```

+## ...delete everything and start over from scratch?
+
+```shell
+# Stop the current virtualenv if active
+$ deactivate
+
+# Remove all of the files from the current environment.
+# Don't worry, even though it says "all", this will only
+# remove the Poetry virtualenvs for the current project.
+$ poetry env remove --all
+
+# Reactivate Poetry shell to create the virtualenv again
+$ poetry shell
+# Install everything again
+$ poetry install --extras all
+```
+
## ...run a command in the `poetry` virtualenv?

Use `poetry run cmd args` when you need the python virtualenv context.
@@ -256,6 +273,16 @@ from PyPI. (This is what makes poetry seem slow when doing the first
`poetry install`.) Try `poetry cache list` and `poetry cache clear --all
<name of cache>` to see if that fixes things.

+## Remove outdated egg-info
+
+Delete the `matrix_synapse.egg-info/` directory from the root of your Synapse
+install.
+
+This stores some cached information about dependencies and often conflicts with
+letting Poetry do the right thing.
+
+
## Try `--verbose` or `--dry-run` arguments.

Sometimes useful to see what poetry's internal logic is.
@@ -45,6 +45,10 @@ listens to traffic on localhost. (Do not change `bind_addresses` to `127.0.0.1`
when using a containerized Synapse, as that will prevent it from responding
to proxied traffic.)

+Optionally, you can also set
+[`request_id_header`](../usage/configuration/config_documentation.md#listeners)
+so that the server extracts and re-uses the same request ID format that the
+reverse proxy is using.
+
## Reverse-proxy configuration examples
@@ -89,6 +89,13 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

+# Upgrading to v1.68.0
+
+As announced in the upgrade notes for v1.67.0, Synapse now requires a SQLite
+version of 3.27.0 or higher if SQLite is in use and source checkouts of Synapse
+now require a recent Rust compiler.
+
+
# Upgrading to v1.67.0

## Direct TCP replication is no longer supported: migrate to Redis
@@ -125,7 +132,7 @@ From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
higher. Synapse v1.67.0 will be the last major release supporting SQLite
versions 3.22 to 3.26.

-Those using docker images or Debian packages from Matrix.org will not be
+Those using Docker images or Debian packages from Matrix.org will not be
affected. If you have installed from source, you should check the version of
SQLite used by Python with:
@@ -135,6 +142,7 @@ python -c "import sqlite3; print(sqlite3.sqlite_version)"

If this is too old, refer to your distribution for advice on upgrading.

+
# Upgrading to v1.66.0

## Delegation of email validation no longer supported
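The one-liner quoted in the upgrade notes can be extended into a quick pass/fail check ahead of the upgrade (a small sketch based on the documented 3.27.0 minimum):

```python
import sqlite3

# Synapse 1.68.0 refuses to start with SQLite older than 3.27.0.
version = tuple(int(p) for p in sqlite3.sqlite_version.split("."))
status = "OK" if version >= (3, 27, 0) else "too old"
print("SQLite", sqlite3.sqlite_version, status)
```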
@@ -12,14 +12,14 @@ See the following for how to decode the dense data available from the default logging:

| Part  | Explanation |
| ----- | ------------ |
-| AAAA  | Timestamp request was logged (not recieved) |
+| AAAA  | Timestamp request was logged (not received) |
| BBBB  | Logger name (`synapse.access.(http\|https).<tag>`, where 'tag' is defined in the `listeners` config section, normally the port) |
| CCCC  | Line number in code |
| DDDD  | Log Level |
| EEEE  | Request Identifier (This identifier is shared by related log lines)|
| FFFF  | Source IP (Or X-Forwarded-For if enabled) |
| GGGG  | Server Port |
-| HHHH  | Federated Server or Local User making request (blank if unauthenticated or not supplied) |
+| HHHH  | Federated Server or Local User making request (blank if unauthenticated or not supplied).<br/>If this is of the form `@aaa:example.com|@bbb:example.com`, then that means that `@aaa:example.com` is authenticated but they are controlling `@bbb:example.com`, e.g. if `aaa` is controlling `bbb` [via the admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#login-as-a-user). |
| IIII  | Total Time to process the request |
| JJJJ  | Time to send response over network once generated (this may be negative if the socket is closed before the response is generated)|
| KKKK  | Userland CPU time |
@@ -434,7 +434,16 @@ Sub-options for each listener include:
* `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path.

* `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is
-  behind a reverse-proxy.
+  behind a [reverse-proxy](../../reverse_proxy.md).
+
+* `request_id_header`: The header extracted from each incoming request that is
+  used as the basis for the request ID. The request ID is used in
+  [logs](../administration/request_log.md#request-log-format) and tracing to
+  correlate and match up requests. When unset, Synapse will automatically
+  generate sequential request IDs. This option is useful when Synapse is behind
+  a [reverse-proxy](../../reverse_proxy.md).
+
+  _Added in Synapse 1.68.0._

* `resources`: Only valid for an 'http' listener. A list of resources to host
  on this port. Sub-options for each resource are:
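The resulting request IDs keep the existing `METHOD-value` shape; only the value's source changes. A condensed restatement of the behaviour (the real implementation is in the `synapse/http/site.py` hunk further down):

```python
from typing import Optional

def get_request_id(method: str, request_seq: int, header_value: Optional[str]) -> str:
    # With `request_id_header` configured and the header present on the
    # request, its value is used; otherwise Synapse falls back to the
    # per-connection sequence number, as before.
    value = header_value if header_value is not None else str(request_seq)
    return "%s-%s" % (method, value)

assert get_request_id("GET", 7, None) == "GET-7"
assert get_request_id("GET", 7, "abc-123") == "GET-abc-123"
```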
@@ -524,11 +524,11 @@ python-versions = ">=3.7"

[[package]]
name = "matrix-common"
-version = "1.2.1"
+version = "1.3.0"
description = "Common utilities for Synapse, Sydent and Sygnal"
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"

[package.dependencies]
attrs = "*"
@@ -1625,7 +1625,7 @@ url_preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
-content-hash = "79cfa09d59f9f8b5ef24318fb860df1915f54328692aa56d04331ecbdd92a8cb"
+content-hash = "1b14fc274d9e2a495a7f864150f3ffcf4d9f585e09a67e53301ae4ef3c2f3e48"

[metadata.files]
attrs = [
@@ -2113,8 +2113,8 @@ markupsafe = [
    {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"},
]
matrix-common = [
-    {file = "matrix_common-1.2.1-py3-none-any.whl", hash = "sha256:946709c405944a0d4b1d73207b77eb064b6dbfc5d70a69471320b06d8ce98b20"},
-    {file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"},
+    {file = "matrix_common-1.3.0-py3-none-any.whl", hash = "sha256:524e2785b9b03be4d15f3a8a6b857c5b6af68791ffb1b9918f0ad299abc4db20"},
+    {file = "matrix_common-1.3.0.tar.gz", hash = "sha256:62e121cccd9f243417b57ec37a76dc44aeb198a7a5c67afd6b8275992ff2abd1"},
]
matrix-synapse-ldap3 = [
    {file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = "sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"},
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
-version = "1.67.0"
+version = "1.68.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -164,7 +164,7 @@ typing-extensions = ">=3.10.0.1"
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
-matrix-common = "^1.2.1"
+matrix-common = "^1.3.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
@@ -459,15 +459,6 @@ class Auth:
            )
            raise InvalidClientTokenError("Invalid access token passed.")

-    def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService:
-        token = self.get_access_token_from_request(request)
-        service = self.store.get_app_service_by_token(token)
-        if not service:
-            logger.warning("Unrecognised appservice access token.")
-            raise InvalidClientTokenError()
-        request.requester = create_requester(service.sender, app_service=service)
-        return service
-
    async def is_server_admin(self, requester: Requester) -> bool:
        """Check if the given user is a local server admin.
@@ -206,6 +206,7 @@ class HttpListenerConfig:
    resources: List[HttpResourceConfig] = attr.Factory(list)
    additional_resources: Dict[str, dict] = attr.Factory(dict)
    tag: Optional[str] = None
+    request_id_header: Optional[str] = None


@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -520,9 +521,11 @@ class ServerConfig(Config):
        ):
            raise ConfigError("allowed_avatar_mimetypes must be a list")

-        self.listeners = [
-            parse_listener_def(i, x) for i, x in enumerate(config.get("listeners", []))
-        ]
+        listeners = config.get("listeners", [])
+        if not isinstance(listeners, list):
+            raise ConfigError("Expected a list", ("listeners",))
+
+        self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)]

        # no_tls is not really supported any more, but let's grandfather it in
        # here.
@@ -889,6 +892,9 @@ def read_gc_thresholds(

def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
    """parse a listener config from the config file"""
+    if not isinstance(listener, dict):
+        raise ConfigError("Expected a dictionary", ("listeners", str(num)))
+
    listener_type = listener["type"]
    # Raise a helpful error if direct TCP replication is still configured.
    if listener_type == "replication":
@@ -928,6 +934,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
        resources=resources,
        additional_resources=listener.get("additional_resources", {}),
        tag=listener.get("tag"),
+        request_id_header=listener.get("request_id_header"),
    )

    return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
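The practical effect of the stricter checks is that a mis-typed config fails fast with a pointer to the offending path instead of an opaque stack trace. A standalone sketch of the same idea (plain `ValueError` standing in for Synapse's `ConfigError`):

```python
def check_listeners(config: dict) -> None:
    # Mirrors the validation added above: `listeners` must be a list of
    # dicts, and errors name the offending config path.
    listeners = config.get("listeners", [])
    if not isinstance(listeners, list):
        raise ValueError("Expected a list at ('listeners',)")
    for i, listener in enumerate(listeners):
        if not isinstance(listener, dict):
            raise ValueError(f"Expected a dictionary at ('listeners', '{i}')")

check_listeners({"listeners": [{"port": 8008, "type": "http"}]})  # passes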
@@ -167,7 +167,6 @@ class EventBuilder:
            "content": self.content,
            "unsigned": self.unsigned,
            "depth": depth,
-            "prev_state": [],
        }

        if self.is_state():
@@ -906,9 +906,6 @@ class FederationClient(FederationBase):
        # The protoevent received over the JSON wire may not have all
        # the required fields. Lets just gloss over that because
        # there's some we never care about
-        if "prev_state" not in pdu_dict:
-            pdu_dict["prev_state"] = []
-
        ev = builder.create_local_event_from_event_dict(
            self._clock,
            self.hostname,
@@ -188,18 +188,21 @@ class E2eKeysHandler:
            )
            invalid_cached_users = cached_users - valid_cached_users
            if invalid_cached_users:
-                # Fix up results. If we get here, there is either a bug in device
-                # list tracking, or we hit the race mentioned above.
+                # Fix up results. If we get here, it means there was either a bug in
+                # device list tracking, or we hit the race mentioned above.
                # TODO: In practice, this path is hit fairly often in existing
                #       deployments when clients query the keys of departed remote
                #       users. A background update to mark the appropriate device
                #       lists as unsubscribed is needed.
                #       https://github.com/matrix-org/synapse/issues/13651
+                # Note that this currently introduces a failure mode when clients
+                # are trying to decrypt old messages from a remote user whose
+                # homeserver is no longer available. We may want to consider falling
+                # back to the cached data when we fail to retrieve a device list
+                # over federation for such remote users.
                user_ids_not_in_cache.update(invalid_cached_users)
                for invalid_user_id in invalid_cached_users:
                    remote_results.pop(invalid_user_id)
+                # This log message may be removed if it turns out it's almost
+                # entirely triggered by races.
+                logger.error(
+                    "Devices for %s were cached, but the server no longer shares "
+                    "any rooms with them. The cached device lists are stale.",
+                    invalid_cached_users,
+                )

        for user_id, devices in remote_results.items():
            user_devices = results.setdefault(user_id, {})
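In outline, the fix-up moves users with untrustworthy cached device lists out of the cached results and into the set that gets re-fetched over federation. A standalone sketch (not the handler's real signature):

```python
def fix_up_cached_device_lists(
    cached_users: set, valid_cached_users: set, remote_results: dict
) -> set:
    # Users whose device lists were cached but whose cache can no longer be
    # trusted are dropped from the cached results and fetched remotely instead.
    invalid = cached_users - valid_cached_users
    for user_id in invalid:
        remote_results.pop(user_id, None)
    return invalid  # callers add these to user_ids_not_in_cache

stale = fix_up_cached_device_lists(
    {"@a:remote", "@b:remote"}, {"@a:remote"}, {"@b:remote": {}}
)
assert stale == {"@b:remote"}
```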
@@ -862,7 +862,15 @@ class FederationEventHandler:
            self._sanity_check_event(event)
        except SynapseError as err:
            logger.warning("Event %s failed sanity check: %s", event_id, err)
+            await self._store.record_event_failed_pull_attempt(
+                event.room_id, event_id, str(err)
+            )
            return
+        except Exception as exc:
+            await self._store.record_event_failed_pull_attempt(
+                event.room_id, event_id, str(exc)
+            )
+            raise exc

        try:
            try:
@@ -897,10 +905,19 @@ class FederationEventHandler:
                backfilled=backfilled,
            )
        except FederationError as e:
+            await self._store.record_event_failed_pull_attempt(
+                event.room_id, event_id, str(e)
+            )
+
            if e.code == 403:
                logger.warning("Pulled event %s failed history check.", event_id)
            else:
                raise
+        except Exception as exc:
+            await self._store.record_event_failed_pull_attempt(
+                event.room_id, event_id, str(exc)
+            )
+            raise exc

    @trace
    async def _compute_event_context_with_maybe_missing_prevs(
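The pattern is the same at each failure site: record the attempt so future federation pulls can back off intelligently, then preserve the original control flow (return for sanity-check failures, re-raise for everything else). Schematically, as a sketch rather than the handler itself:

```python
async def process_pulled_event(store, event, event_id, handle):
    # Any failure while processing a pulled event is recorded against the
    # (room_id, event_id) pair so later pull attempts can back off.
    try:
        await handle(event)
    except Exception as exc:
        await store.record_event_failed_pull_attempt(
            event.room_id, event_id, str(exc)
        )
        raise
```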
@@ -752,20 +752,12 @@ class EventCreationHandler:
        if builder.type == EventTypes.Member:
            membership = builder.content.get("membership", None)
            if membership == Membership.JOIN:
-                return await self._is_server_notices_room(builder.room_id)
+                return await self.store.is_server_notice_room(builder.room_id)
            elif membership == Membership.LEAVE:
                # the user is always allowed to leave (but not kick people)
                return builder.state_key == requester.user.to_string()
        return False

-    async def _is_server_notices_room(self, room_id: str) -> bool:
-        if self.config.servernotices.server_notices_mxid is None:
-            return False
-        is_server_notices_room = await self.store.check_local_user_in_room(
-            user_id=self.config.servernotices.server_notices_mxid, room_id=room_id
-        )
-        return is_server_notices_room
-
    async def assert_accepted_privacy_policy(self, requester: Requester) -> None:
        """Check if a user has accepted the privacy policy
@@ -851,7 +851,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
            old_membership == Membership.INVITE
            and effective_membership_state == Membership.LEAVE
        ):
-            is_blocked = await self._is_server_notice_room(room_id)
+            is_blocked = await self.store.is_server_notice_room(room_id)
            if is_blocked:
                raise SynapseError(
                    HTTPStatus.FORBIDDEN,
@@ -1631,14 +1631,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

        return False

-    async def _is_server_notice_room(self, room_id: str) -> bool:
-        if self._server_notices_mxid is None:
-            return False
-        is_server_notices_room = await self.store.check_local_user_in_room(
-            user_id=self._server_notices_mxid, room_id=room_id
-        )
-        return is_server_notices_room
-

class RoomMemberMasterHandler(RoomMemberHandler):
    def __init__(self, hs: "HomeServer"):
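Both handlers previously carried their own copy of this check; per the changelog entry for [\#13780](https://github.com/matrix-org/synapse/issues/13780) it now lives on the datastore as `is_server_notice_room`. The removed helpers boiled down to the following (a sketch distilled from the deleted code, not the datastore's actual method):

```python
async def is_server_notice_room(store, server_notices_mxid, room_id: str) -> bool:
    # A room counts as a "server notices" room if the configured
    # server-notices user is a local member of it; with no such user
    # configured, nothing matches.
    if server_notices_mxid is None:
        return False
    return await store.check_local_user_in_room(
        user_id=server_notices_mxid, room_id=room_id
    )
```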
@@ -72,10 +72,12 @@ class SynapseRequest(Request):
        site: "SynapseSite",
        *args: Any,
        max_request_body_size: int = 1024,
+        request_id_header: Optional[str] = None,
        **kw: Any,
    ):
        super().__init__(channel, *args, **kw)
        self._max_request_body_size = max_request_body_size
+        self.request_id_header = request_id_header
        self.synapse_site = site
        self.reactor = site.reactor
        self._channel = channel  # this is used by the tests
@@ -172,7 +174,14 @@ class SynapseRequest(Request):
        self._opentracing_span = span

    def get_request_id(self) -> str:
-        return "%s-%i" % (self.get_method(), self.request_seq)
+        request_id_value = None
+        if self.request_id_header:
+            request_id_value = self.getHeader(self.request_id_header)
+
+        if request_id_value is None:
+            request_id_value = str(self.request_seq)
+
+        return "%s-%s" % (self.get_method(), request_id_value)

    def get_redacted_uri(self) -> str:
        """Gets the redacted URI associated with the request (or placeholder if the URI
@@ -611,12 +620,15 @@ class SynapseSite(Site):
        proxied = config.http_options.x_forwarded
        request_class = XForwardedForRequest if proxied else SynapseRequest

+        request_id_header = config.http_options.request_id_header
+
        def request_factory(channel: HTTPChannel, queued: bool) -> Request:
            return request_class(
                channel,
                self,
                max_request_body_size=max_request_body_size,
                queued=queued,
+                request_id_header=request_id_header,
            )

        self.requestFactory = request_factory  # type: ignore
@@ -198,7 +198,7 @@ class BulkPushRuleEvaluator:
        return pl_event.content if pl_event else {}, sender_level

    async def _get_mutual_relations(
-        self, event: EventBase, rules: Iterable[Tuple[PushRule, bool]]
+        self, parent_id: str, rules: Iterable[Tuple[PushRule, bool]]
    ) -> Dict[str, Set[Tuple[str, str]]]:
        """
        Fetch event metadata for events which related to the same event as the given event.
@@ -206,7 +206,7 @@ class BulkPushRuleEvaluator:
        If the given event has no relation information, returns an empty dictionary.

        Args:
-            event_id: The event ID which is targeted by relations.
+            parent_id: The event ID which is targeted by relations.
            rules: The push rules which will be processed for this event.

        Returns:
@@ -220,12 +220,6 @@ class BulkPushRuleEvaluator:
        if not self._relations_match_enabled:
            return {}

-        # If the event does not have a relation, then cannot have any mutual
-        # relations.
-        relation = relation_from_event(event)
-        if not relation:
-            return {}
-
        # Pre-filter to figure out which relation types are interesting.
        rel_types = set()
        for rule, enabled in rules:
@@ -246,9 +240,7 @@ class BulkPushRuleEvaluator:
            return {}

        # If any valid rules were found, fetch the mutual relations.
-        return await self.store.get_mutual_event_relations(
-            relation.parent_id, rel_types
-        )
+        return await self.store.get_mutual_event_relations(parent_id, rel_types)

    @measure_func("action_for_event_by_user")
    async def action_for_event_by_user(
@@ -281,9 +273,17 @@ class BulkPushRuleEvaluator:
            sender_power_level,
        ) = await self._get_power_levels_and_sender_level(event, context)

-        relations = await self._get_mutual_relations(
-            event, itertools.chain(*rules_by_user.values())
-        )
+        relation = relation_from_event(event)
+        # If the event does not have a relation, then cannot have any mutual
+        # relations or thread ID.
+        relations = {}
+        thread_id = "main"
+        if relation:
+            relations = await self._get_mutual_relations(
+                relation.parent_id, itertools.chain(*rules_by_user.values())
+            )
+            if relation.rel_type == RelationTypes.THREAD:
+                thread_id = relation.parent_id

        evaluator = PushRuleEvaluatorForEvent(
            event,
@@ -352,6 +352,7 @@ class BulkPushRuleEvaluator:
                event.event_id,
                actions_by_user,
                count_as_unread,
+                thread_id,
            )

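The thread bucketing added above reduces to a small rule: events relating to a thread root are counted under that root's event ID, everything else under `"main"`. A standalone restatement (with `m.thread` written out rather than the `RelationTypes.THREAD` constant used in the real code):

```python
from typing import Optional

def thread_id_for(rel_type: Optional[str], parent_id: Optional[str]) -> str:
    # Thread-relation events are bucketed under their root's event ID;
    # everything else (including non-relations) lands in the "main" thread.
    if rel_type == "m.thread" and parent_id is not None:
        return parent_id
    return "main"

assert thread_id_for(None, None) == "main"
assert thread_id_for("m.thread", "$root_event") == "$root_event"
```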
@@ -80,6 +80,7 @@ from synapse.rest.admin.users import (
    SearchUsersRestServlet,
    ShadowBanRestServlet,
    UserAdminServlet,
+    UserByExternalId,
    UserMembershipRestServlet,
    UserRegisterServlet,
    UserRestServletV2,
@@ -275,6 +276,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    ListDestinationsRestServlet(hs).register(http_server)
    RoomMessagesRestServlet(hs).register(http_server)
    RoomTimestampToEventRestServlet(hs).register(http_server)
+    UserByExternalId(hs).register(http_server)

    # Some servlets only get registered for the main process.
    if hs.config.worker.worker_app is None:
@@ -1156,3 +1156,30 @@ class AccountDataRestServlet(RestServlet):
                "rooms": by_room_data,
            },
        }
+
+
+class UserByExternalId(RestServlet):
+    """Find a user based on an external ID from an auth provider"""
+
+    PATTERNS = admin_patterns(
+        "/auth_providers/(?P<provider>[^/]*)/users/(?P<external_id>[^/]*)"
+    )
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self._store = hs.get_datastores().main
+
+    async def on_GET(
+        self,
+        request: SynapseRequest,
+        provider: str,
+        external_id: str,
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self._auth, request)
+
+        user_id = await self._store.get_user_by_external_id(provider, external_id)
+
+        if user_id is None:
+            raise NotFoundError("User not found")
+
+        return HTTPStatus.OK, {"user_id": user_id}
@@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple
from urllib.parse import urlparse

from pydantic import StrictBool, StrictStr, constr
+from typing_extensions import Literal

from twisted.web.server import Request

@@ -43,6 +44,7 @@ from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
from synapse.rest.client.models import (
    AuthenticationData,
+    ClientSecretStr,
    EmailRequestTokenBody,
    MsisdnRequestTokenBody,
)
@@ -630,6 +632,11 @@ class ThreepidAddRestServlet(RestServlet):
        self.auth = hs.get_auth()
        self.auth_handler = hs.get_auth_handler()

+    class PostBody(RequestBodyModel):
+        auth: Optional[AuthenticationData] = None
+        client_secret: ClientSecretStr
+        sid: StrictStr
+
    @interactive_auth_handler
    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        if not self.hs.config.registration.enable_3pid_changes:
@@ -639,22 +646,17 @@ class ThreepidAddRestServlet(RestServlet):

        requester = await self.auth.get_user_by_req(request)
        user_id = requester.user.to_string()
-        body = parse_json_object_from_request(request)
-
-        assert_params_in_dict(body, ["client_secret", "sid"])
-        sid = body["sid"]
-        client_secret = body["client_secret"]
-        assert_valid_client_secret(client_secret)
+        body = parse_and_validate_json_object_from_request(request, self.PostBody)

        await self.auth_handler.validate_user_via_ui_auth(
            requester,
            request,
-            body,
+            body.dict(exclude_unset=True),
            "add a third-party identifier to your account",
        )

        validation_session = await self.identity_handler.validate_threepid_session(
-            client_secret, sid
+            body.client_secret, body.sid
        )
        if validation_session:
            await self.auth_handler.add_threepid(
@@ -679,23 +681,20 @@ class ThreepidBindRestServlet(RestServlet):
        self.identity_handler = hs.get_identity_handler()
        self.auth = hs.get_auth()

-    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        body = parse_json_object_from_request(request)
+    class PostBody(RequestBodyModel):
+        client_secret: ClientSecretStr
+        id_access_token: StrictStr
+        id_server: StrictStr
+        sid: StrictStr

-        assert_params_in_dict(
-            body, ["id_server", "sid", "id_access_token", "client_secret"]
-        )
-        id_server = body["id_server"]
-        sid = body["sid"]
-        id_access_token = body["id_access_token"]
-        client_secret = body["client_secret"]
-        assert_valid_client_secret(client_secret)
+    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+        body = parse_and_validate_json_object_from_request(request, self.PostBody)

        requester = await self.auth.get_user_by_req(request)
        user_id = requester.user.to_string()

        await self.identity_handler.bind_threepid(
-            client_secret, sid, user_id, id_server, id_access_token
+            body.client_secret, body.sid, user_id, body.id_server, body.id_access_token
        )

        return 200, {}
@@ -711,23 +710,27 @@ class ThreepidUnbindRestServlet(RestServlet):
        self.auth = hs.get_auth()
        self.datastore = self.hs.get_datastores().main

+    class PostBody(RequestBodyModel):
+        address: StrictStr
+        id_server: Optional[StrictStr] = None
+        medium: Literal["email", "msisdn"]
+
    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        """Unbind the given 3pid from a specific identity server, or identity servers that are
        known to have this 3pid bound
        """
        requester = await self.auth.get_user_by_req(request)
-        body = parse_json_object_from_request(request)
-        assert_params_in_dict(body, ["medium", "address"])
-
-        medium = body.get("medium")
-        address = body.get("address")
-        id_server = body.get("id_server")
+        body = parse_and_validate_json_object_from_request(request, self.PostBody)

        # Attempt to unbind the threepid from an identity server. If id_server is None, try to
        # unbind from all identity servers this threepid has been added to in the past
        result = await self.identity_handler.try_unbind_threepid(
            requester.user.to_string(),
-            {"address": address, "medium": medium, "id_server": id_server},
+            {
+                "address": body.address,
+                "medium": body.medium,
+                "id_server": body.id_server,
+            },
        )
        return 200, {"id_server_unbind_result": "success" if result else "no-support"}
@@ -741,21 +744,25 @@ class ThreepidDeleteRestServlet(RestServlet):
        self.auth = hs.get_auth()
        self.auth_handler = hs.get_auth_handler()

+    class PostBody(RequestBodyModel):
+        address: StrictStr
+        id_server: Optional[StrictStr] = None
+        medium: Literal["email", "msisdn"]
+
    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        if not self.hs.config.registration.enable_3pid_changes:
            raise SynapseError(
                400, "3PID changes are disabled on this server", Codes.FORBIDDEN
            )

-        body = parse_json_object_from_request(request)
-        assert_params_in_dict(body, ["medium", "address"])
+        body = parse_and_validate_json_object_from_request(request, self.PostBody)

        requester = await self.auth.get_user_by_req(request)
        user_id = requester.user.to_string()

        try:
            ret = await self.auth_handler.delete_threepid(
-                user_id, body["medium"], body["address"], body.get("id_server")
+                user_id, body.medium, body.address, body.id_server
            )
        except Exception:
            # NB. This endpoint should succeed if there is nothing to
@ -36,18 +36,20 @@ class AuthenticationData(RequestBodyModel):
    type: Optional[StrictStr] = None


class ThreePidRequestTokenBody(RequestBodyModel):
    if TYPE_CHECKING:
        client_secret: StrictStr
    else:
        # See also assert_valid_client_secret()
        client_secret: constr(
            regex="[0-9a-zA-Z.=_-]",  # noqa: F722
            min_length=0,
            max_length=255,
            strict=True,
        )
if TYPE_CHECKING:
    ClientSecretStr = StrictStr
else:
    # See also assert_valid_client_secret()
    ClientSecretStr = constr(
        regex="[0-9a-zA-Z.=_-]",  # noqa: F722
        min_length=1,
        max_length=255,
        strict=True,
    )


class ThreepidRequestTokenBody(RequestBodyModel):
    client_secret: ClientSecretStr
    id_server: Optional[StrictStr]
    id_access_token: Optional[StrictStr]
    next_link: Optional[StrictStr]

@ -62,7 +64,7 @@ class ThreePidRequestTokenBody(RequestBodyModel):
        return token


class EmailRequestTokenBody(ThreePidRequestTokenBody):
class EmailRequestTokenBody(ThreepidRequestTokenBody):
    email: StrictStr

    # Canonicalise the email address. The addresses are all stored canonicalised

@ -80,6 +82,6 @@ else:
    ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True)


class MsisdnRequestTokenBody(ThreePidRequestTokenBody):
class MsisdnRequestTokenBody(ThreepidRequestTokenBody):
    country: ISO3116_1_Alpha_2
    phone_number: StrictStr

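The refactor above replaces hand-rolled assert_params_in_dict / assert_valid_client_secret checks with typed pydantic models. A minimal, self-contained sketch of the same pattern, using plain pydantic v1 rather than Synapse's RequestBodyModel helper (model and field names here are illustrative):

# Sketch only: shows how a constr-based field like ClientSecretStr rejects bad
# input at parse time. Requires pydantic v1.
from typing import Optional

from pydantic import BaseModel, StrictStr, ValidationError, constr

# Mirrors the constraints above: 1-255 chars, strict (no int -> str coercion).
ClientSecretStr = constr(regex="[0-9a-zA-Z.=_-]", min_length=1, max_length=255, strict=True)


class ThreepidRequestTokenBody(BaseModel):
    client_secret: ClientSecretStr
    id_server: Optional[StrictStr]


body = ThreepidRequestTokenBody.parse_obj(
    {"client_secret": "abc123", "id_server": "id.example.com"}
)
print(body.client_secret)  # "abc123"

try:
    # min_length=1 means an empty client_secret is rejected during parsing,
    # replacing the old manual assert_valid_client_secret() call.
    ThreepidRequestTokenBody.parse_obj({"client_secret": ""})
except ValidationError as e:
    print(e.errors()[0]["loc"])  # ('client_secret',)
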
@ -19,6 +19,8 @@ import shutil
from io import BytesIO
from typing import IO, TYPE_CHECKING, Dict, List, Optional, Set, Tuple

from matrix_common.types.mxc_uri import MXCUri

import twisted.internet.error
import twisted.web.http
from twisted.internet.defer import Deferred

@ -186,7 +188,7 @@ class MediaRepository:
        content: IO,
        content_length: int,
        auth_user: UserID,
    ) -> str:
    ) -> MXCUri:
        """Store uploaded content for a local user and return the mxc URL

        Args:

@ -219,7 +221,7 @@ class MediaRepository:

        await self._generate_thumbnails(None, media_id, media_id, media_type)

        return "mxc://%s/%s" % (self.server_name, media_id)
        return MXCUri(self.server_name, media_id)

    async def get_local_media(
        self, request: SynapseRequest, media_id: str, name: Optional[str]

@ -101,6 +101,8 @@ class UploadResource(DirectServeJsonResource):
            # the default 404, as that would just be confusing.
            raise SynapseError(400, "Bad content")

        logger.info("Uploaded content with URI %r", content_uri)
        logger.info("Uploaded content with URI '%s'", content_uri)

        respond_with_json(request, 200, {"content_uri": content_uri}, send_cors=True)
        respond_with_json(
            request, 200, {"content_uri": str(content_uri)}, send_cors=True
        )

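For context on the MXCUri change: the typed value is carried around internally and only serialised at the JSON boundary. A small sketch based purely on the usage visible above; the attribute names are assumed from the constructor arguments:

# MXCUri is constructed from (server_name, media_id) and converted back to the
# canonical "mxc://..." string when building the response body.
from matrix_common.types.mxc_uri import MXCUri

content_uri = MXCUri("example.com", "abcdef123456")

# Typed object internally ...
assert content_uri.server_name == "example.com"  # assumed attribute names
assert content_uri.media_id == "abcdef123456"

# ... serialised only at the JSON boundary, as in UploadResource above.
response_body = {"content_uri": str(content_uri)}
print(response_body)  # {'content_uri': 'mxc://example.com/abcdef123456'}
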
@ -577,6 +577,21 @@ async def _iterative_auth_checks(
                if ev.rejected_reason is None:
                    auth_events[key] = event_map[ev_id]

        if event.rejected_reason is not None:
            # Do not admit previously rejected events into state.
            # TODO: This isn't spec compliant. Events that were previously rejected due
            #       to failing auth checks at their state, but pass auth checks during
            #       state resolution should be accepted. Synapse does not handle the
            #       change of rejection status well, so we preserve the previous
            #       rejection status for now.
            #
            #       Note that events rejected for non-state reasons, such as having the
            #       wrong auth events, should remain rejected.
            #
            #       https://spec.matrix.org/v1.2/rooms/v9/#rejected-events
            #       https://github.com/matrix-org/synapse/issues/13797
            continue

        try:
            event_auth.check_state_dependent_auth_rules(
                event,

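A distilled, stand-alone sketch of the guard introduced above, with toy stand-ins for Synapse's event types (hypothetical names, not the real API):

# During iterative auth checks, previously rejected events are skipped before
# they can be admitted into the resolved state.
from typing import List, Optional


class Event:  # stand-in for Synapse's EventBase
    def __init__(self, event_id: str, rejected_reason: Optional[str]) -> None:
        self.event_id = event_id
        self.rejected_reason = rejected_reason


def iteratively_accept(sorted_events: List[Event]) -> List[Event]:
    accepted: List[Event] = []
    for event in sorted_events:
        if event.rejected_reason is not None:
            # Preserve the previous rejection status rather than re-running
            # auth checks against the partially resolved state.
            continue
        accepted.append(event)  # the real code runs state-dependent auth rules here
    return accepted


events = [Event("$ok", None), Event("$kicked", "auth_error")]
print([e.event_id for e in iteratively_accept(events)])  # ['$ok']
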
@ -533,6 +533,7 @@ class BackgroundUpdater:
            index_name: name of index to add
            table: table to add index to
            columns: columns/expressions to include in index
            where_clause: A WHERE clause to specify a partial unique index.
            unique: true to make a UNIQUE index
            psql_only: true to only create this index on psql databases (useful
                for virtual sqlite tables)

@ -581,9 +582,6 @@ class BackgroundUpdater:
        def create_index_sqlite(conn: Connection) -> None:
            # Sqlite doesn't support concurrent creation of indexes.
            #
            # We don't use partial indices on SQLite as it wasn't introduced
            # until 3.8, and wheezy and CentOS 7 have 3.7
            #
            # We assume that sqlite doesn't give us invalid indices; however
            # we may still end up with the index existing but the
            # background_updates not having been recorded if synapse got shut

@ -591,12 +589,13 @@ class BackgroundUpdater:
            # has supported CREATE TABLE|INDEX IF NOT EXISTS since 3.3.0.)
            sql = (
                "CREATE %(unique)s INDEX IF NOT EXISTS %(name)s ON %(table)s"
                " (%(columns)s)"
                " (%(columns)s) %(where_clause)s"
            ) % {
                "unique": "UNIQUE" if unique else "",
                "name": index_name,
                "table": table,
                "columns": ", ".join(columns),
                "where_clause": "WHERE " + where_clause if where_clause else "",
            }

            c = conn.cursor()

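A toy reimplementation of the string-building above, showing the SQL the SQLite path now emits for a partial unique index (using the receipts index registered later in this changeset as the example):

from typing import List, Optional


def build_create_index_sql(
    index_name: str,
    table: str,
    columns: List[str],
    unique: bool = False,
    where_clause: Optional[str] = None,
) -> str:
    # Same template as create_index_sqlite above.
    return (
        "CREATE %(unique)s INDEX IF NOT EXISTS %(name)s ON %(table)s"
        " (%(columns)s) %(where_clause)s"
    ) % {
        "unique": "UNIQUE" if unique else "",
        "name": index_name,
        "table": table,
        "columns": ", ".join(columns),
        "where_clause": "WHERE " + where_clause if where_clause else "",
    }


print(
    build_create_index_sql(
        "receipts_linearized_unique_index",
        "receipts_linearized",
        ["room_id", "receipt_type", "user_id"],
        unique=True,
        where_clause="thread_id IS NULL",
    )
)
# CREATE UNIQUE INDEX IF NOT EXISTS receipts_linearized_unique_index
#   ON receipts_linearized (room_id, receipt_type, user_id) WHERE thread_id IS NULL
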
@ -1191,6 +1191,7 @@ class DatabasePool:
        keyvalues: Dict[str, Any],
        values: Dict[str, Any],
        insertion_values: Optional[Dict[str, Any]] = None,
        where_clause: Optional[str] = None,
        lock: bool = True,
    ) -> bool:
        """

@ -1203,6 +1204,7 @@ class DatabasePool:
            keyvalues: The unique key tables and their new values
            values: The nonunique columns and their new values
            insertion_values: additional key/values to use only when inserting
            where_clause: An index predicate to apply to the upsert.
            lock: True to lock the table when doing the upsert. Unused when performing
                a native upsert.
        Returns:

@ -1213,7 +1215,12 @@ class DatabasePool:

        if table not in self._unsafe_to_upsert_tables:
            return self.simple_upsert_txn_native_upsert(
                txn, table, keyvalues, values, insertion_values=insertion_values
                txn,
                table,
                keyvalues,
                values,
                insertion_values=insertion_values,
                where_clause=where_clause,
            )
        else:
            return self.simple_upsert_txn_emulated(

@ -1222,6 +1229,7 @@ class DatabasePool:
                keyvalues,
                values,
                insertion_values=insertion_values,
                where_clause=where_clause,
                lock=lock,
            )

@ -1232,6 +1240,7 @@ class DatabasePool:
        keyvalues: Dict[str, Any],
        values: Dict[str, Any],
        insertion_values: Optional[Dict[str, Any]] = None,
        where_clause: Optional[str] = None,
        lock: bool = True,
    ) -> bool:
        """

@ -1240,6 +1249,7 @@ class DatabasePool:
            keyvalues: The unique key tables and their new values
            values: The nonunique columns and their new values
            insertion_values: additional key/values to use only when inserting
            where_clause: An index predicate to apply to the upsert.
            lock: True to lock the table when doing the upsert.
        Returns:
            Returns True if a row was inserted or updated (i.e. if `values` is

@ -1259,14 +1269,17 @@ class DatabasePool:
            else:
                return "%s = ?" % (key,)

        # Generate a where clause of each keyvalue and optionally the provided
        # index predicate.
        where = [_getwhere(k) for k in keyvalues]
        if where_clause:
            where.append(where_clause)

        if not values:
            # If `values` is empty, then all of the values we care about are in
            # the unique key, so there is nothing to UPDATE. We can just do a
            # SELECT instead to see if it exists.
            sql = "SELECT 1 FROM %s WHERE %s" % (
                table,
                " AND ".join(_getwhere(k) for k in keyvalues),
            )
            sql = "SELECT 1 FROM %s WHERE %s" % (table, " AND ".join(where))
            sqlargs = list(keyvalues.values())
            txn.execute(sql, sqlargs)
            if txn.fetchall():

@ -1277,7 +1290,7 @@ class DatabasePool:
            sql = "UPDATE %s SET %s WHERE %s" % (
                table,
                ", ".join("%s = ?" % (k,) for k in values),
                " AND ".join(_getwhere(k) for k in keyvalues),
                " AND ".join(where),
            )
            sqlargs = list(values.values()) + list(keyvalues.values())

@ -1307,6 +1320,7 @@ class DatabasePool:
        keyvalues: Dict[str, Any],
        values: Dict[str, Any],
        insertion_values: Optional[Dict[str, Any]] = None,
        where_clause: Optional[str] = None,
    ) -> bool:
        """
        Use the native UPSERT functionality in PostgreSQL.

@ -1316,6 +1330,7 @@ class DatabasePool:
            keyvalues: The unique key tables and their new values
            values: The nonunique columns and their new values
            insertion_values: additional key/values to use only when inserting
            where_clause: An index predicate to apply to the upsert.

        Returns:
            Returns True if a row was inserted or updated (i.e. if `values` is

@ -1331,11 +1346,12 @@ class DatabasePool:
        allvalues.update(values)
        latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)

        sql = ("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s") % (
        sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %s DO %s" % (
            table,
            ", ".join(k for k in allvalues),
            ", ".join("?" for _ in allvalues),
            ", ".join(k for k in keyvalues),
            f"WHERE {where_clause}" if where_clause else "",
            latter,
        )
        txn.execute(sql, list(allvalues.values()))

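Likewise for the native-upsert path, a toy helper mirroring the %-formatting above shows the SQL generated when a where_clause is supplied:

from typing import Dict, Optional


def build_native_upsert_sql(
    table: str,
    keyvalues: Dict[str, str],
    values: Dict[str, str],
    where_clause: Optional[str] = None,
) -> str:
    # Same string-building as simple_upsert_txn_native_upsert above.
    allvalues = {**keyvalues, **values}
    latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
    return "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %s DO %s" % (
        table,
        ", ".join(allvalues),
        ", ".join("?" for _ in allvalues),
        ", ".join(keyvalues),
        f"WHERE {where_clause}" if where_clause else "",
        latter,
    )


print(
    build_native_upsert_sql(
        "receipts_linearized",
        keyvalues={"room_id": "?", "receipt_type": "?", "user_id": "?"},
        values={"event_id": "?"},
        where_clause="thread_id IS NULL",
    )
)
# One line of output:
# INSERT INTO receipts_linearized (room_id, receipt_type, user_id, event_id)
#   VALUES (?, ?, ?, ?) ON CONFLICT (room_id, receipt_type, user_id)
#   WHERE thread_id IS NULL DO UPDATE SET event_id=EXCLUDED.event_id
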
@ -1294,6 +1294,51 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
        return event_id_results

    @trace
    async def record_event_failed_pull_attempt(
        self, room_id: str, event_id: str, cause: str
    ) -> None:
        """
        Record when we fail to pull an event over federation.

        This information allows us to be more intelligent when we decide to
        retry (we don't need to fail over and over) and we can process that
        event in the background so we don't block on it each time.

        Args:
            room_id: The room where the event failed to pull from
            event_id: The event that failed to be fetched or processed
            cause: The error message or reason that we failed to pull the event
        """
        await self.db_pool.runInteraction(
            "record_event_failed_pull_attempt",
            self._record_event_failed_pull_attempt_upsert_txn,
            room_id,
            event_id,
            cause,
            db_autocommit=True,  # Safe as it's a single upsert
        )

    def _record_event_failed_pull_attempt_upsert_txn(
        self,
        txn: LoggingTransaction,
        room_id: str,
        event_id: str,
        cause: str,
    ) -> None:
        sql = """
            INSERT INTO event_failed_pull_attempts (
                room_id, event_id, num_attempts, last_attempt_ts, last_cause
            )
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT (room_id, event_id) DO UPDATE SET
                num_attempts=event_failed_pull_attempts.num_attempts + 1,
                last_attempt_ts=EXCLUDED.last_attempt_ts,
                last_cause=EXCLUDED.last_cause;
        """

        txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause))

    async def get_missing_events(
        self,
        room_id: str,

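The ON CONFLICT ... DO UPDATE above is runnable as-is on modern SQLite, which makes the increment-on-conflict semantics easy to verify. A stdlib-only demo (sqlite3; upserts need SQLite >= 3.24):

import sqlite3
import time

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE event_failed_pull_attempts(
        room_id TEXT NOT NULL,
        event_id TEXT NOT NULL,
        num_attempts INT NOT NULL,
        last_attempt_ts BIGINT NOT NULL,
        last_cause TEXT NOT NULL,
        PRIMARY KEY (room_id, event_id)
    )
    """
)

sql = """
    INSERT INTO event_failed_pull_attempts (
        room_id, event_id, num_attempts, last_attempt_ts, last_cause
    )
    VALUES (?, ?, ?, ?, ?)
    ON CONFLICT (room_id, event_id) DO UPDATE SET
        num_attempts=event_failed_pull_attempts.num_attempts + 1,
        last_attempt_ts=EXCLUDED.last_attempt_ts,
        last_cause=EXCLUDED.last_cause
"""

# The first attempt inserts num_attempts=1; each later conflict increments it.
for cause in ("timeout", "timeout", "bad signature"):
    conn.execute(sql, ("!room:hs", "$event", 1, int(time.time() * 1000), cause))

print(
    conn.execute(
        "SELECT num_attempts, last_cause FROM event_failed_pull_attempts"
    ).fetchone()
)
# (3, 'bad signature')
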
@ -98,6 +98,7 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached

@ -232,6 +233,104 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
            replaces_index="event_push_summary_user_rm",
        )

        self.db_pool.updates.register_background_index_update(
            "event_push_summary_unique_index2",
            index_name="event_push_summary_unique_index2",
            table="event_push_summary",
            columns=["user_id", "room_id", "thread_id"],
            unique=True,
        )

        self.db_pool.updates.register_background_update_handler(
            "event_push_backfill_thread_id",
            self._background_backfill_thread_id,
        )

    async def _background_backfill_thread_id(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """
        Fill in the thread_id field for event_push_actions and event_push_summary.

        This is preparatory so that it can be made non-nullable in the future.

        Because all current (null) data is done in an unthreaded manner this
        simply assumes it is on the "main" timeline. Since event_push_actions
        are periodically cleared it is not possible to correctly re-calculate
        the thread_id.
        """
        event_push_actions_done = progress.get("event_push_actions_done", False)

        def add_thread_id_txn(
            txn: LoggingTransaction, table_name: str, start_stream_ordering: int
        ) -> int:
            sql = f"""
                SELECT stream_ordering
                FROM {table_name}
                WHERE
                    thread_id IS NULL
                    AND stream_ordering > ?
                ORDER BY stream_ordering
                LIMIT ?
            """
            txn.execute(sql, (start_stream_ordering, batch_size))

            # No more rows to process.
            rows = txn.fetchall()
            if not rows:
                progress[f"{table_name}_done"] = True
                self.db_pool.updates._background_update_progress_txn(
                    txn, "event_push_backfill_thread_id", progress
                )
                return 0

            # Update the thread ID for any of those rows.
            max_stream_ordering = rows[-1][0]

            sql = f"""
                UPDATE {table_name}
                SET thread_id = 'main'
                WHERE stream_ordering <= ? AND thread_id IS NULL
            """
            txn.execute(sql, (max_stream_ordering,))

            # Update progress.
            processed_rows = txn.rowcount
            progress[f"max_{table_name}_stream_ordering"] = max_stream_ordering
            self.db_pool.updates._background_update_progress_txn(
                txn, "event_push_backfill_thread_id", progress
            )

            return processed_rows

        # First update the event_push_actions table, then the event_push_summary table.
        #
        # Note that the event_push_actions_staging table is ignored since it is
        # assumed that items in that table will only exist for a short period of
        # time.
        if not event_push_actions_done:
            result = await self.db_pool.runInteraction(
                "event_push_backfill_thread_id",
                add_thread_id_txn,
                "event_push_actions",
                progress.get("max_event_push_actions_stream_ordering", 0),
            )
        else:
            result = await self.db_pool.runInteraction(
                "event_push_backfill_thread_id",
                add_thread_id_txn,
                "event_push_summary",
                progress.get("max_event_push_summary_stream_ordering", 0),
            )

            # Only done after the event_push_summary table is done.
            if not result:
                await self.db_pool.updates._end_background_update(
                    "event_push_backfill_thread_id"
                )

        return result

    @cached(tree=True, max_entries=5000)
    async def get_unread_event_push_actions_by_room_for_user(
        self,
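A condensed sketch of the batching pattern _background_backfill_thread_id uses: select up to batch_size NULL rows past a cursor, rewrite them, and remember the cursor so the next run resumes where this one stopped. Stdlib sqlite3 with a toy schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE event_push_actions (stream_ordering INT, thread_id TEXT)")
conn.executemany(
    "INSERT INTO event_push_actions VALUES (?, NULL)", [(i,) for i in range(1, 11)]
)


def backfill_batch(cursor_pos: int, batch_size: int) -> int:
    """Returns the new cursor position, or -1 when nothing is left to do."""
    rows = conn.execute(
        "SELECT stream_ordering FROM event_push_actions"
        " WHERE thread_id IS NULL AND stream_ordering > ?"
        " ORDER BY stream_ordering LIMIT ?",
        (cursor_pos, batch_size),
    ).fetchall()
    if not rows:
        return -1
    max_stream_ordering = rows[-1][0]
    # Like the real code, the <= bound also sweeps up any stragglers below
    # the cursor that are still NULL.
    conn.execute(
        "UPDATE event_push_actions SET thread_id = 'main'"
        " WHERE stream_ordering <= ? AND thread_id IS NULL",
        (max_stream_ordering,),
    )
    return max_stream_ordering


cursor_pos = 0
while (cursor_pos := backfill_batch(cursor_pos, batch_size=3)) != -1:
    pass
print(
    conn.execute(
        "SELECT COUNT(*) FROM event_push_actions WHERE thread_id IS NULL"
    ).fetchone()
)
# (0,)
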
@ -670,6 +769,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
        event_id: str,
        user_id_actions: Dict[str, Collection[Union[Mapping, str]]],
        count_as_unread: bool,
        thread_id: str,
    ) -> None:
        """Add the push actions for the event to the push action staging area.

@ -678,6 +778,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
            user_id_actions: A mapping of user_id to list of push actions, where
                an action can either be a string or dict.
            count_as_unread: Whether this event should increment unread counts.
            thread_id: The thread this event is parent of, if applicable.
        """
        if not user_id_actions:
            return

@ -686,7 +787,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
        # can be used to insert into the `event_push_actions_staging` table.
        def _gen_entry(
            user_id: str, actions: Collection[Union[Mapping, str]]
        ) -> Tuple[str, str, str, int, int, int]:
        ) -> Tuple[str, str, str, int, int, int, str]:
            is_highlight = 1 if _action_has_highlight(actions) else 0
            notif = 1 if "notify" in actions else 0
            return (

@ -696,11 +797,20 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
                notif,  # notif column
                is_highlight,  # highlight column
                int(count_as_unread),  # unread column
                thread_id,  # thread_id column
            )

        await self.db_pool.simple_insert_many(
            "event_push_actions_staging",
            keys=("event_id", "user_id", "actions", "notif", "highlight", "unread"),
            keys=(
                "event_id",
                "user_id",
                "actions",
                "notif",
                "highlight",
                "unread",
                "thread_id",
            ),
            values=[
                _gen_entry(user_id, actions)
                for user_id, actions in user_id_actions.items()

@ -981,6 +1091,8 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
        )

        # Replace the previous summary with the new counts.
        #
        # TODO(threads): Upsert per-thread instead of setting them all to main.
        self.db_pool.simple_upsert_txn(
            txn,
            table="event_push_summary",

@ -990,6 +1102,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
                "unread_count": unread_count,
                "stream_ordering": old_rotate_stream_ordering,
                "last_receipt_stream_ordering": stream_ordering,
                "thread_id": "main",
            },
        )

@ -1138,17 +1251,19 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas

        logger.info("Rotating notifications, handling %d rows", len(summaries))

        # TODO(threads): Update on a per-thread basis.
        self.db_pool.simple_upsert_many_txn(
            txn,
            table="event_push_summary",
            key_names=("user_id", "room_id"),
            key_values=[(user_id, room_id) for user_id, room_id in summaries],
            value_names=("notif_count", "unread_count", "stream_ordering"),
            value_names=("notif_count", "unread_count", "stream_ordering", "thread_id"),
            value_values=[
                (
                    summary.notif_count,
                    summary.unread_count,
                    summary.stream_ordering,
                    "main",
                )
                for summary in summaries.values()
            ],

@ -1255,7 +1370,6 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
            table="event_push_actions",
            columns=["highlight", "stream_ordering"],
            where_clause="highlight=0",
            psql_only=True,
        )

    async def get_push_actions_for_user(

@ -2192,9 +2192,9 @@ class PersistEventsStore:
        sql = """
            INSERT INTO event_push_actions (
                room_id, event_id, user_id, actions, stream_ordering,
                topological_ordering, notif, highlight, unread
                topological_ordering, notif, highlight, unread, thread_id
            )
            SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight, unread
            SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight, unread, thread_id
            FROM event_push_actions_staging
            WHERE event_id = ?
        """

@ -2435,17 +2435,31 @@ class PersistEventsStore:
            "DELETE FROM event_backward_extremities"
            " WHERE event_id = ? AND room_id = ?"
        )
        backward_extremity_tuples_to_remove = [
            (ev.event_id, ev.room_id)
            for ev in events
            if not ev.internal_metadata.is_outlier()
            # If we encountered an event with no prev_events, then we might
            # as well remove it now because it won't ever have anything else
            # to backfill from.
            or len(ev.prev_event_ids()) == 0
        ]
        txn.execute_batch(
            query,
            [
                (ev.event_id, ev.room_id)
                for ev in events
                if not ev.internal_metadata.is_outlier()
                # If we encountered an event with no prev_events, then we might
                # as well remove it now because it won't ever have anything else
                # to backfill from.
                or len(ev.prev_event_ids()) == 0
            ],
            backward_extremity_tuples_to_remove,
        )

        # Clear out the failed backfill attempts after we successfully pulled
        # the event. Since we no longer need these events as backward
        # extremities, it also means that they won't be backfilled from again so
        # we no longer need to store the backfill attempts around it.
        query = """
            DELETE FROM event_failed_pull_attempts
            WHERE event_id = ? and room_id = ?
        """
        txn.execute_batch(
            query,
            backward_extremity_tuples_to_remove,
        )

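The refactor above simply hoists the tuple list out of the first execute_batch call so the same batch feeds both DELETEs and the two tables stay in lockstep. A minimal illustration of the pattern, with toy stand-ins for Synapse's event objects and cursor:

from typing import List, Tuple


class FakeEvent:
    def __init__(self, event_id: str, room_id: str, outlier: bool) -> None:
        self.event_id, self.room_id, self.outlier = event_id, room_id, outlier


def tuples_to_remove(events: List[FakeEvent]) -> List[Tuple[str, str]]:
    # Compute the (event_id, room_id) pairs once.
    return [(ev.event_id, ev.room_id) for ev in events if not ev.outlier]


events = [FakeEvent("$a", "!r", False), FakeEvent("$b", "!r", True)]
batch = tuples_to_remove(events)
for table in ("event_backward_extremities", "event_failed_pull_attempts"):
    # In the real code this is txn.execute_batch(
    #     f"DELETE FROM {table} WHERE event_id = ? AND room_id = ?", batch)
    print(table, batch)
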
@ -419,6 +419,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
            "event_forward_extremities",
            "event_push_actions",
            "event_search",
            "event_failed_pull_attempts",
            "partial_state_events",
            "events",
            "federation_inbound_events_staging",

@ -441,6 +442,10 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
            "e2e_room_keys",
            "event_push_summary",
            "pusher_throttle",
            "insertion_events",
            "insertion_event_extremities",
            "insertion_event_edges",
            "batch_events",
            "room_account_data",
            "room_tags",
            # "rooms" happens last, to keep the foreign keys in the other tables

@ -113,6 +113,24 @@ class ReceiptsWorkerStore(SQLBaseStore):
            prefilled_cache=receipts_stream_prefill,
        )

        self.db_pool.updates.register_background_index_update(
            "receipts_linearized_unique_index",
            index_name="receipts_linearized_unique_index",
            table="receipts_linearized",
            columns=["room_id", "receipt_type", "user_id"],
            where_clause="thread_id IS NULL",
            unique=True,
        )

        self.db_pool.updates.register_background_index_update(
            "receipts_graph_unique_index",
            index_name="receipts_graph_unique_index",
            table="receipts_graph",
            columns=["room_id", "receipt_type", "user_id"],
            where_clause="thread_id IS NULL",
            unique=True,
        )

    def get_max_receipt_stream_id(self) -> int:
        """Get the current max stream ID for receipts stream"""
        return self._receipts_id_gen.get_current_token()

@ -677,6 +695,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
                "event_id": event_id,
                "event_stream_ordering": stream_ordering,
                "data": json_encoder.encode(data),
                "thread_id": None,
            },
            # receipts_linearized has a unique constraint on
            # (user_id, room_id, receipt_type), so no need to lock

@ -824,6 +843,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
            values={
                "event_ids": json_encoder.encode(event_ids),
                "data": json_encoder.encode(data),
                "thread_id": None,
            },
            # receipts_graph has a unique constraint on
            # (user_id, room_id, receipt_type), so no need to lock

@ -88,6 +88,8 @@ class RoomMemberWorkerStore(EventsWorkerStore):
        # at a time. Keyed by room_id.
        self._joined_host_linearizer = Linearizer("_JoinedHostsCache")

        self._server_notices_mxid = hs.config.servernotices.server_notices_mxid

        if (
            self.hs.config.worker.run_background_tasks
            and self.hs.config.metrics.metrics_flags.known_servers

@ -504,6 +506,21 @@ class RoomMemberWorkerStore(EventsWorkerStore):

        return membership == Membership.JOIN

    async def is_server_notice_room(self, room_id: str) -> bool:
        """
        Determines whether the given room is a 'Server Notices' room, used for
        sending server notices to a user.

        This is determined by seeing whether the server notices user is present
        in the room.
        """
        if self._server_notices_mxid is None:
            return False
        is_server_notices_room = await self.check_local_user_in_room(
            user_id=self._server_notices_mxid, room_id=room_id
        )
        return is_server_notices_room

    async def get_local_current_membership_for_user_in_room(
        self, user_id: str, room_id: str
    ) -> Tuple[Optional[str], Optional[str]]:

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

SCHEMA_VERSION = 72  # remember to update the list below when updating
SCHEMA_VERSION = 73  # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema

This should be incremented whenever the codebase changes its requirements on the

@ -77,6 +77,14 @@ Changes in SCHEMA_VERSION = 72:
    - Tables related to groups are dropped.
    - Unused column application_services_state.last_txn is dropped
    - Cache invalidation stream id sequence now begins at 2 to match code expectation.

Changes in SCHEMA_VERSION = 73:
    - thread_id column is added to event_push_actions, event_push_actions_staging,
      event_push_summary, receipts_linearized, and receipts_graph.
    - Add table `event_failed_pull_attempts` to keep track of when we fail to pull
      events over federation.
    - Add indexes to various tables (`event_failed_pull_attempts`, `insertion_events`,
      `batch_events`) to make it easy to delete all associated rows when purging a room.
"""

@ -0,0 +1,30 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- Add a nullable column for thread ID to the event push actions tables; this
-- will be filled in with a default value for any previously existing rows.
--
-- After migration this can be made non-nullable.

ALTER TABLE event_push_actions_staging ADD COLUMN thread_id TEXT;
ALTER TABLE event_push_actions ADD COLUMN thread_id TEXT;
ALTER TABLE event_push_summary ADD COLUMN thread_id TEXT;

-- Update the unique index for `event_push_summary`.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
  (7006, 'event_push_summary_unique_index2', '{}');

INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
  (7006, 'event_push_backfill_thread_id', '{}', 'event_push_summary_unique_index2');

@ -0,0 +1,30 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- Add a nullable column for thread ID to the receipts table; this allows a
-- receipt per user, per room, as well as an unthreaded receipt (corresponding
-- to a null thread ID).

ALTER TABLE receipts_linearized ADD COLUMN thread_id TEXT;
ALTER TABLE receipts_graph ADD COLUMN thread_id TEXT;

-- Rebuild the unique constraint with the thread_id.
ALTER TABLE receipts_linearized
    ADD CONSTRAINT receipts_linearized_uniqueness_thread
    UNIQUE (room_id, receipt_type, user_id, thread_id);

ALTER TABLE receipts_graph
    ADD CONSTRAINT receipts_graph_uniqueness_thread
    UNIQUE (room_id, receipt_type, user_id, thread_id);

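One subtlety worth spelling out: NULLs compare as distinct under a UNIQUE constraint, so the four-column constraints above do not deduplicate unthreaded (NULL thread_id) receipts by themselves; that is what the partial receipts_*_unique_index background updates are for. A runnable demonstration with stdlib sqlite3 (PostgreSQL behaves the same way here):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE receipts_graph (
        room_id TEXT NOT NULL,
        receipt_type TEXT NOT NULL,
        user_id TEXT NOT NULL,
        thread_id TEXT,
        CONSTRAINT receipts_graph_uniqueness_thread
            UNIQUE (room_id, receipt_type, user_id, thread_id)
    )
    """
)

row = ("!room:hs", "m.read", "@u:hs", None)
conn.execute("INSERT INTO receipts_graph VALUES (?, ?, ?, ?)", row)
conn.execute("INSERT INTO receipts_graph VALUES (?, ?, ?, ?)", row)  # no error!
print(conn.execute("SELECT COUNT(*) FROM receipts_graph").fetchone())  # (2,)

# The partial unique index added by the background update closes this gap:
conn.execute("DELETE FROM receipts_graph")
conn.execute(
    "CREATE UNIQUE INDEX receipts_graph_unique_index"
    " ON receipts_graph (room_id, receipt_type, user_id)"
    " WHERE thread_id IS NULL"
)
conn.execute("INSERT INTO receipts_graph VALUES (?, ?, ?, ?)", row)
try:
    conn.execute("INSERT INTO receipts_graph VALUES (?, ?, ?, ?)", row)
except sqlite3.IntegrityError as e:
    print(e)  # UNIQUE constraint failed
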
@ -0,0 +1,70 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- Allow multiple receipts per user per room via a nullable thread_id column.
--
-- SQLite doesn't support modifying constraints to an existing table, so it must
-- be recreated.

-- Create the new tables.
CREATE TABLE receipts_linearized_new (
    stream_id BIGINT NOT NULL,
    room_id TEXT NOT NULL,
    receipt_type TEXT NOT NULL,
    user_id TEXT NOT NULL,
    event_id TEXT NOT NULL,
    thread_id TEXT,
    event_stream_ordering BIGINT,
    data TEXT NOT NULL,
    CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id),
    CONSTRAINT receipts_linearized_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
);

CREATE TABLE receipts_graph_new (
    room_id TEXT NOT NULL,
    receipt_type TEXT NOT NULL,
    user_id TEXT NOT NULL,
    event_ids TEXT NOT NULL,
    thread_id TEXT,
    data TEXT NOT NULL,
    CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id),
    CONSTRAINT receipts_graph_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
);

-- Drop the old indexes.
DROP INDEX IF EXISTS receipts_linearized_id;
DROP INDEX IF EXISTS receipts_linearized_room_stream;
DROP INDEX IF EXISTS receipts_linearized_user;

-- Copy the data.
INSERT INTO receipts_linearized_new (stream_id, room_id, receipt_type, user_id, event_id, event_stream_ordering, data)
    SELECT stream_id, room_id, receipt_type, user_id, event_id, event_stream_ordering, data
    FROM receipts_linearized;
INSERT INTO receipts_graph_new (room_id, receipt_type, user_id, event_ids, data)
    SELECT room_id, receipt_type, user_id, event_ids, data
    FROM receipts_graph;

-- Drop the old tables.
DROP TABLE receipts_linearized;
DROP TABLE receipts_graph;

-- Rename the tables.
ALTER TABLE receipts_linearized_new RENAME TO receipts_linearized;
ALTER TABLE receipts_graph_new RENAME TO receipts_graph;

-- Create the indices.
CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id );
CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id );
CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id );

@ -0,0 +1,20 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
  (7007, 'receipts_linearized_unique_index', '{}');

INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
  (7007, 'receipts_graph_unique_index', '{}');

@ -0,0 +1,56 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- SQLite needs to rebuild indices which use partial indices on Postgres, but
-- previously did not use them on SQLite.

-- Drop each index that was added with register_background_index_update AND specified
-- a where_clause (that existed before this delta).

-- From events_bg_updates.py
DROP INDEX IF EXISTS event_contains_url_index;
-- There is also a redactions_censored_redacts index, but that gets dropped.
DROP INDEX IF EXISTS redactions_have_censored_ts;
-- There is also a PostgreSQL only index (event_contains_url_index2)
-- which gets renamed to event_contains_url_index.

-- From roommember.py
DROP INDEX IF EXISTS room_memberships_user_room_forgotten;

-- From presence.py
DROP INDEX IF EXISTS presence_stream_state_not_offline_idx;

-- From media_repository.py
DROP INDEX IF EXISTS local_media_repository_url_idx;

-- From event_push_actions.py
DROP INDEX IF EXISTS event_push_actions_highlights_index;
-- There's also an event_push_actions_stream_highlight_index which was previously
-- PostgreSQL-only.

-- From state.py
DROP INDEX IF EXISTS current_state_events_member_index;

-- Re-insert the background jobs to re-create the indices.
INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
  (7209, 'event_contains_url_index', '{}', NULL),
  (7209, 'redactions_have_censored_ts_idx', '{}', NULL),
  (7209, 'room_membership_forgotten_idx', '{}', NULL),
  (7209, 'presence_stream_not_offline_index', '{}', NULL),
  (7209, 'local_media_repository_url_idx', '{}', NULL),
  (7209, 'event_push_actions_highlights_index', '{}', NULL),
  (7209, 'event_push_actions_stream_highlight_index', '{}', NULL),
  (7209, 'current_state_members_idx', '{}', NULL)
ON CONFLICT (update_name) DO NOTHING;

@ -0,0 +1,29 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


-- Add a table that keeps track of when we failed to pull an event over
-- federation (via /backfill, `/event`, `/get_missing_events`, etc). This allows
-- us to be more intelligent when we decide to retry (we don't need to fail over
-- and over) and we can process that event in the background so we don't block
-- on it each time.
CREATE TABLE IF NOT EXISTS event_failed_pull_attempts(
    room_id TEXT NOT NULL REFERENCES rooms (room_id),
    event_id TEXT NOT NULL,
    num_attempts INT NOT NULL,
    last_attempt_ts BIGINT NOT NULL,
    last_cause TEXT NOT NULL,
    PRIMARY KEY (room_id, event_id)
);

@ -0,0 +1,22 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- Add index so we can easily purge all rows from a given `room_id`
CREATE INDEX IF NOT EXISTS event_failed_pull_attempts_room_id ON event_failed_pull_attempts(room_id);

-- MSC2716 related tables:
-- Add indexes so we can easily purge all rows from a given `room_id`
CREATE INDEX IF NOT EXISTS insertion_events_room_id ON insertion_events(room_id);
CREATE INDEX IF NOT EXISTS batch_events_room_id ON batch_events(room_id);

@ -205,8 +205,9 @@ def register_cache(
    add_resizable_cache(cache_name, resize_callback)

    metric = CacheMetric(cache, cache_type, cache_name, collect_callback)
    metric_name = "cache_%s_%s" % (cache_type, cache_name)
    caches_by_name[cache_name] = cache
    CACHE_METRIC_REGISTRY.register_hook(metric.collect)
    CACHE_METRIC_REGISTRY.register_hook(metric_name, metric.collect)
    return metric

@ -15,7 +15,7 @@
import logging
from functools import wraps
from types import TracebackType
from typing import Awaitable, Callable, Generator, List, Optional, Type, TypeVar
from typing import Awaitable, Callable, Dict, Generator, Optional, Type, TypeVar

from prometheus_client import CollectorRegistry, Counter, Metric
from typing_extensions import Concatenate, ParamSpec, Protocol

@ -220,21 +220,21 @@ class DynamicCollectorRegistry(CollectorRegistry):

    def __init__(self) -> None:
        super().__init__()
        self._pre_update_hooks: List[Callable[[], None]] = []
        self._pre_update_hooks: Dict[str, Callable[[], None]] = {}

    def collect(self) -> Generator[Metric, None, None]:
        """
        Collects metrics, calling pre-update hooks first.
        """

        for pre_update_hook in self._pre_update_hooks:
        for pre_update_hook in self._pre_update_hooks.values():
            pre_update_hook()

        yield from super().collect()

    def register_hook(self, hook: Callable[[], None]) -> None:
    def register_hook(self, metric_name: str, hook: Callable[[], None]) -> None:
        """
        Registers a hook that is called before metric collection.
        """

        self._pre_update_hooks.append(hook)
        self._pre_update_hooks[metric_name] = hook

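Keying the hooks by metric name makes registration idempotent: re-registering a name replaces the old hook instead of accumulating duplicates (plausibly the motivation here, given the register_cache change above; that reading is an inference, not stated in the diff). A minimal stand-alone sketch of that behaviour:

from typing import Callable, Dict


class TinyRegistry:
    def __init__(self) -> None:
        self._pre_update_hooks: Dict[str, Callable[[], None]] = {}

    def register_hook(self, metric_name: str, hook: Callable[[], None]) -> None:
        # Same name -> the old hook is replaced, not appended.
        self._pre_update_hooks[metric_name] = hook

    def collect(self) -> None:
        for hook in self._pre_update_hooks.values():
            hook()


registry = TinyRegistry()
registry.register_hook("cache_dict_foo", lambda: print("old hook"))
registry.register_hook("cache_dict_foo", lambda: print("new hook"))
registry.collect()  # prints "new hook" once; the list version would run both
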
@ -11,14 +11,23 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from unittest import mock

from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersion
from synapse.event_auth import (
    check_state_dependent_auth_rules,
    check_state_independent_auth_rules,
)
from synapse.events import make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.federation.transport.client import StateRequestResponse
from synapse.logging.context import LoggingContext
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.state.v2 import _mainline_sort, _reverse_topological_power_sort
from synapse.types import JsonDict

from tests import unittest
from tests.test_utils import event_injection, make_awaitable

@ -227,3 +236,615 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
|
|||
|
||||
if prev_exists_as_outlier:
|
||||
self.mock_federation_transport_client.get_event.assert_not_called()
|
||||
|
||||
def test_process_pulled_event_records_failed_backfill_attempts(
|
||||
self,
|
||||
) -> None:
|
||||
"""
|
||||
Test to make sure that failed backfill attempts for an event are
|
||||
recorded in the `event_failed_pull_attempts` table.
|
||||
|
||||
In this test, we pretend we are processing a "pulled" event via
|
||||
backfill. The pulled event has a fake `prev_event` which our server has
|
||||
obviously never seen before so it attempts to request the state at that
|
||||
`prev_event` which expectedly fails because it's a fake event. Because
|
||||
the server can't fetch the state at the missing `prev_event`, the
|
||||
"pulled" event fails the history check and is fails to process.
|
||||
|
||||
We check that we correctly record the number of failed pull attempts
|
||||
of the pulled event and as a sanity check, that the "pulled" event isn't
|
||||
persisted.
|
||||
"""
|
||||
OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
|
||||
main_store = self.hs.get_datastores().main
|
||||
|
||||
# Create the room
|
||||
user_id = self.register_user("kermit", "test")
|
||||
tok = self.login("kermit", "test")
|
||||
room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
|
||||
room_version = self.get_success(main_store.get_room_version(room_id))
|
||||
|
||||
# We expect an outbound request to /state_ids, so stub that out
|
||||
self.mock_federation_transport_client.get_room_state_ids.return_value = make_awaitable(
|
||||
{
|
||||
# Mimic the other server not knowing about the state at all.
|
||||
# We want to cause Synapse to throw an error (`Unable to get
|
||||
# missing prev_event $fake_prev_event`) and fail to backfill
|
||||
# the pulled event.
|
||||
"pdu_ids": [],
|
||||
"auth_chain_ids": [],
|
||||
}
|
||||
)
|
||||
# We also expect an outbound request to /state
|
||||
self.mock_federation_transport_client.get_room_state.return_value = make_awaitable(
|
||||
StateRequestResponse(
|
||||
# Mimic the other server not knowing about the state at all.
|
||||
# We want to cause Synapse to throw an error (`Unable to get
|
||||
# missing prev_event $fake_prev_event`) and fail to backfill
|
||||
# the pulled event.
|
||||
auth_events=[],
|
||||
state=[],
|
||||
)
|
||||
)
|
||||
|
||||
pulled_event = make_event_from_dict(
|
||||
self.add_hashes_and_signatures_from_other_server(
|
||||
{
|
||||
"type": "test_regular_type",
|
||||
"room_id": room_id,
|
||||
"sender": OTHER_USER,
|
||||
"prev_events": [
|
||||
# The fake prev event will make the pulled event fail
|
||||
# the history check (`Unable to get missing prev_event
|
||||
# $fake_prev_event`)
|
||||
"$fake_prev_event"
|
||||
],
|
||||
"auth_events": [],
|
||||
"origin_server_ts": 1,
|
||||
"depth": 12,
|
||||
"content": {"body": "pulled"},
|
||||
}
|
||||
),
|
||||
room_version,
|
||||
)
|
||||
|
||||
# The function under test: try to process the pulled event
|
||||
with LoggingContext("test"):
|
||||
self.get_success(
|
||||
self.hs.get_federation_event_handler()._process_pulled_event(
|
||||
self.OTHER_SERVER_NAME, pulled_event, backfilled=True
|
||||
)
|
||||
)
|
||||
|
||||
# Make sure our failed pull attempt was recorded
|
||||
backfill_num_attempts = self.get_success(
|
||||
main_store.db_pool.simple_select_one_onecol(
|
||||
table="event_failed_pull_attempts",
|
||||
keyvalues={"event_id": pulled_event.event_id},
|
||||
retcol="num_attempts",
|
||||
)
|
||||
)
|
||||
self.assertEqual(backfill_num_attempts, 1)
|
||||
|
||||
# The function under test: try to process the pulled event again
|
||||
with LoggingContext("test"):
|
||||
self.get_success(
|
||||
self.hs.get_federation_event_handler()._process_pulled_event(
|
||||
self.OTHER_SERVER_NAME, pulled_event, backfilled=True
|
||||
)
|
||||
)
|
||||
|
||||
# Make sure our second failed pull attempt was recorded (`num_attempts` was incremented)
|
||||
backfill_num_attempts = self.get_success(
|
||||
main_store.db_pool.simple_select_one_onecol(
|
||||
table="event_failed_pull_attempts",
|
||||
keyvalues={"event_id": pulled_event.event_id},
|
||||
retcol="num_attempts",
|
||||
)
|
||||
)
|
||||
self.assertEqual(backfill_num_attempts, 2)
|
||||
|
||||
# And as a sanity check, make sure the event was not persisted through all of this.
|
||||
persisted = self.get_success(
|
||||
main_store.get_event(pulled_event.event_id, allow_none=True)
|
||||
)
|
||||
self.assertIsNone(
|
||||
persisted,
|
||||
"pulled event that fails the history check should not be persisted at all",
|
||||
)
|
||||
|
||||
def test_process_pulled_event_clears_backfill_attempts_after_being_successfully_persisted(
|
||||
self,
|
||||
) -> None:
|
||||
"""
|
||||
Test to make sure that failed pull attempts
|
||||
(`event_failed_pull_attempts` table) for an event are cleared after the
|
||||
event is successfully persisted.
|
||||
|
||||
In this test, we pretend we are processing a "pulled" event via
|
||||
backfill. The pulled event succesfully processes and the backward
|
||||
extremeties are updated along with clearing out any failed pull attempts
|
||||
for those old extremities.
|
||||
|
||||
We check that we correctly cleared failed pull attempts of the
|
||||
pulled event.
|
||||
"""
|
||||
OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
|
||||
main_store = self.hs.get_datastores().main
|
||||
|
||||
# Create the room
|
||||
user_id = self.register_user("kermit", "test")
|
||||
tok = self.login("kermit", "test")
|
||||
room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
|
||||
room_version = self.get_success(main_store.get_room_version(room_id))
|
||||
|
||||
# allow the remote user to send state events
|
||||
self.helper.send_state(
|
||||
room_id,
|
||||
"m.room.power_levels",
|
||||
{"events_default": 0, "state_default": 0},
|
||||
tok=tok,
|
||||
)
|
||||
|
||||
# add the remote user to the room
|
||||
member_event = self.get_success(
|
||||
event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
|
||||
)
|
||||
|
||||
initial_state_map = self.get_success(
|
||||
main_store.get_partial_current_state_ids(room_id)
|
||||
)
|
||||
|
||||
auth_event_ids = [
|
||||
initial_state_map[("m.room.create", "")],
|
||||
initial_state_map[("m.room.power_levels", "")],
|
||||
member_event.event_id,
|
||||
]
|
||||
|
||||
pulled_event = make_event_from_dict(
|
||||
self.add_hashes_and_signatures_from_other_server(
|
||||
{
|
||||
"type": "test_regular_type",
|
||||
"room_id": room_id,
|
||||
"sender": OTHER_USER,
|
||||
"prev_events": [member_event.event_id],
|
||||
"auth_events": auth_event_ids,
|
||||
"origin_server_ts": 1,
|
||||
"depth": 12,
|
||||
"content": {"body": "pulled"},
|
||||
}
|
||||
),
|
||||
room_version,
|
||||
)
|
||||
|
||||
# Fake the "pulled" event failing to backfill once so we can test
|
||||
# if it's cleared out later on.
|
||||
self.get_success(
|
||||
main_store.record_event_failed_pull_attempt(
|
||||
pulled_event.room_id, pulled_event.event_id, "fake cause"
|
||||
)
|
||||
)
|
||||
# Make sure we have a failed pull attempt recorded for the pulled event
|
||||
backfill_num_attempts = self.get_success(
|
||||
main_store.db_pool.simple_select_one_onecol(
|
||||
table="event_failed_pull_attempts",
|
||||
keyvalues={"event_id": pulled_event.event_id},
|
||||
retcol="num_attempts",
|
||||
)
|
||||
)
|
||||
self.assertEqual(backfill_num_attempts, 1)
|
||||
|
||||
# The function under test: try to process the pulled event
|
||||
with LoggingContext("test"):
|
||||
self.get_success(
|
||||
self.hs.get_federation_event_handler()._process_pulled_event(
|
||||
self.OTHER_SERVER_NAME, pulled_event, backfilled=True
|
||||
)
|
||||
)
|
||||
|
||||
# Make sure the failed pull attempts for the pulled event are cleared
|
||||
backfill_num_attempts = self.get_success(
|
||||
main_store.db_pool.simple_select_one_onecol(
|
||||
table="event_failed_pull_attempts",
|
||||
keyvalues={"event_id": pulled_event.event_id},
|
||||
retcol="num_attempts",
|
||||
allow_none=True,
|
||||
)
|
||||
)
|
||||
self.assertIsNone(backfill_num_attempts)
|
||||
|
||||
# And as a sanity check, make sure the "pulled" event was persisted.
|
||||
persisted = self.get_success(
|
||||
main_store.get_event(pulled_event.event_id, allow_none=True)
|
||||
)
|
||||
self.assertIsNotNone(persisted, "pulled event was not persisted at all")
|
||||
|
||||
def test_process_pulled_event_with_rejected_missing_state(self) -> None:
|
||||
"""Ensure that we correctly handle pulled events with missing state containing a
|
||||
rejected state event
|
||||
|
||||
In this test, we pretend we are processing a "pulled" event (eg, via backfill
|
||||
or get_missing_events). The pulled event has a prev_event we haven't previously
|
||||
seen, so the server requests the state at that prev_event. We expect the server
|
||||
to make a /state request.
|
||||
|
||||
We simulate a remote server whose /state includes a rejected kick event for a
|
||||
local user. Notably, the kick event is rejected only because it cites a rejected
|
||||
auth event and would otherwise be accepted based on the room state. During state
|
||||
resolution, we re-run auth and can potentially introduce such rejected events
|
||||
into the state if we are not careful.
|
||||
|
||||
We check that the pulled event is correctly persisted, and that the state
|
||||
afterwards does not include the rejected kick.
|
||||
"""
|
||||
# The DAG we are testing looks like:
|
||||
#
|
||||
# ...
|
||||
# |
|
||||
# v
|
||||
# remote admin user joins
|
||||
# | |
|
||||
# +-------+ +-------+
|
||||
# | |
|
||||
# | rejected power levels
|
||||
# | from remote server
|
||||
# | |
|
||||
# | v
|
||||
# | rejected kick of local user
|
||||
# v from remote server
|
||||
# new power levels |
|
||||
# | v
|
||||
# | missing event
|
||||
# | from remote server
|
||||
# | |
|
||||
# +-------+ +-------+
|
||||
# | |
|
||||
# v v
|
||||
# pulled event
|
||||
# from remote server
|
||||
#
|
||||
# (arrows are in the opposite direction to prev_events.)
|
||||
|
||||
OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
|
||||
main_store = self.hs.get_datastores().main
|
||||
|
||||
# Create the room.
|
||||
kermit_user_id = self.register_user("kermit", "test")
|
||||
kermit_tok = self.login("kermit", "test")
|
||||
room_id = self.helper.create_room_as(
|
||||
room_creator=kermit_user_id, tok=kermit_tok
|
||||
)
|
||||
room_version = self.get_success(main_store.get_room_version(room_id))
|
||||
|
||||
# Add another local user to the room. This user is going to be kicked in a
|
||||
# rejected event.
|
||||
bert_user_id = self.register_user("bert", "test")
|
||||
bert_tok = self.login("bert", "test")
|
||||
self.helper.join(room_id, user=bert_user_id, tok=bert_tok)
|
||||
|
||||
# Allow the remote user to kick bert.
|
||||
# The remote user is going to send a rejected power levels event later on and we
|
||||
# need state resolution to order it before another power levels event kermit is
|
||||
# going to send later on. Hence we give both users the same power level, so that
|
||||
# ties are broken by `origin_server_ts`.
|
||||
self.helper.send_state(
|
||||
room_id,
|
||||
"m.room.power_levels",
|
||||
{"users": {kermit_user_id: 100, OTHER_USER: 100}},
|
||||
tok=kermit_tok,
|
||||
)
|
||||
|
||||
# Add the remote user to the room.
|
||||
other_member_event = self.get_success(
|
||||
event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
|
||||
)
|
||||
|
||||
initial_state_map = self.get_success(
|
||||
main_store.get_partial_current_state_ids(room_id)
|
||||
)
|
||||
create_event = self.get_success(
|
||||
main_store.get_event(initial_state_map[("m.room.create", "")])
|
||||
)
|
||||
bert_member_event = self.get_success(
|
||||
main_store.get_event(initial_state_map[("m.room.member", bert_user_id)])
|
||||
)
|
||||
power_levels_event = self.get_success(
|
||||
main_store.get_event(initial_state_map[("m.room.power_levels", "")])
|
||||
)
|
||||
|
||||
# We now need a rejected state event that will fail
|
||||
# `check_state_independent_auth_rules` but pass
|
||||
# `check_state_dependent_auth_rules`.
|
||||
|
||||
# First, we create a power levels event that we pretend the remote server has
|
||||
# accepted, but the local homeserver will reject.
|
||||
next_depth = 100
|
||||
next_timestamp = other_member_event.origin_server_ts + 100
|
||||
rejected_power_levels_event = make_event_from_dict(
|
||||
self.add_hashes_and_signatures_from_other_server(
|
||||
{
|
||||
"type": "m.room.power_levels",
|
||||
"state_key": "",
|
||||
"room_id": room_id,
|
||||
"sender": OTHER_USER,
|
||||
"prev_events": [other_member_event.event_id],
|
||||
"auth_events": [
|
||||
initial_state_map[("m.room.create", "")],
|
||||
initial_state_map[("m.room.power_levels", "")],
|
||||
# The event will be rejected because of the duplicated auth
|
||||
# event.
|
||||
other_member_event.event_id,
|
||||
other_member_event.event_id,
|
||||
],
|
||||
"origin_server_ts": next_timestamp,
|
||||
"depth": next_depth,
|
||||
"content": power_levels_event.content,
|
||||
}
|
||||
),
|
||||
room_version,
|
||||
)
|
||||
next_depth += 1
|
||||
next_timestamp += 100

        with LoggingContext("send_rejected_power_levels_event"):
            self.get_success(
                self.hs.get_federation_event_handler()._process_pulled_event(
                    self.OTHER_SERVER_NAME,
                    rejected_power_levels_event,
                    backfilled=False,
                )
            )
        self.assertEqual(
            self.get_success(
                main_store.get_rejection_reason(
                    rejected_power_levels_event.event_id
                )
            ),
            "auth_error",
        )
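        # A minimal sketch (not Synapse code) of the state-independent rule tripped
        # above: duplicate entries in `auth_events` cause outright rejection, before
        # any state-dependent checks run.
        def _has_duplicate_auth_events(auth_event_ids) -> bool:
            return len(auth_event_ids) != len(set(auth_event_ids))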

        # Then we create a kick event for a local user that cites the rejected power
        # levels event in its auth events. The kick event will be rejected solely
        # because of the rejected auth event and would otherwise be accepted.
        rejected_kick_event = make_event_from_dict(
            self.add_hashes_and_signatures_from_other_server(
                {
                    "type": "m.room.member",
                    "state_key": bert_user_id,
                    "room_id": room_id,
                    "sender": OTHER_USER,
                    "prev_events": [rejected_power_levels_event.event_id],
                    "auth_events": [
                        initial_state_map[("m.room.create", "")],
                        rejected_power_levels_event.event_id,
                        initial_state_map[("m.room.member", bert_user_id)],
                        initial_state_map[("m.room.member", OTHER_USER)],
                    ],
                    "origin_server_ts": next_timestamp,
                    "depth": next_depth,
                    "content": {"membership": "leave"},
                }
            ),
            room_version,
        )
        next_depth += 1
        next_timestamp += 100

        # The kick event must fail the state-independent auth rules, but pass the
        # state-dependent auth rules, so that it has a chance of making it through
        # state resolution.
        self.get_failure(
            check_state_independent_auth_rules(main_store, rejected_kick_event),
            AuthError,
        )
        check_state_dependent_auth_rules(
            rejected_kick_event,
            [create_event, power_levels_event, other_member_event, bert_member_event],
        )

        # The kick event must also win over the original member event during state
        # resolution.
        self.assertEqual(
            self.get_success(
                _mainline_sort(
                    self.clock,
                    room_id,
                    event_ids=[
                        bert_member_event.event_id,
                        rejected_kick_event.event_id,
                    ],
                    resolved_power_event_id=power_levels_event.event_id,
                    event_map={
                        bert_member_event.event_id: bert_member_event,
                        rejected_kick_event.event_id: rejected_kick_event,
                    },
                    state_res_store=main_store,
                )
            ),
            [bert_member_event.event_id, rejected_kick_event.event_id],
            "The rejected kick event will not be applied after bert's join event "
            "during state resolution. The test setup is incorrect.",
        )
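        # (`_mainline_sort` returns events in the order state resolution applies
        # them, so an event that sorts later takes precedence; the rejected kick must
        # therefore sort after bert's join in order to overwrite it.)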

        with LoggingContext("send_rejected_kick_event"):
            self.get_success(
                self.hs.get_federation_event_handler()._process_pulled_event(
                    self.OTHER_SERVER_NAME, rejected_kick_event, backfilled=False
                )
            )
        self.assertEqual(
            self.get_success(
                main_store.get_rejection_reason(rejected_kick_event.event_id)
            ),
            "auth_error",
        )

        # We need another power levels event which will win over the rejected one
        # during state resolution, otherwise we hit other issues where we end up with
        # a rejected power levels event during state resolution.
        self.reactor.advance(100)  # ensure the `origin_server_ts` is larger
        new_power_levels_event = self.get_success(
            main_store.get_event(
                self.helper.send_state(
                    room_id,
                    "m.room.power_levels",
                    {"users": {kermit_user_id: 100, OTHER_USER: 100, bert_user_id: 1}},
                    tok=kermit_tok,
                )["event_id"]
            )
        )
        self.assertEqual(
            self.get_success(
                _reverse_topological_power_sort(
                    self.clock,
                    room_id,
                    event_ids=[
                        new_power_levels_event.event_id,
                        rejected_power_levels_event.event_id,
                    ],
                    event_map={},
                    state_res_store=main_store,
                    full_conflicted_set=set(),
                )
            ),
            [rejected_power_levels_event.event_id, new_power_levels_event.event_id],
            "The power levels events will not have the desired ordering during state "
            "resolution. The test setup is incorrect.",
        )
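        # (`_reverse_topological_power_sort` orders the conflicted power events for
        # the first phase of state resolution; with both senders at power level 100,
        # the tie is broken by `origin_server_ts`, hence the `reactor.advance` above.)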

        # Create a missing event, so that the local homeserver has to do a `/state` or
        # `/state_ids` request to pull state from the remote homeserver.
        missing_event = make_event_from_dict(
            self.add_hashes_and_signatures_from_other_server(
                {
                    "type": "m.room.message",
                    "room_id": room_id,
                    "sender": OTHER_USER,
                    "prev_events": [rejected_kick_event.event_id],
                    "auth_events": [
                        initial_state_map[("m.room.create", "")],
                        initial_state_map[("m.room.power_levels", "")],
                        initial_state_map[("m.room.member", OTHER_USER)],
                    ],
                    "origin_server_ts": next_timestamp,
                    "depth": next_depth,
                    "content": {"msgtype": "m.text", "body": "foo"},
                }
            ),
            room_version,
        )
        next_depth += 1
        next_timestamp += 100

        # The pulled event has two prev events, one of which is missing. We will make
        # a `/state` or `/state_ids` request to the remote homeserver to ask it for
        # the state before the missing prev event.
        pulled_event = make_event_from_dict(
            self.add_hashes_and_signatures_from_other_server(
                {
                    "type": "m.room.message",
                    "room_id": room_id,
                    "sender": OTHER_USER,
                    "prev_events": [
                        new_power_levels_event.event_id,
                        missing_event.event_id,
                    ],
                    "auth_events": [
                        initial_state_map[("m.room.create", "")],
                        new_power_levels_event.event_id,
                        initial_state_map[("m.room.member", OTHER_USER)],
                    ],
                    "origin_server_ts": next_timestamp,
                    "depth": next_depth,
                    "content": {"msgtype": "m.text", "body": "bar"},
                }
            ),
            room_version,
        )
        next_depth += 1
        next_timestamp += 100
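        # Note that `pulled_event` cites the accepted `new_power_levels_event` in its
        # `auth_events`, so it passes auth itself; it is the missing prev event that
        # forces the local homeserver to fetch the disputed state.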

        # Prepare the response for the `/state` or `/state_ids` request.
        # The remote server believes bert has been kicked, while the local server
        # does not.
        state_before_missing_event = self.get_success(
            main_store.get_events_as_list(initial_state_map.values())
        )
        state_before_missing_event = [
            event
            for event in state_before_missing_event
            if event.event_id != bert_member_event.event_id
        ]
        state_before_missing_event.append(rejected_kick_event)

        # We have to bump the clock a bit, to keep the retry logic in
        # `FederationClient.get_pdu` happy
        self.reactor.advance(60000)
        with LoggingContext("send_pulled_event"):

            async def get_event(
                destination: str, event_id: str, timeout: Optional[int] = None
            ) -> JsonDict:
                self.assertEqual(destination, self.OTHER_SERVER_NAME)
                self.assertEqual(event_id, missing_event.event_id)
                return {"pdus": [missing_event.get_pdu_json()]}

            async def get_room_state_ids(
                destination: str, room_id: str, event_id: str
            ) -> JsonDict:
                self.assertEqual(destination, self.OTHER_SERVER_NAME)
                self.assertEqual(event_id, missing_event.event_id)
                return {
                    "pdu_ids": [event.event_id for event in state_before_missing_event],
                    "auth_chain_ids": [],
                }

            async def get_room_state(
                room_version: RoomVersion, destination: str, room_id: str, event_id: str
            ) -> StateRequestResponse:
                self.assertEqual(destination, self.OTHER_SERVER_NAME)
                self.assertEqual(event_id, missing_event.event_id)
                return StateRequestResponse(
                    state=state_before_missing_event,
                    auth_events=[],
                )
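            # Wire the stubs into the mocked federation transport client, so that
            # the pull below talks to them instead of a real remote homeserver.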

            self.mock_federation_transport_client.get_event.side_effect = get_event
            self.mock_federation_transport_client.get_room_state_ids.side_effect = (
                get_room_state_ids
            )
            self.mock_federation_transport_client.get_room_state.side_effect = (
                get_room_state
            )

            self.get_success(
                self.hs.get_federation_event_handler()._process_pulled_event(
                    self.OTHER_SERVER_NAME, pulled_event, backfilled=False
                )
            )
        self.assertIsNone(
            self.get_success(
                main_store.get_rejection_reason(pulled_event.event_id)
            ),
            "Pulled event was unexpectedly rejected, likely due to a problem with "
            "the test setup.",
        )
        self.assertEqual(
            {pulled_event.event_id},
            self.get_success(
                main_store.have_events_in_timeline([pulled_event.event_id])
            ),
            "Pulled event was not persisted, likely due to a problem with the test "
            "setup.",
        )

        # We must not accept rejected events into the room state, so we expect bert
        # to not be kicked, even if the remote server believes so.
        new_state_map = self.get_success(
            main_store.get_partial_current_state_ids(room_id)
        )
        self.assertEqual(
            new_state_map[("m.room.member", bert_user_id)],
            bert_member_event.event_id,
            "Rejected kick event unexpectedly became part of room state.",
        )
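        # A minimal sketch (not part of the test) of the invariant asserted above:
        # when folding state events into a state map, rejected events must be
        # skipped so they can never become part of the room state.
        def _state_map_without_rejections(state_events, rejected_ids):
            return {
                (e.type, e.state_key): e.event_id
                for e in state_events
                if e.event_id not in rejected_ids
            }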

@ -404,6 +404,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
                    event.event_id,
                    {user_id: actions for user_id, actions in push_actions},
                    False,
                    "main",
                )
            )
        return event, context

@ -4140,3 +4140,90 @@ class AccountDataTestCase(unittest.HomeserverTestCase):
{"b": 2},
|
||||
channel.json_body["account_data"]["rooms"]["test_room"]["m.per_room"],
|
||||
)
|
||||
|
||||
|
||||
class UsersByExternalIdTestCase(unittest.HomeserverTestCase):
|
||||
|
||||
servlets = [
|
||||
synapse.rest.admin.register_servlets,
|
||||
login.register_servlets,
|
||||
]
|
||||
|
||||
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
|
||||
self.store = hs.get_datastores().main
|
||||
|
||||
self.admin_user = self.register_user("admin", "pass", admin=True)
|
||||
self.admin_user_tok = self.login("admin", "pass")
|
||||
|
||||
self.other_user = self.register_user("user", "pass")
|
||||
self.get_success(
|
||||
self.store.record_user_external_id(
|
||||
"the-auth-provider", "the-external-id", self.other_user
|
||||
)
|
||||
)
|
||||
self.get_success(
|
||||
self.store.record_user_external_id(
|
||||
"another-auth-provider", "a:complex@external/id", self.other_user
|
||||
)
|
||||
)

    def test_no_auth(self) -> None:
        """Try to look up a user without authentication."""
        url = (
            "/_synapse/admin/v1/auth_providers/the-auth-provider/users/the-external-id"
        )

        channel = self.make_request(
            "GET",
            url,
        )

        self.assertEqual(401, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])

    def test_binding_does_not_exist(self) -> None:
        """Tests that a lookup for an external ID that does not exist returns a 404"""
        url = "/_synapse/admin/v1/auth_providers/the-auth-provider/users/unknown-id"

        channel = self.make_request(
            "GET",
            url,
            access_token=self.admin_user_tok,
        )

        self.assertEqual(404, channel.code, msg=channel.json_body)
        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])

    def test_success(self) -> None:
        """Tests a successful external ID lookup"""
        url = (
            "/_synapse/admin/v1/auth_providers/the-auth-provider/users/the-external-id"
        )

        channel = self.make_request(
            "GET",
            url,
            access_token=self.admin_user_tok,
        )

        self.assertEqual(200, channel.code, msg=channel.json_body)
        self.assertEqual(
            {"user_id": self.other_user},
            channel.json_body,
        )

    def test_success_urlencoded(self) -> None:
        """Tests a successful external ID lookup with a URL-encoded ID"""
        url = "/_synapse/admin/v1/auth_providers/another-auth-provider/users/a%3Acomplex%40external%2Fid"

        channel = self.make_request(
            "GET",
            url,
            access_token=self.admin_user_tok,
        )

        self.assertEqual(200, channel.code, msg=channel.json_body)
        self.assertEqual(
            {"user_id": self.other_user},
            channel.json_body,
        )
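

# A small usage sketch (not part of the diff) of the admin endpoint exercised
# above, assuming a homeserver reachable at `base_url` and an admin access token:
import json
import urllib.parse
import urllib.request


def _lookup_user_by_external_id(
    base_url: str, access_token: str, provider: str, external_id: str
) -> dict:
    # Both path components must be URL-encoded, as test_success_urlencoded shows.
    path = "/_synapse/admin/v1/auth_providers/%s/users/%s" % (
        urllib.parse.quote(provider, safe=""),
        urllib.parse.quote(external_id, safe=""),
    )
    request = urllib.request.Request(
        base_url + path, headers={"Authorization": "Bearer " + access_token}
    )
    with urllib.request.urlopen(request) as response:
        return json.load(response)  # e.g. {"user_id": "@user:test"}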

@ -11,14 +11,37 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import unittest as stdlib_unittest

from pydantic import ValidationError
from pydantic import BaseModel, ValidationError
from typing_extensions import Literal

from synapse.rest.client.models import EmailRequestTokenBody


class EmailRequestTokenBodyTestCase(unittest.TestCase):
class ThreepidMediumEnumTestCase(stdlib_unittest.TestCase):
    class Model(BaseModel):
        medium: Literal["email", "msisdn"]

    def test_accepts_valid_medium_string(self) -> None:
        """Sanity check that Pydantic behaves sensibly with an enum-of-str

        This is arguably more of a test of a class that inherits from str and Enum
        simultaneously.
        """
        model = self.Model.parse_obj({"medium": "email"})
        self.assertEqual(model.medium, "email")

    def test_rejects_invalid_medium_value(self) -> None:
        with self.assertRaises(ValidationError):
            self.Model.parse_obj({"medium": "interpretive_dance"})

    def test_rejects_invalid_medium_type(self) -> None:
        with self.assertRaises(ValidationError):
            self.Model.parse_obj({"medium": 123})
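

# A small usage sketch (not part of the diff; assumes pydantic v1, as `parse_obj`
# implies): a ValidationError carries structured details naming the failing field.
def _medium_validation_errors(raw: dict) -> list:
    try:
        ThreepidMediumEnumTestCase.Model.parse_obj(raw)
    except ValidationError as exc:
        return exc.errors()  # each entry includes loc=("medium",) and a message
    return []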


class EmailRequestTokenBodyTestCase(stdlib_unittest.TestCase):
    base_request = {
        "client_secret": "hunter2",
        "email": "alice@wonderland.com",