From 4de271a7fcde6b46611ba2aa9d45cdc6cc7275ab Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 May 2023 17:45:44 +0100 Subject: [PATCH 01/42] Allow adding random delay to push (#15516) This is to discourage timing based profiling on the push gateways. --- changelog.d/15516.feature | 1 + .../configuration/config_documentation.md | 4 ++ synapse/config/push.py | 10 ++++- synapse/push/httppusher.py | 18 +++++++++ tests/push/test_http.py | 37 +++++++++++++++++++ 5 files changed, 68 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15516.feature diff --git a/changelog.d/15516.feature b/changelog.d/15516.feature new file mode 100644 index 0000000000..02a101bb88 --- /dev/null +++ b/changelog.d/15516.feature @@ -0,0 +1 @@ +Add a config option to delay push notifications by a random amount, to discourage time-based profiling. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 1b6f256949..b6516191e8 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3442,6 +3442,9 @@ This option has a number of sub-options. They are as follows: user has unread messages in. Defaults to true, meaning push clients will see the number of rooms with unread messages in them. Set to false to instead send the number of unread messages. +* `jitter_delay`: Delays push notifications by a random amount up to the given + duration. Useful for mitigating timing attacks. Optional, defaults to no + delay. _Added in Synapse 1.84.0._ Example configuration: ```yaml @@ -3449,6 +3452,7 @@ push: enabled: true include_content: false group_unread_count_by_room: false + jitter_delay: "10s" ``` --- ## Rooms diff --git a/synapse/config/push.py b/synapse/config/push.py index 3b5378e6ea..8177ff52e2 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -42,11 +42,17 @@ class PushConfig(Config): # Now check for the one in the 'email' section and honour it, # with a warning. - push_config = config.get("email") or {} - redact_content = push_config.get("redact_content") + email_push_config = config.get("email") or {} + redact_content = email_push_config.get("redact_content") if redact_content is not None: print( "The 'email.redact_content' option is deprecated: " "please set push.include_content instead" ) self.push_include_content = not redact_content + + # Whether to apply a random delay to outbound push. + self.push_jitter_delay_ms = None + push_jitter_delay = push_config.get("jitter_delay", None) + if push_jitter_delay: + self.push_jitter_delay_ms = self.parse_duration(push_jitter_delay) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index a01445e374..e628b484a9 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging +import random import urllib.parse from typing import TYPE_CHECKING, Dict, List, Optional, Union @@ -114,6 +115,8 @@ class HttpPusher(Pusher): ) self._pusherpool = hs.get_pusherpool() + self.push_jitter_delay_ms = hs.config.push.push_jitter_delay_ms + self.data = pusher_config.data if self.data is None: raise PusherConfigException("'data' key can not be null for HTTP pusher") @@ -327,6 +330,21 @@ class HttpPusher(Pusher): event = await self.store.get_event(push_action.event_id, allow_none=True) if event is None: return True # It's been redacted + + # Check if we should delay sending out the notification by a random + # amount. + # + # Note: we base the delay off of when the event was sent, rather than + # now, to handle the case where we need to send out many notifications + # at once. If we just slept the random amount each loop then the last + # push notification in the set could be delayed by many times the max + # delay. + if self.push_jitter_delay_ms: + delay_ms = random.randint(1, self.push_jitter_delay_ms) + diff_ms = event.origin_server_ts + delay_ms - self.clock.time_msec() + if diff_ms > 0: + await self.clock.sleep(diff_ms / 1000) + rejected = await self.dispatch_push_event(event, tweaks, badge) if rejected is False: return False diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 0fbbef7c8b..4f811bb9c0 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -962,3 +962,40 @@ class HTTPPusherTests(HomeserverTestCase): channel.json_body["pushers"][0]["org.matrix.msc3881.device_id"], lookup_result.device_id, ) + + @override_config({"push": {"jitter_delay": "10s"}}) + def test_jitter(self) -> None: + """Tests that enabling jitter actually delays sending push.""" + user_id, access_token = self._make_user_with_pusher("user") + other_user_id, other_access_token = self._make_user_with_pusher("otheruser") + + room = self.helper.create_room_as(user_id, tok=access_token) + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # Send a message and check that it did not generate a push, as it should + # be delayed. + self.helper.send(room, body="Hi!", tok=other_access_token) + self.assertEqual(len(self.push_attempts), 0) + + # Now advance time past the max jitter, and assert the message was sent. + self.reactor.advance(15) + self.assertEqual(len(self.push_attempts), 1) + + self.push_attempts[0][0].callback({}) + + # Now we send a bunch of messages and assert that they were all sent + # within the 10s max delay. 
+ for _ in range(10): + self.helper.send(room, body="Hi!", tok=other_access_token) + + index = 1 + for _ in range(11): + while len(self.push_attempts) > index: + self.push_attempts[index][0].callback({}) + self.pump() + index += 1 + + self.reactor.advance(1) + self.pump() + + self.assertEqual(len(self.push_attempts), 11) From 0e8aa2a1b28dfce374294450a015d18884c89d36 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 2 May 2023 14:21:36 -0700 Subject: [PATCH 02/42] Remove references to supporting per-user flag for msc2654 (#15522) --- changelog.d/15522.misc | 1 + docs/admin_api/experimental_features.md | 13 +++++++------ synapse/rest/admin/experimental_features.py | 1 - tests/rest/admin/test_admin.py | 8 ++------ 4 files changed, 10 insertions(+), 13 deletions(-) create mode 100644 changelog.d/15522.misc diff --git a/changelog.d/15522.misc b/changelog.d/15522.misc new file mode 100644 index 0000000000..a5a229e4a0 --- /dev/null +++ b/changelog.d/15522.misc @@ -0,0 +1 @@ +Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) (#15522). diff --git a/docs/admin_api/experimental_features.md b/docs/admin_api/experimental_features.md index c1aebe4b01..07b630915d 100644 --- a/docs/admin_api/experimental_features.md +++ b/docs/admin_api/experimental_features.md @@ -1,10 +1,12 @@ # Experimental Features API This API allows a server administrator to enable or disable some experimental features on a per-user -basis. Currently supported features are [msc3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy -presence state enabled, [msc2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654): enable unread counts, -[msc3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications -for another client, and [msc3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require +basis. The currently supported features are: +- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy +presence state enabled +- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications +for another client +- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require UIA when first uploading cross-signing keys. 
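A minimal sketch of the per-user toggle listed above, assuming the `PUT /_synapse/admin/v1/experimental_features/<user_id>` endpoint described below and a server-admin access token — enabling MSC3881 for a single user might look like:

```sh
# Hypothetical example; substitute your homeserver URL, the target user ID,
# and a valid server-admin access token.
curl -X PUT \
  -H "Authorization: Bearer <admin_access_token>" \
  -H "Content-Type: application/json" \
  -d '{"features": {"msc3881": true}}' \
  "http://localhost:8008/_synapse/admin/v1/experimental_features/@user:example.com"
```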
@@ -19,7 +21,7 @@ provide a body containing the user id and listing the features to enable/disable { "features": { "msc3026":true, - "msc2654":true + "msc3881":true } } ``` @@ -46,7 +48,6 @@ user like so: { "features": { "msc3026": true, - "msc2654": true, "msc3881": false, "msc3967": false } diff --git a/synapse/rest/admin/experimental_features.py b/synapse/rest/admin/experimental_features.py index 1d409ac2b7..abf273af10 100644 --- a/synapse/rest/admin/experimental_features.py +++ b/synapse/rest/admin/experimental_features.py @@ -33,7 +33,6 @@ class ExperimentalFeature(str, Enum): """ MSC3026 = "msc3026" - MSC2654 = "msc2654" MSC3881 = "msc3881" MSC3967 = "msc3967" diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 645a00b4b1..695e84357a 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -399,7 +399,7 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase): "PUT", url, content={ - "features": {"msc3026": True, "msc2654": True}, + "features": {"msc3026": True, "msc3881": True}, }, access_token=self.admin_user_tok, ) @@ -420,7 +420,7 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase): ) self.assertEqual( True, - channel.json_body["features"]["msc2654"], + channel.json_body["features"]["msc3881"], ) # test disabling a feature works @@ -448,10 +448,6 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase): ) self.assertEqual( True, - channel.json_body["features"]["msc2654"], - ) - self.assertEqual( - False, channel.json_body["features"]["msc3881"], ) self.assertEqual( From 04e79e6a185f466c9a2c8d79f6c9de7f42efc6f7 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 3 May 2023 12:27:33 +0100 Subject: [PATCH 03/42] Add config option to forget rooms automatically when users leave them (#15224) This is largely based off the stats and user directory updater code. Signed-off-by: Sean Quah --- changelog.d/15224.feature | 1 + .../configuration/config_documentation.md | 10 + synapse/config/room.py | 4 + synapse/handlers/room_member.py | 173 ++++++++++++++++-- synapse/handlers/room_member_worker.py | 3 - synapse/server.py | 11 +- synapse/storage/databases/main/roommember.py | 69 ++++--- .../main/delta/76/04_add_room_forgetter.sql | 24 +++ tests/handlers/test_room_member.py | 11 ++ 9 files changed, 259 insertions(+), 47 deletions(-) create mode 100644 changelog.d/15224.feature create mode 100644 synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql diff --git a/changelog.d/15224.feature b/changelog.d/15224.feature new file mode 100644 index 0000000000..5d8413f8be --- /dev/null +++ b/changelog.d/15224.feature @@ -0,0 +1 @@ +Add `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index b6516191e8..14c21f73fe 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3699,6 +3699,16 @@ default_power_level_content_override: trusted_private_chat: null public_chat: null ``` +--- +### `forget_rooms_on_leave` + +Set to true to automatically forget rooms for users when they leave them, either +normally or via a kick or ban. Defaults to false. 
+ +Example configuration: +```yaml +forget_rooms_on_leave: false +``` --- ## Opentracing diff --git a/synapse/config/room.py b/synapse/config/room.py index 4a7ac00540..b6696cd129 100644 --- a/synapse/config/room.py +++ b/synapse/config/room.py @@ -75,3 +75,7 @@ class RoomConfig(Config): % preset ) # We validate the actual overrides when we try to apply them. + + # When enabled, users will forget rooms when they leave them, either via a + # leave, kick or ban. + self.forget_on_leave = config.get("forget_rooms_on_leave", False) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index ed805d6ec8..fbef600acd 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -16,7 +16,7 @@ import abc import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple from synapse import types from synapse.api.constants import ( @@ -38,7 +38,10 @@ from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN +from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.logging import opentracing +from synapse.metrics import event_processing_positions +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import NOT_SPAM from synapse.types import ( JsonDict, @@ -280,9 +283,25 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): """ raise NotImplementedError() - @abc.abstractmethod async def forget(self, user: UserID, room_id: str) -> None: - raise NotImplementedError() + user_id = user.to_string() + + member = await self._storage_controllers.state.get_current_state_event( + room_id=room_id, event_type=EventTypes.Member, state_key=user_id + ) + membership = member.membership if member else None + + if membership is not None and membership not in [ + Membership.LEAVE, + Membership.BAN, + ]: + raise SynapseError(400, "User %s in room %s" % (user_id, room_id)) + + # In normal case this call is only required if `membership` is not `None`. + # But: After the last member had left the room, the background update + # `_background_remove_left_rooms` is deleting rows related to this room from + # the table `current_state_events` and `get_current_state_events` is `None`. + await self.store.forget(user_id, room_id) async def ratelimit_multiple_invites( self, @@ -2046,25 +2065,141 @@ class RoomMemberMasterHandler(RoomMemberHandler): """Implements RoomMemberHandler._user_left_room""" user_left_room(self.distributor, target, room_id) - async def forget(self, user: UserID, room_id: str) -> None: - user_id = user.to_string() - member = await self._storage_controllers.state.get_current_state_event( - room_id=room_id, event_type=EventTypes.Member, state_key=user_id - ) - membership = member.membership if member else None +class RoomForgetterHandler(StateDeltasHandler): + """Forgets rooms when they are left, when enabled in the homeserver config. - if membership is not None and membership not in [ - Membership.LEAVE, - Membership.BAN, - ]: - raise SynapseError(400, "User %s in room %s" % (user_id, room_id)) + For the purposes of this feature, kicks, bans and "leaves" via state resolution + weirdness are all considered to be leaves. 
- # In normal case this call is only required if `membership` is not `None`. - # But: After the last member had left the room, the background update - # `_background_remove_left_rooms` is deleting rows related to this room from - # the table `current_state_events` and `get_current_state_events` is `None`. - await self.store.forget(user_id, room_id) + Derived from `StatsHandler` and `UserDirectoryHandler`. + """ + + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + + self._hs = hs + self._store = hs.get_datastores().main + self._storage_controllers = hs.get_storage_controllers() + self._clock = hs.get_clock() + self._notifier = hs.get_notifier() + self._room_member_handler = hs.get_room_member_handler() + + # The current position in the current_state_delta stream + self.pos: Optional[int] = None + + # Guard to ensure we only process deltas one at a time + self._is_processing = False + + if hs.config.worker.run_background_tasks: + self._notifier.add_replication_callback(self.notify_new_event) + + # We kick this off to pick up outstanding work from before the last restart. + self._clock.call_later(0, self.notify_new_event) + + def notify_new_event(self) -> None: + """Called when there may be more deltas to process""" + if self._is_processing: + return + + self._is_processing = True + + async def process() -> None: + try: + await self._unsafe_process() + finally: + self._is_processing = False + + run_as_background_process("room_forgetter.notify_new_event", process) + + async def _unsafe_process(self) -> None: + # If self.pos is None then means we haven't fetched it from DB + if self.pos is None: + self.pos = await self._store.get_room_forgetter_stream_pos() + room_max_stream_ordering = self._store.get_room_max_stream_ordering() + if self.pos > room_max_stream_ordering: + # apparently, we've processed more events than exist in the database! + # this can happen if events are removed with history purge or similar. + logger.warning( + "Event stream ordering appears to have gone backwards (%i -> %i): " + "rewinding room forgetter processor", + self.pos, + room_max_stream_ordering, + ) + self.pos = room_max_stream_ordering + + if not self._hs.config.room.forget_on_leave: + # Update the processing position, so that if the server admin turns the + # feature on at a later date, we don't decide to forget every room that + # has ever been left in the past. + self.pos = self._store.get_room_max_stream_ordering() + await self._store.update_room_forgetter_stream_pos(self.pos) + return + + # Loop round handling deltas until we're up to date + + while True: + # Be sure to read the max stream_ordering *before* checking if there are any outstanding + # deltas, since there is otherwise a chance that we could miss updates which arrive + # after we check the deltas. 
+ room_max_stream_ordering = self._store.get_room_max_stream_ordering() + if self.pos == room_max_stream_ordering: + break + + logger.debug( + "Processing room forgetting %s->%s", self.pos, room_max_stream_ordering + ) + ( + max_pos, + deltas, + ) = await self._storage_controllers.state.get_current_state_deltas( + self.pos, room_max_stream_ordering + ) + + logger.debug("Handling %d state deltas", len(deltas)) + await self._handle_deltas(deltas) + + self.pos = max_pos + + # Expose current event processing position to prometheus + event_processing_positions.labels("room_forgetter").set(max_pos) + + await self._store.update_room_forgetter_stream_pos(max_pos) + + async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: + """Called with the state deltas to process""" + for delta in deltas: + typ = delta["type"] + state_key = delta["state_key"] + room_id = delta["room_id"] + event_id = delta["event_id"] + prev_event_id = delta["prev_event_id"] + + if typ != EventTypes.Member: + continue + + if not self._hs.is_mine_id(state_key): + continue + + change = await self._get_key_change( + prev_event_id, + event_id, + key_name="membership", + public_value=Membership.JOIN, + ) + is_leave = change is MatchChange.now_false + + if is_leave: + try: + await self._room_member_handler.forget( + UserID.from_string(state_key), room_id + ) + except SynapseError as e: + if e.code == 400: + # The user is back in the room. + pass + else: + raise def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]: diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 76e36b8a6d..e8ff1ad063 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -137,6 +137,3 @@ class RoomMemberWorkerHandler(RoomMemberHandler): await self._notify_change_client( user_id=target.to_string(), room_id=room_id, change="left" ) - - async def forget(self, target: UserID, room_id: str) -> None: - raise RuntimeError("Cannot forget rooms on workers.") diff --git a/synapse/server.py b/synapse/server.py index 75a902d64d..a0036578b1 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -94,7 +94,11 @@ from synapse.handlers.room import ( ) from synapse.handlers.room_batch import RoomBatchHandler from synapse.handlers.room_list import RoomListHandler -from synapse.handlers.room_member import RoomMemberHandler, RoomMemberMasterHandler +from synapse.handlers.room_member import ( + RoomForgetterHandler, + RoomMemberHandler, + RoomMemberMasterHandler, +) from synapse.handlers.room_member_worker import RoomMemberWorkerHandler from synapse.handlers.room_summary import RoomSummaryHandler from synapse.handlers.search import SearchHandler @@ -233,6 +237,7 @@ class HomeServer(metaclass=abc.ABCMeta): "message", "pagination", "profile", + "room_forgetter", "stats", ] @@ -847,6 +852,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_push_rules_handler(self) -> PushRulesHandler: return PushRulesHandler(self) + @cache_in_self + def get_room_forgetter_handler(self) -> RoomForgetterHandler: + return RoomForgetterHandler(self) + @cache_in_self def get_outbound_redis_connection(self) -> "ConnectionHandler": """ diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index daad58291a..e068f27a10 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -82,7 +82,7 @@ class EventIdMembership: membership: str -class 
RoomMemberWorkerStore(EventsWorkerStore): +class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def __init__( self, database: DatabasePool, @@ -1372,6 +1372,50 @@ class RoomMemberWorkerStore(EventsWorkerStore): _is_local_host_in_room_ignoring_users_txn, ) + async def forget(self, user_id: str, room_id: str) -> None: + """Indicate that user_id wishes to discard history for room_id.""" + + def f(txn: LoggingTransaction) -> None: + self.db_pool.simple_update_txn( + txn, + table="room_memberships", + keyvalues={"user_id": user_id, "room_id": room_id}, + updatevalues={"forgotten": 1}, + ) + + self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) + self._invalidate_cache_and_stream( + txn, self.get_forgotten_rooms_for_user, (user_id,) + ) + + await self.db_pool.runInteraction("forget_membership", f) + + async def get_room_forgetter_stream_pos(self) -> int: + """Get the stream position of the background process to forget rooms when left + by users. + """ + return await self.db_pool.simple_select_one_onecol( + table="room_forgetter_stream_pos", + keyvalues={}, + retcol="stream_id", + desc="room_forgetter_stream_pos", + ) + + async def update_room_forgetter_stream_pos(self, stream_id: int) -> None: + """Update the stream position of the background process to forget rooms when + left by users. + + Must only be used by the worker running the background process. + """ + assert self.hs.config.worker.run_background_tasks + + await self.db_pool.simple_update_one( + table="room_forgetter_stream_pos", + keyvalues={}, + updatevalues={"stream_id": stream_id}, + desc="room_forgetter_stream_pos", + ) + class RoomMemberBackgroundUpdateStore(SQLBaseStore): def __init__( @@ -1553,29 +1597,6 @@ class RoomMemberStore( ): super().__init__(database, db_conn, hs) - async def forget(self, user_id: str, room_id: str) -> None: - """Indicate that user_id wishes to discard history for room_id.""" - - def f(txn: LoggingTransaction) -> None: - sql = ( - "UPDATE" - " room_memberships" - " SET" - " forgotten = 1" - " WHERE" - " user_id = ?" - " AND" - " room_id = ?" - ) - txn.execute(sql, (user_id, room_id)) - - self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) - self._invalidate_cache_and_stream( - txn, self.get_forgotten_rooms_for_user, (user_id,) - ) - - await self.db_pool.runInteraction("forget_membership", f) - def extract_heroes_from_room_summary( details: Mapping[str, MemberSummary], me: str diff --git a/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql b/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql new file mode 100644 index 0000000000..be4b57d86f --- /dev/null +++ b/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql @@ -0,0 +1,24 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE room_forgetter_stream_pos ( + Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row. 
+ stream_id BIGINT NOT NULL, + CHECK (Lock='X') +); + +INSERT INTO room_forgetter_stream_pos ( + stream_id +) SELECT COALESCE(MAX(stream_ordering), 0) from events; diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index 6a38893b68..a444d822cd 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -333,6 +333,17 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase): self.get_success(self.store.is_locally_forgotten_room(self.room_id)) ) + @override_config({"forget_rooms_on_leave": True}) + def test_leave_and_auto_forget(self) -> None: + """Tests the `forget_rooms_on_leave` config option.""" + self.helper.join(self.room_id, user=self.bob, tok=self.bob_token) + + # alice is not the last room member that leaves and forgets the room + self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token) + self.assertTrue( + self.get_success(self.store.did_forget(self.alice, self.room_id)) + ) + def test_leave_and_forget_last_user(self) -> None: """Tests that forget a room is successfully when the last user has left the room.""" From a7b3e9ce65335e452de216cb42b9e724e8f3ad1d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 3 May 2023 07:49:03 -0400 Subject: [PATCH 04/42] Set thread_id column to non-null for event_push_{actions,actions_staging,summary} (#15437) Updates the database schema to require a thread_id (by adding a constraint that the column is non-null) for event_push_actions, event_push_actions_staging, and event_push_actions_summary. For PostgreSQL we add the constraint as NOT VALID, then VALIDATE the constraint a background job to avoid locking the table during an upgrade. For SQLite we simply rebuild the table & copy the data. --- changelog.d/15437.misc | 1 + synapse/storage/background_updates.py | 44 ++++ .../databases/main/event_push_actions.py | 244 +----------------- synapse/storage/schema/__init__.py | 3 + .../76/04thread_notifications_backfill.sql | 28 ++ ...thread_notifications_not_null.sql.postgres | 37 +++ ...05thread_notifications_not_null.sql.sqlite | 102 ++++++++ 7 files changed, 225 insertions(+), 234 deletions(-) create mode 100644 changelog.d/15437.misc create mode 100644 synapse/storage/schema/main/delta/76/04thread_notifications_backfill.sql create mode 100644 synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.postgres create mode 100644 synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.sqlite diff --git a/changelog.d/15437.misc b/changelog.d/15437.misc new file mode 100644 index 0000000000..2dea23784f --- /dev/null +++ b/changelog.d/15437.misc @@ -0,0 +1 @@ +Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index a99aea8926..ca085ef800 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -561,6 +561,50 @@ class BackgroundUpdater: updater, oneshot=True ) + def register_background_validate_constraint( + self, update_name: str, constraint_name: str, table: str + ) -> None: + """Helper for store classes to do a background validate constraint. + + This only applies on PostgreSQL. + + To use: + + 1. use a schema delta file to add a background update. Example: + INSERT INTO background_updates (update_name, progress_json) VALUES + ('validate_my_constraint', '{}'); + + 2. 
In the Store constructor, call this method + + Args: + update_name: update_name to register for + constraint_name: name of constraint to validate + table: table the constraint is applied to + """ + + def runner(conn: Connection) -> None: + c = conn.cursor() + + sql = f""" + ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name}; + """ + logger.debug("[SQL] %s", sql) + c.execute(sql) + + async def updater(progress: JsonDict, batch_size: int) -> int: + assert isinstance( + self.db_pool.engine, engines.PostgresEngine + ), "validate constraint background update registered for non-Postres database" + + logger.info("Validating constraint %s to %s", constraint_name, table) + await self.db_pool.runWithConnection(runner) + await self._end_background_update(update_name) + return 1 + + self._background_update_handlers[update_name] = _BackgroundUpdateHandler( + updater, oneshot=True + ) + async def create_index_in_background( self, index_name: str, diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index eeccf5db24..ab8f354dc1 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -100,7 +100,6 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.stream import StreamWorkerStore -from synapse.types import JsonDict from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -289,180 +288,22 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas unique=True, ) - self.db_pool.updates.register_background_update_handler( - "event_push_backfill_thread_id", - self._background_backfill_thread_id, + self.db_pool.updates.register_background_validate_constraint( + "event_push_actions_staging_thread_id", + constraint_name="event_push_actions_staging_thread_id", + table="event_push_actions_staging", ) - - # Indexes which will be used to quickly make the thread_id column non-null. - self.db_pool.updates.register_background_index_update( - "event_push_actions_thread_id_null", - index_name="event_push_actions_thread_id_null", + self.db_pool.updates.register_background_validate_constraint( + "event_push_actions_thread_id", + constraint_name="event_push_actions_thread_id", table="event_push_actions", - columns=["thread_id"], - where_clause="thread_id IS NULL", ) - self.db_pool.updates.register_background_index_update( - "event_push_summary_thread_id_null", - index_name="event_push_summary_thread_id_null", + self.db_pool.updates.register_background_validate_constraint( + "event_push_summary_thread_id", + constraint_name="event_push_summary_thread_id", table="event_push_summary", - columns=["thread_id"], - where_clause="thread_id IS NULL", ) - # Check ASAP (and then later, every 1s) to see if we have finished - # background updates the event_push_actions and event_push_summary tables. - self._clock.call_later(0.0, self._check_event_push_backfill_thread_id) - self._event_push_backfill_thread_id_done = False - - @wrap_as_background_process("check_event_push_backfill_thread_id") - async def _check_event_push_backfill_thread_id(self) -> None: - """ - Has thread_id finished backfilling? - - If not, we need to just-in-time update it so the queries work. 
- """ - done = await self.db_pool.updates.has_completed_background_update( - "event_push_backfill_thread_id" - ) - - if done: - self._event_push_backfill_thread_id_done = True - else: - # Reschedule to run. - self._clock.call_later(15.0, self._check_event_push_backfill_thread_id) - - async def _background_backfill_thread_id( - self, progress: JsonDict, batch_size: int - ) -> int: - """ - Fill in the thread_id field for event_push_actions and event_push_summary. - - This is preparatory so that it can be made non-nullable in the future. - - Because all current (null) data is done in an unthreaded manner this - simply assumes it is on the "main" timeline. Since event_push_actions - are periodically cleared it is not possible to correctly re-calculate - the thread_id. - """ - event_push_actions_done = progress.get("event_push_actions_done", False) - - def add_thread_id_txn( - txn: LoggingTransaction, start_stream_ordering: int - ) -> int: - sql = """ - SELECT stream_ordering - FROM event_push_actions - WHERE - thread_id IS NULL - AND stream_ordering > ? - ORDER BY stream_ordering - LIMIT ? - """ - txn.execute(sql, (start_stream_ordering, batch_size)) - - # No more rows to process. - rows = txn.fetchall() - if not rows: - progress["event_push_actions_done"] = True - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - return 0 - - # Update the thread ID for any of those rows. - max_stream_ordering = rows[-1][0] - - sql = """ - UPDATE event_push_actions - SET thread_id = 'main' - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """ - txn.execute( - sql, - ( - start_stream_ordering, - max_stream_ordering, - ), - ) - - # Update progress. - processed_rows = txn.rowcount - progress["max_event_push_actions_stream_ordering"] = max_stream_ordering - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - def add_thread_id_summary_txn(txn: LoggingTransaction) -> int: - min_user_id = progress.get("max_summary_user_id", "") - min_room_id = progress.get("max_summary_room_id", "") - - # Slightly overcomplicated query for getting the Nth user ID / room - # ID tuple, or the last if there are less than N remaining. - sql = """ - SELECT user_id, room_id FROM ( - SELECT user_id, room_id FROM event_push_summary - WHERE (user_id, room_id) > (?, ?) - AND thread_id IS NULL - ORDER BY user_id, room_id - LIMIT ? - ) AS e - ORDER BY user_id DESC, room_id DESC - LIMIT 1 - """ - - txn.execute(sql, (min_user_id, min_room_id, batch_size)) - row = txn.fetchone() - if not row: - return 0 - - max_user_id, max_room_id = row - - sql = """ - UPDATE event_push_summary - SET thread_id = 'main' - WHERE - (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?) - AND thread_id IS NULL - """ - txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id)) - processed_rows = txn.rowcount - - progress["max_summary_user_id"] = max_user_id - progress["max_summary_room_id"] = max_room_id - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - # First update the event_push_actions table, then the event_push_summary table. - # - # Note that the event_push_actions_staging table is ignored since it is - # assumed that items in that table will only exist for a short period of - # time. 
- if not event_push_actions_done: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_txn, - progress.get("max_event_push_actions_stream_ordering", 0), - ) - else: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_summary_txn, - ) - - # Only done after the event_push_summary table is done. - if not result: - await self.db_pool.updates._end_background_update( - "event_push_backfill_thread_id" - ) - - return result - async def get_unread_counts_by_room_for_user(self, user_id: str) -> Dict[str, int]: """Get the notification count by room for a user. Only considers notifications, not highlight or unread counts, and threads are currently aggregated under their room. @@ -711,25 +552,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # First we pull the counts from the summary table. # # We check that `last_receipt_stream_ordering` matches the stream ordering of the @@ -1545,25 +1367,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (room_id, user_id, stream_ordering, *thread_args), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # Fetch the notification counts between the stream ordering of the # latest receipt and what was previously summarised. unread_counts = self._get_notif_unread_count_for_user_room( @@ -1698,19 +1501,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas rotate_to_stream_ordering: The new maximum event stream ordering to summarise. """ - # Ensure that any new actions have an updated thread_id. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """, - (MAIN_TIMELINE, old_rotate_stream_ordering, rotate_to_stream_ordering), - ) - - # XXX Do we need to update summaries here too? - # Calculate the new counts that should be upserted into event_push_summary sql = """ SELECT user_id, room_id, thread_id, @@ -1773,20 +1563,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas logger.info("Rotating notifications, handling %d rows", len(summaries)) - # Ensure that any updated threads have the proper thread_id. - if not self._event_push_backfill_thread_id_done: - txn.execute_batch( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? 
AND thread_id is NULL - """, - [ - (MAIN_TIMELINE, room_id, user_id) - for user_id, room_id, _ in summaries - ], - ) - self.db_pool.simple_upsert_many_txn( txn, table="event_push_summary", diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 1672976209..741563abc6 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -106,6 +106,9 @@ Changes in SCHEMA_VERSION = 76: SCHEMA_COMPAT_VERSION = ( # Queries against `event_stream_ordering` columns in membership tables must # be disambiguated. + # + # The threads_id column must written to with non-null values for the + # event_push_actions, event_push_actions_staging, and event_push_summary tables. 74 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/76/04thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/76/04thread_notifications_backfill.sql new file mode 100644 index 0000000000..ce6f9ff937 --- /dev/null +++ b/synapse/storage/schema/main/delta/76/04thread_notifications_backfill.sql @@ -0,0 +1,28 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Force the background updates from 06thread_notifications.sql to run in the +-- foreground as code will now require those to be "done". + +DELETE FROM background_updates WHERE update_name = 'event_push_backfill_thread_id'; + +-- Overwrite any null thread_id values. +UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL; +UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL; +UPDATE event_push_summary SET thread_id = 'main' WHERE thread_id IS NULL; + +-- Drop the background updates to calculate the indexes used to find null thread_ids. +DELETE FROM background_updates WHERE update_name = 'event_push_actions_thread_id_null'; +DELETE FROM background_updates WHERE update_name = 'event_push_summary_thread_id_null'; diff --git a/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.postgres b/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.postgres new file mode 100644 index 0000000000..40936def6f --- /dev/null +++ b/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.postgres @@ -0,0 +1,37 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_actions_staging + ADD CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; +ALTER TABLE event_push_actions + ADD CONSTRAINT event_push_actions_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; +ALTER TABLE event_push_summary + ADD CONSTRAINT event_push_summary_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7605, 'event_push_actions_staging_thread_id', '{}'), + (7605, 'event_push_actions_thread_id', '{}'), + (7605, 'event_push_summary_thread_id', '{}'); + +-- Drop the indexes used to find null thread_ids. +DROP INDEX IF EXISTS event_push_actions_thread_id_null; +DROP INDEX IF EXISTS event_push_summary_thread_id_null; diff --git a/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.sqlite b/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.sqlite new file mode 100644 index 0000000000..e9372b6cf9 --- /dev/null +++ b/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.sqlite @@ -0,0 +1,102 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + -- The thread_id columns can now be made non-nullable. +-- +-- SQLite doesn't support modifying columns to an existing table, so it must +-- be recreated. + +-- Create the new tables. +CREATE TABLE event_push_actions_staging_new ( + event_id TEXT NOT NULL, + user_id TEXT NOT NULL, + actions TEXT NOT NULL, + notif SMALLINT NOT NULL, + highlight SMALLINT NOT NULL, + unread SMALLINT, + thread_id TEXT, + inserted_ts BIGINT, + CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id is NOT NULL) +); + +CREATE TABLE event_push_actions_new ( + room_id TEXT NOT NULL, + event_id TEXT NOT NULL, + user_id TEXT NOT NULL, + profile_tag VARCHAR(32), + actions TEXT NOT NULL, + topological_ordering BIGINT, + stream_ordering BIGINT, + notif SMALLINT, + highlight SMALLINT, + unread SMALLINT, + thread_id TEXT, + CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag), + CONSTRAINT event_push_actions_thread_id CHECK (thread_id is NOT NULL) +); + +CREATE TABLE event_push_summary_new ( + user_id TEXT NOT NULL, + room_id TEXT NOT NULL, + notif_count BIGINT NOT NULL, + stream_ordering BIGINT NOT NULL, + unread_count BIGINT, + last_receipt_stream_ordering BIGINT, + thread_id TEXT, + CONSTRAINT event_push_summary_thread_id CHECK (thread_id is NOT NULL) +); + +-- Copy the data. 
+INSERT INTO event_push_actions_staging_new (event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts) + SELECT event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts + FROM event_push_actions_staging; + +INSERT INTO event_push_actions_new (room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id) + SELECT room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id + FROM event_push_actions; + +INSERT INTO event_push_summary_new (user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id) + SELECT user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id + FROM event_push_summary; + +-- Drop the old tables. +DROP TABLE event_push_actions_staging; +DROP TABLE event_push_actions; +DROP TABLE event_push_summary; + +-- Rename the tables. +ALTER TABLE event_push_actions_staging_new RENAME TO event_push_actions_staging; +ALTER TABLE event_push_actions_new RENAME TO event_push_actions; +ALTER TABLE event_push_summary_new RENAME TO event_push_summary; + +-- Recreate the indexes. +CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id); + +CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering); +CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering ); +CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); +CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id ); +CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering); + +CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary (user_id, room_id, thread_id) ; + +-- Recreate some indexes in the background, by re-running the background updates +-- from 72/02event_push_actions_index.sql and 72/06thread_notifications.sql. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7403, 'event_push_summary_unique_index2', '{}') + ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}'; +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7403, 'event_push_actions_stream_highlight_index', '{}') + ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}'; From 9890f23469092be88f5669e226e9f81d2d309cb2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 3 May 2023 13:07:49 +0100 Subject: [PATCH 05/42] Suppress the trusted key server warning for matrix.org in the demo scripts (#15527) --- changelog.d/15527.misc | 1 + demo/start.sh | 10 +++------- 2 files changed, 4 insertions(+), 7 deletions(-) create mode 100644 changelog.d/15527.misc diff --git a/changelog.d/15527.misc b/changelog.d/15527.misc new file mode 100644 index 0000000000..752a32adeb --- /dev/null +++ b/changelog.d/15527.misc @@ -0,0 +1 @@ +Don't use a trusted key server when running the demo scripts. \ No newline at end of file diff --git a/demo/start.sh b/demo/start.sh index fdd75816fb..06ec6f985f 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -46,7 +46,7 @@ for port in 8080 8081 8082; do echo '' # Warning, this heredoc depends on the interaction of tabs and spaces. - # Please don't accidentaly bork me with your fancy settings. 
+ # Please don't accidentally bork me with your fancy settings. listeners=$(cat <<-PORTLISTENERS # Configure server to listen on both $https_port and $port # This overides some of the default settings above @@ -80,12 +80,8 @@ for port in 8080 8081 8082; do echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\"" echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\"" - # Ignore keys from the trusted keys server - echo '# Ignore keys from the trusted keys server' - echo 'trusted_key_servers:' - echo ' - server_name: "matrix.org"' - echo ' accept_keys_insecurely: true' - echo '' + # Request keys directly from servers contacted over federation + echo 'trusted_key_servers: []' # Allow the servers to communicate over localhost. allow_list=$(cat <<-ALLOW_LIST From fc3a878220f934a248b008277e89b85ad187d220 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 May 2023 14:41:37 +0100 Subject: [PATCH 06/42] Speed up rebuilding of the user directory for local users (#15529) The idea here is to batch up the work. --- changelog.d/15529.misc | 1 + synapse/storage/database.py | 13 +- .../storage/databases/main/user_directory.py | 233 ++++++++++++------ 3 files changed, 171 insertions(+), 76 deletions(-) create mode 100644 changelog.d/15529.misc diff --git a/changelog.d/15529.misc b/changelog.d/15529.misc new file mode 100644 index 0000000000..7ad424d8df --- /dev/null +++ b/changelog.d/15529.misc @@ -0,0 +1 @@ +Speed up rebuilding of the user directory for local users. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 1f5f5eb6f8..313cf1a8d0 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -386,13 +386,20 @@ class LoggingTransaction: self.executemany(sql, args) def execute_values( - self, sql: str, values: Iterable[Iterable[Any]], fetch: bool = True + self, + sql: str, + values: Iterable[Iterable[Any]], + template: Optional[str] = None, + fetch: bool = True, ) -> List[Tuple]: """Corresponds to psycopg2.extras.execute_values. Only available when using postgres. The `fetch` parameter must be set to False if the query does not return rows (e.g. INSERTs). + + The `template` is the snippet to merge to every item in argslist to + compose the query. """ assert isinstance(self.database_engine, PostgresEngine) from psycopg2.extras import execute_values @@ -400,7 +407,9 @@ class LoggingTransaction: return self._do_execute( # TODO: is it safe for values to be Iterable[Iterable[Any]] here? # https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_values says values should be Sequence[Sequence] - lambda the_sql: execute_values(self.txn, the_sql, values, fetch=fetch), + lambda the_sql: execute_values( + self.txn, the_sql, values, template=template, fetch=fetch + ), sql, ) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 5d65faed16..b7d58978de 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -27,6 +27,8 @@ from typing import ( cast, ) +import attr + try: # Figure out if ICU support is available for searching users. import icu @@ -66,6 +68,19 @@ logger = logging.getLogger(__name__) TEMP_TABLE = "_temp_populate_user_directory" +@attr.s(auto_attribs=True, frozen=True) +class _UserDirProfile: + """Helper type for the user directory code for an entry to be inserted into + the directory. 
+ """ + + user_id: str + + # If the display name or avatar URL are unexpected types, replace with None + display_name: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none) + avatar_url: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none) + + class UserDirectoryBackgroundUpdateStore(StateDeltasStore): # How many records do we calculate before sending it to # add_users_who_share_private_rooms? @@ -381,25 +396,65 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): % (len(users_to_work_on), progress["remaining"]) ) - for user_id in users_to_work_on: - if await self.should_include_local_user_in_dir(user_id): - profile = await self.get_profileinfo(get_localpart_from_id(user_id)) # type: ignore[attr-defined] - await self.update_profile_in_user_dir( - user_id, profile.display_name, profile.avatar_url - ) + # First filter down to users we want to insert into the user directory. + users_to_insert = [ + user_id + for user_id in users_to_work_on + if await self.should_include_local_user_in_dir(user_id) + ] - # We've finished processing a user. Delete it from the table. - await self.db_pool.simple_delete_one( - TEMP_TABLE + "_users", {"user_id": user_id} - ) - # Update the remaining counter. - progress["remaining"] -= 1 - await self.db_pool.runInteraction( - "populate_user_directory", - self.db_pool.updates._background_update_progress_txn, - "populate_user_directory_process_users", - progress, + # Next fetch their profiles. Note that the `user_id` here is the + # *localpart*, and that not all users have profiles. + profile_rows = await self.db_pool.simple_select_many_batch( + table="profiles", + column="user_id", + iterable=[get_localpart_from_id(u) for u in users_to_insert], + retcols=( + "user_id", + "displayname", + "avatar_url", + ), + keyvalues={}, + desc="populate_user_directory_process_users_get_profiles", + ) + profiles = { + f"@{row['user_id']}:{self.server_name}": _UserDirProfile( + f"@{row['user_id']}:{self.server_name}", + row["displayname"], + row["avatar_url"], ) + for row in profile_rows + } + + profiles_to_insert = [ + profiles.get(user_id) or _UserDirProfile(user_id) + for user_id in users_to_insert + ] + + # Actually insert the users with their profiles into the directory. + await self.db_pool.runInteraction( + "populate_user_directory_process_users_insertion", + self._update_profiles_in_user_dir_txn, + profiles_to_insert, + ) + + # We've finished processing the users. Delete it from the table. + await self.db_pool.simple_delete_many( + table=TEMP_TABLE + "_users", + column="user_id", + iterable=users_to_work_on, + keyvalues={}, + desc="populate_user_directory_process_users_delete", + ) + + # Update the remaining counter. + progress["remaining"] -= len(users_to_work_on) + await self.db_pool.runInteraction( + "populate_user_directory", + self.db_pool.updates._background_update_progress_txn, + "populate_user_directory_process_users", + progress, + ) return len(users_to_work_on) @@ -584,72 +639,102 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): Update or add a user's profile in the user directory. If the user is remote, the profile will be marked as not stale. """ - # If the display name or avatar URL are unexpected types, replace with None. 
- display_name = non_null_str_or_none(display_name) - avatar_url = non_null_str_or_none(avatar_url) + await self.db_pool.runInteraction( + "update_profiles_in_user_dir", + self._update_profiles_in_user_dir_txn, + [_UserDirProfile(user_id, display_name, avatar_url)], + ) - def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None: - self.db_pool.simple_upsert_txn( + def _update_profiles_in_user_dir_txn( + self, + txn: LoggingTransaction, + profiles: Sequence[_UserDirProfile], + ) -> None: + self.db_pool.simple_upsert_many_txn( + txn, + table="user_directory", + key_names=("user_id",), + key_values=[(p.user_id,) for p in profiles], + value_names=("display_name", "avatar_url"), + value_values=[ + ( + p.display_name, + p.avatar_url, + ) + for p in profiles + ], + ) + + # Remote users: Make sure the profile is not marked as stale anymore. + remote_users = [ + p.user_id for p in profiles if not self.hs.is_mine_id(p.user_id) + ] + if remote_users: + self.db_pool.simple_delete_many_txn( txn, - table="user_directory", - keyvalues={"user_id": user_id}, - values={"display_name": display_name, "avatar_url": avatar_url}, + table="user_directory_stale_remote_users", + column="user_id", + values=remote_users, + keyvalues={}, ) - if not self.hs.is_mine_id(user_id): - # Remote users: Make sure the profile is not marked as stale anymore. - self.db_pool.simple_delete_txn( - txn, - table="user_directory_stale_remote_users", - keyvalues={"user_id": user_id}, + if isinstance(self.database_engine, PostgresEngine): + # We weight the localpart most highly, then display name and finally + # server name + template = """ + ( + %s, + setweight(to_tsvector('simple', %s), 'A') + || setweight(to_tsvector('simple', %s), 'D') + || setweight(to_tsvector('simple', COALESCE(%s, '')), 'B') ) + """ - # The display name that goes into the database index. - index_display_name = display_name - if index_display_name is not None: - index_display_name = _filter_text_for_index(index_display_name) - - if isinstance(self.database_engine, PostgresEngine): - # We weight the localpart most highly, then display name and finally - # server name - sql = """ - INSERT INTO user_directory_search(user_id, vector) - VALUES (?, - setweight(to_tsvector('simple', ?), 'A') - || setweight(to_tsvector('simple', ?), 'D') - || setweight(to_tsvector('simple', COALESCE(?, '')), 'B') - ) ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector - """ - txn.execute( - sql, + sql = """ + INSERT INTO user_directory_search(user_id, vector) + VALUES ? ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector + """ + txn.execute_values( + sql, + [ ( - user_id, - get_localpart_from_id(user_id), - get_domain_from_id(user_id), - index_display_name, - ), - ) - elif isinstance(self.database_engine, Sqlite3Engine): - value = ( - "%s %s" % (user_id, index_display_name) - if index_display_name - else user_id - ) - self.db_pool.simple_upsert_txn( - txn, - table="user_directory_search", - keyvalues={"user_id": user_id}, - values={"value": value}, - ) - else: - # This should be unreachable. 
- raise Exception("Unrecognized database engine") + p.user_id, + get_localpart_from_id(p.user_id), + get_domain_from_id(p.user_id), + _filter_text_for_index(p.display_name) + if p.display_name + else None, + ) + for p in profiles + ], + template=template, + fetch=False, + ) + elif isinstance(self.database_engine, Sqlite3Engine): + values = [] + for p in profiles: + if p.display_name is not None: + index_display_name = _filter_text_for_index(p.display_name) + value = f"{p.user_id} {index_display_name}" + else: + value = p.user_id - txn.call_after(self.get_user_in_directory.invalidate, (user_id,)) + values.append((value,)) - await self.db_pool.runInteraction( - "update_profile_in_user_dir", _update_profile_in_user_dir_txn - ) + self.db_pool.simple_upsert_many_txn( + txn, + table="user_directory_search", + key_names=("user_id",), + key_values=[(p.user_id,) for p in profiles], + value_names=("value",), + value_values=values, + ) + else: + # This should be unreachable. + raise Exception("Unrecognized database engine") + + for p in profiles: + txn.call_after(self.get_user_in_directory.invalidate, (p.user_id,)) async def add_users_who_share_private_room( self, room_id: str, user_id_tuples: Iterable[Tuple[str, str]] From 28ac1a1a91c972c19649e21a6e8d92bb786d8a57 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 May 2023 14:42:43 +0100 Subject: [PATCH 07/42] Speed up deleting of old rows in `event_push_actions` (#15531) Enforce that we use index scans (rather than seq scans), which we also do for state queries. The reason to enforce this is that we can't correctly get PostgreSQL to understand the distribution of `stream_ordering` depends on `highlight`, and so it always defaults (on matrix.org) to sequential scans. --- changelog.d/15531.misc | 1 + synapse/storage/databases/main/event_push_actions.py | 9 +++++++++ 2 files changed, 10 insertions(+) create mode 100644 changelog.d/15531.misc diff --git a/changelog.d/15531.misc b/changelog.d/15531.misc new file mode 100644 index 0000000000..6d4da961b5 --- /dev/null +++ b/changelog.d/15531.misc @@ -0,0 +1 @@ +Speed up deleting of old rows in `event_push_actions`. diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index ab8f354dc1..2e98a29fef 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -1612,6 +1612,15 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # deletes. batch_size = self._rotate_count + if isinstance(self.database_engine, PostgresEngine): + # Temporarily disable sequential scans in this transaction. We + # need to do this as the postgres statistics don't take into + # account the `highlight = 0` part when estimating the + # distribution of `stream_ordering`. I.e. since we keep old + # highlight rows the query planner thinks there are way more old + # rows to delete than there actually are. 
+ txn.execute("SET LOCAL enable_seqscan=off") + txn.execute( """ SELECT stream_ordering FROM event_push_actions From 7be05df0b143bd7bf25a6cc3a489d0af913b48cf Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 3 May 2023 15:40:10 +0100 Subject: [PATCH 08/42] Switch back to upstream devenv flake (#15533) --- changelog.d/15533.misc | 1 + flake.lock | 12 ++++++------ flake.nix | 6 +----- 3 files changed, 8 insertions(+), 11 deletions(-) create mode 100644 changelog.d/15533.misc diff --git a/changelog.d/15533.misc b/changelog.d/15533.misc new file mode 100644 index 0000000000..8ed23526ef --- /dev/null +++ b/changelog.d/15533.misc @@ -0,0 +1 @@ +Install the `xmlsec` package and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. \ No newline at end of file diff --git a/flake.lock b/flake.lock index 85886b730f..d1c933e9aa 100644 --- a/flake.lock +++ b/flake.lock @@ -8,16 +8,16 @@ "pre-commit-hooks": "pre-commit-hooks" }, "locked": { - "lastModified": 1682534083, - "narHash": "sha256-lBgFaLNHRQtD3InZbBXzIS8HgZUgcPJ6jiqGa4FJPrk=", - "owner": "anoadragon453", + "lastModified": 1683102061, + "narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=", + "owner": "cachix", "repo": "devenv", - "rev": "9694bd0a845dd184d4468cc3d3461089aace787a", + "rev": "ff1f29e41756553174d596cafe3a9fa77595100b", "type": "github" }, "original": { - "owner": "anoadragon453", - "ref": "anoa/fix_languages_python", + "owner": "cachix", + "ref": "main", "repo": "devenv", "type": "github" } diff --git a/flake.nix b/flake.nix index 91916d9abb..3e24a6691e 100644 --- a/flake.nix +++ b/flake.nix @@ -50,11 +50,7 @@ # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS). systems.url = "github:nix-systems/default"; # A development environment manager built on Nix. See https://devenv.sh. - # This is temporarily overridden to a fork that fixes a quirk between - # devenv's service and python language features. This can be removed - # when https://github.com/cachix/devenv/pull/559 is merged upstream. - devenv.url = "github:anoadragon453/devenv/anoa/fix_languages_python"; - #devenv.url = "github:cachix/devenv/main"; + devenv.url = "github:cachix/devenv/main"; # Rust toolchains and rust-analyzer nightly. fenix = { url = "github:nix-community/fenix"; From 1d6140ec8a94f7320c0ade3081d82de3b1dc1770 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 3 May 2023 16:26:15 +0100 Subject: [PATCH 09/42] Add `xmlsec` package to flake.nix (#15532) --- changelog.d/15532.misc | 1 + flake.nix | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/15532.misc diff --git a/changelog.d/15532.misc b/changelog.d/15532.misc new file mode 100644 index 0000000000..e58273f297 --- /dev/null +++ b/changelog.d/15532.misc @@ -0,0 +1 @@ +Install the `xmlsec` package and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. diff --git a/flake.nix b/flake.nix index 3e24a6691e..76243d0601 100644 --- a/flake.nix +++ b/flake.nix @@ -93,6 +93,7 @@ # Native dependencies for unit tests (SyTest also requires OpenSSL). openssl + xmlsec # Native dependencies for running Complement. 
olm From 5f8822854d0427d132674615f13becf420c57bd2 Mon Sep 17 00:00:00 2001 From: Sandro Date: Wed, 3 May 2023 18:54:42 +0200 Subject: [PATCH 10/42] Use oEmbed for YouTube Shorts (#15025) It seems that YouTube Short previews do not work in some regions, but the oEmbed information for those areas is still valid. This causes YouTube Shorts to always use (only) the oEmbed endpoint which is a minor regression for regions where the URL preview was already working -- some of the additional video metadata is lost. It is not likely that clients are using this today and it is more beneficial to have a limited preview working everywhere than unused metadata in the Open Graph response. --- changelog.d/15025.misc | 1 + synapse/res/providers.json | 13 +++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelog.d/15025.misc diff --git a/changelog.d/15025.misc b/changelog.d/15025.misc new file mode 100644 index 0000000000..1f04d85729 --- /dev/null +++ b/changelog.d/15025.misc @@ -0,0 +1 @@ +Use oEmbed to generate URL previews for YouTube Shorts. diff --git a/synapse/res/providers.json b/synapse/res/providers.json index 7b9958e454..2dc9fec8e3 100644 --- a/synapse/res/providers.json +++ b/synapse/res/providers.json @@ -11,5 +11,18 @@ "url": "https://publish.twitter.com/oembed" } ] + }, + { + "provider_name": "YouTube Shorts", + "provider_url": "http://www.youtube.com/", + "endpoints": [ + { + "schemes": [ + "https://youtube.com/shorts/*", + "https://*.youtube.com/shorts/*" + ], + "url": "https://www.youtube.com/oembed" + } + ] } ] From cc872eaf164e72e2589c9deac1190a879cbd7deb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 May 2023 23:45:42 +0100 Subject: [PATCH 11/42] Bump pyicu from 2.10.2 to 2.11 (#15509) * Bump pyicu from 2.10.2 to 2.11 Bumps [pyicu](https://gitlab.pyicu.org/main/pyicu) from 2.10.2 to 2.11. --- updated-dependencies: - dependency-name: pyicu dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15509.misc | 1 + poetry.lock | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15509.misc diff --git a/changelog.d/15509.misc b/changelog.d/15509.misc new file mode 100644 index 0000000000..1eb26c83b7 --- /dev/null +++ b/changelog.d/15509.misc @@ -0,0 +1 @@ +Bump pyicu from 2.10.2 to 2.11. diff --git a/poetry.lock b/poetry.lock index 2ca6d89cb6..8e82fddeba 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1973,13 +1973,13 @@ plugins = ["importlib-metadata"] [[package]] name = "pyicu" -version = "2.10.2" +version = "2.11" description = "Python extension wrapping the ICU C++ API" category = "main" optional = true python-versions = "*" files = [ - {file = "PyICU-2.10.2.tar.gz", hash = "sha256:0c3309eea7fab6857507ace62403515b60fe096cbfb4f90d14f55ff75c5441c1"}, + {file = "PyICU-2.11.tar.gz", hash = "sha256:3ab531264cfe9132b3d2ac5d708da9a4649d25f6e6813730ac88cf040a08a844"}, ] [[package]] From ded8f3d349d8481d1c9a48835cde0b94f785e371 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 4 May 2023 07:54:13 -0400 Subject: [PATCH 12/42] Update the base rules to remove the dont_notify action. (MSC3987) (#15534) A dont_notify action is a no-op (and coalesce is undefined). 
These are both considered no-ops by the spec, per MSC3987 and the predefined push rules were updated to remove dont_notify from the list of actions. --- changelog.d/15534.misc | 1 + rust/src/push/base_rules.rs | 6 +++--- rust/src/push/evaluator.rs | 7 ++++--- rust/src/push/mod.rs | 6 ++++-- synapse/handlers/push_rules.py | 2 ++ 5 files changed, 14 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15534.misc diff --git a/changelog.d/15534.misc b/changelog.d/15534.misc new file mode 100644 index 0000000000..fd9ba2a6e1 --- /dev/null +++ b/changelog.d/15534.misc @@ -0,0 +1 @@ +Implement [MSC3987](https://github.com/matrix-org/matrix-spec-proposals/pull/3987) by removing `"dont_notify"` from the list of actions in default push rules. diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index d7c73c1f25..51372e1553 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -57,7 +57,7 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.master"), priority_class: 5, conditions: Cow::Borrowed(&[]), - actions: Cow::Borrowed(&[Action::DontNotify]), + actions: Cow::Borrowed(&[]), default: true, default_enabled: false, }]; @@ -88,7 +88,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ pattern: Cow::Borrowed("m.notice"), }, ))]), - actions: Cow::Borrowed(&[Action::DontNotify]), + actions: Cow::Borrowed(&[]), default: true, default_enabled: true, }, @@ -122,7 +122,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ pattern: Cow::Borrowed("m.room.member"), }, ))]), - actions: Cow::Borrowed(&[Action::DontNotify]), + actions: Cow::Borrowed(&[]), default: true, default_enabled: true, }, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 6941c61ea4..2d7c4c06be 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -140,7 +140,7 @@ impl PushRuleEvaluator { /// name. /// /// Returns the set of actions, if any, that match (filtering out any - /// `dont_notify` actions). + /// `dont_notify` and `coalesce` actions). pub fn run( &self, push_rules: &FilteredPushRules, @@ -198,8 +198,9 @@ impl PushRuleEvaluator { let actions = push_rule .actions .iter() - // Filter out "dont_notify" actions, as we don't store them. - .filter(|a| **a != Action::DontNotify) + // Filter out "dont_notify" and "coalesce" actions, as we don't store them + // (since they result in no action by the pushers). + .filter(|a| **a != Action::DontNotify && **a != Action::Coalesce) .cloned() .collect(); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 42c7c84132..f19d3c739f 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -164,11 +164,13 @@ impl PushRule { /// The "action" Synapse should perform for a matching push rule. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Action { - DontNotify, Notify, - Coalesce, SetTweak(SetTweak), + // Legacy actions that should be understood, but are equivalent to no-ops. + DontNotify, + Coalesce, + // An unrecognized custom action. Unknown(Value), } diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py index 1219672a59..813f3aa2d5 100644 --- a/synapse/handlers/push_rules.py +++ b/synapse/handlers/push_rules.py @@ -129,6 +129,8 @@ def check_actions(actions: List[Union[str, JsonDict]]) -> None: raise InvalidRuleException("No actions found") for a in actions: + # "dont_notify" and "coalesce" are legacy actions. They are allowed, but + # ignored (resulting in no action from the pusher). 
if a in ["notify", "dont_notify", "coalesce"]: pass elif isinstance(a, dict) and "set_tweak" in a: From 2e59e97ebd02e93da39e6c90335d3b24ed01217a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 4 May 2023 15:18:22 +0100 Subject: [PATCH 13/42] Move ThirdPartyEventRules into module_api/callbacks (#15535) --- changelog.d/15535.misc | 1 + synapse/app/_base.py | 4 +- synapse/handlers/auth.py | 2 +- synapse/handlers/deactivate_account.py | 4 +- synapse/handlers/directory.py | 6 +- synapse/handlers/federation.py | 6 +- synapse/handlers/federation_event.py | 4 +- synapse/handlers/message.py | 7 +-- synapse/handlers/profile.py | 2 +- synapse/handlers/room.py | 10 ++-- synapse/handlers/room_member.py | 6 +- synapse/module_api/__init__.py | 31 +++++----- synapse/module_api/callbacks/__init__.py | 4 ++ .../third_party_event_rules_callbacks.py} | 4 +- synapse/notifier.py | 2 +- synapse/rest/admin/rooms.py | 2 +- synapse/server.py | 5 -- tests/rest/client/test_third_party_rules.py | 56 ++++++++++++------- tests/server.py | 4 +- 19 files changed, 92 insertions(+), 68 deletions(-) create mode 100644 changelog.d/15535.misc rename synapse/{events/third_party_rules.py => module_api/callbacks/third_party_event_rules_callbacks.py} (99%) diff --git a/changelog.d/15535.misc b/changelog.d/15535.misc new file mode 100644 index 0000000000..9981606c32 --- /dev/null +++ b/changelog.d/15535.misc @@ -0,0 +1 @@ +Move various module API callback registration methods to a dedicated class. \ No newline at end of file diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 954402e4d2..7f83b34d89 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -64,7 +64,6 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig from synapse.crypto import context_factory from synapse.events.presence_router import load_legacy_presence_router -from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.http.site import SynapseSite from synapse.logging.context import PreserveLoggingContext @@ -73,6 +72,9 @@ from synapse.metrics import install_gc_manager, register_threadpool from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.metrics.jemalloc import setup_jemalloc_stats from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + load_legacy_third_party_event_rules, +) from synapse.types import ISynapseReactor from synapse.util import SYNAPSE_VERSION from synapse.util.caches.lrucache import setup_expire_lru_cache_entries diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 1e89447044..59e340974d 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -212,7 +212,7 @@ class AuthHandler: self._password_enabled_for_login = hs.config.auth.password_enabled_for_login self._password_enabled_for_reauth = hs.config.auth.password_enabled_for_reauth self._password_localdb_enabled = hs.config.auth.password_localdb_enabled - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules # Ratelimiter for failed auth during UIA. Uses same ratelimit config # as per `rc_login.failed_attempts`. 
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index bd5867491b..f299b89a1b 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -39,11 +39,11 @@ class DeactivateAccountHandler: self._profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() self._server_name = hs.hostname - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules # Flag that indicates whether the process to part users from rooms is running self._user_parter_running = False - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules # Start the user parter loop so it can resume parting users from rooms where # it left off (if it has work left to do). diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 5e8316e2e5..1e0623c7f8 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -52,7 +52,9 @@ class DirectoryHandler: self.config = hs.config self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search self.require_membership = hs.config.server.require_membership_for_aliases - self.third_party_event_rules = hs.get_third_party_event_rules() + self._third_party_event_rules = ( + hs.get_module_api_callbacks().third_party_event_rules + ) self.server_name = hs.hostname self.federation = hs.get_federation_client() @@ -503,7 +505,7 @@ class DirectoryHandler: # Check if publishing is blocked by a third party module allowed_by_third_party_rules = ( await ( - self.third_party_event_rules.check_visibility_can_be_modified( + self._third_party_event_rules.check_visibility_can_be_modified( room_id, visibility ) ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index d1a88cc604..4ad808a5b4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -169,7 +169,9 @@ class FederationHandler: self._room_backfill = Linearizer("room_backfill") - self.third_party_event_rules = hs.get_third_party_event_rules() + self._third_party_event_rules = ( + hs.get_module_api_callbacks().third_party_event_rules + ) # Tracks running partial state syncs by room ID. 
# Partial state syncs currently only run on the main process, so it's okay to @@ -1253,7 +1255,7 @@ class FederationHandler: unpersisted_context, ) = await self.event_creation_handler.create_new_client_event(builder=builder) - event_allowed, _ = await self.third_party_event_rules.check_event_allowed( + event_allowed, _ = await self._third_party_event_rules.check_event_allowed( event, unpersisted_context ) if not event_allowed: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 06609fab93..fc15024166 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -157,7 +157,9 @@ class FederationEventHandler: self._get_room_member_handler = hs.get_room_member_handler self._federation_client = hs.get_federation_client() - self._third_party_event_rules = hs.get_third_party_event_rules() + self._third_party_event_rules = ( + hs.get_module_api_callbacks().third_party_event_rules + ) self._notifier = hs.get_notifier() self._is_mine_id = hs.is_mine_id diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ac1932a7f9..0b61c2272b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -77,7 +77,6 @@ from synapse.util.metrics import measure_func from synapse.visibility import get_effective_room_visibility_from_state if TYPE_CHECKING: - from synapse.events.third_party_rules import ThirdPartyEventRules from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -509,8 +508,8 @@ class EventCreationHandler: self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator() self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker - self.third_party_event_rules: "ThirdPartyEventRules" = ( - self.hs.get_third_party_event_rules() + self._third_party_event_rules = ( + self.hs.get_module_api_callbacks().third_party_event_rules ) self._block_events_without_consent_error = ( @@ -1314,7 +1313,7 @@ class EventCreationHandler: if requester: context.app_service = requester.app_service - res, new_content = await self.third_party_event_rules.check_event_allowed( + res, new_content = await self._third_party_event_rules.check_event_allowed( event, context ) if res is False: diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 440d3f4acd..983b9b66fb 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -61,7 +61,7 @@ class ProfileHandler: self.server_name = hs.config.server.server_name - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict: target_user = UserID.from_string(user_id) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index efd9612d90..5e1702d78a 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -160,7 +160,9 @@ class RoomCreationHandler: ) self._server_notices_mxid = hs.config.servernotices.server_notices_mxid - self.third_party_event_rules = hs.get_third_party_event_rules() + self._third_party_event_rules = ( + hs.get_module_api_callbacks().third_party_event_rules + ) async def upgrade_room( self, requester: Requester, old_room_id: str, new_version: RoomVersion @@ -742,7 +744,7 @@ class RoomCreationHandler: # Let the third party rules modify the room creation config if needed, or abort # the room creation entirely with an exception. 
- await self.third_party_event_rules.on_create_room( + await self._third_party_event_rules.on_create_room( requester, config, is_requester_admin=is_requester_admin ) @@ -879,7 +881,7 @@ class RoomCreationHandler: # Check whether this visibility value is blocked by a third party module allowed_by_third_party_rules = ( await ( - self.third_party_event_rules.check_visibility_can_be_modified( + self._third_party_event_rules.check_visibility_can_be_modified( room_id, visibility ) ) @@ -1731,7 +1733,7 @@ class RoomShutdownHandler: self.room_member_handler = hs.get_room_member_handler() self._room_creation_handler = hs.get_room_creation_handler() self._replication = hs.get_replication_data_handler() - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules self.event_creation_handler = hs.get_event_creation_handler() self.store = hs.get_datastores().main diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index fbef600acd..af0ca5c26d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -100,7 +100,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self.clock = hs.get_clock() self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker - self.third_party_event_rules = hs.get_third_party_event_rules() + self._third_party_event_rules = ( + hs.get_module_api_callbacks().third_party_event_rules + ) self._server_notices_mxid = self.config.servernotices.server_notices_mxid self._enable_lookup = hs.config.registration.enable_3pid_lookup self.allow_per_room_profiles = self.config.server.allow_per_room_profiles @@ -1560,7 +1562,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # can't just rely on the standard ratelimiting of events. 
await self._third_party_invite_limiter.ratelimit(requester) - can_invite = await self.third_party_event_rules.check_threepid_can_be_invited( + can_invite = await self._third_party_event_rules.check_threepid_can_be_invited( medium, address, room_id ) if not can_invite: diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 90eff030b5..4b59e6825b 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -44,20 +44,6 @@ from synapse.events.presence_router import ( GET_USERS_FOR_STATES_CALLBACK, PresenceRouter, ) -from synapse.events.third_party_rules import ( - CHECK_CAN_DEACTIVATE_USER_CALLBACK, - CHECK_CAN_SHUTDOWN_ROOM_CALLBACK, - CHECK_EVENT_ALLOWED_CALLBACK, - CHECK_THREEPID_CAN_BE_INVITED_CALLBACK, - CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, - ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, - ON_CREATE_ROOM_CALLBACK, - ON_NEW_EVENT_CALLBACK, - ON_PROFILE_UPDATE_CALLBACK, - ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, - ON_THREEPID_BIND_CALLBACK, - ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK, -) from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK from synapse.handlers.auth import ( CHECK_3PID_AUTH_CALLBACK, @@ -105,6 +91,20 @@ from synapse.module_api.callbacks.spamchecker_callbacks import ( USER_MAY_SEND_3PID_INVITE_CALLBACK, SpamCheckerModuleApiCallbacks, ) +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + CHECK_CAN_DEACTIVATE_USER_CALLBACK, + CHECK_CAN_SHUTDOWN_ROOM_CALLBACK, + CHECK_EVENT_ALLOWED_CALLBACK, + CHECK_THREEPID_CAN_BE_INVITED_CALLBACK, + CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, + ON_CREATE_ROOM_CALLBACK, + ON_NEW_EVENT_CALLBACK, + ON_PROFILE_UPDATE_CALLBACK, + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, + ON_THREEPID_BIND_CALLBACK, + ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK, +) from synapse.push.httppusher import HttpPusher from synapse.rest.client.login import LoginResponse from synapse.storage import DataStore @@ -273,7 +273,6 @@ class ModuleApi: self._public_room_list_manager = PublicRoomListManager(hs) self._account_data_manager = AccountDataManager(hs) - self._third_party_event_rules = hs.get_third_party_event_rules() self._password_auth_provider = hs.get_password_auth_provider() self._presence_router = hs.get_presence_router() self._account_data_handler = hs.get_account_data_handler() @@ -371,7 +370,7 @@ class ModuleApi: Added in Synapse v1.39.0. 
""" - return self._third_party_event_rules.register_third_party_rules_callbacks( + return self._callbacks.third_party_event_rules.register_third_party_rules_callbacks( check_event_allowed=check_event_allowed, on_create_room=on_create_room, check_threepid_can_be_invited=check_threepid_can_be_invited, diff --git a/synapse/module_api/callbacks/__init__.py b/synapse/module_api/callbacks/__init__.py index 5cdb2c003a..dcb036552b 100644 --- a/synapse/module_api/callbacks/__init__.py +++ b/synapse/module_api/callbacks/__init__.py @@ -23,9 +23,13 @@ from synapse.module_api.callbacks.account_validity_callbacks import ( from synapse.module_api.callbacks.spamchecker_callbacks import ( SpamCheckerModuleApiCallbacks, ) +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + ThirdPartyEventRulesModuleApiCallbacks, +) class ModuleApiCallbacks: def __init__(self, hs: "HomeServer") -> None: self.account_validity = AccountValidityModuleApiCallbacks() self.spam_checker = SpamCheckerModuleApiCallbacks(hs) + self.third_party_event_rules = ThirdPartyEventRulesModuleApiCallbacks(hs) diff --git a/synapse/events/third_party_rules.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py similarity index 99% rename from synapse/events/third_party_rules.py rename to synapse/module_api/callbacks/third_party_event_rules_callbacks.py index 61d4530be7..911f37ba42 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py @@ -140,7 +140,7 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: api.register_third_party_rules_callbacks(**hooks) -class ThirdPartyEventRules: +class ThirdPartyEventRulesModuleApiCallbacks: """Allows server admins to provide a Python module implementing an extra set of rules to apply when processing events. 
@@ -149,8 +149,6 @@ class ThirdPartyEventRules: """ def __init__(self, hs: "HomeServer"): - self.third_party_rules = None - self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() diff --git a/synapse/notifier.py b/synapse/notifier.py index a8832a3f8e..897272ad5b 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -232,7 +232,7 @@ class Notifier: self._federation_client = hs.get_federation_http_client() - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 4de56bf13f..1d65560265 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -70,7 +70,7 @@ class RoomRestV2Servlet(RestServlet): self._auth = hs.get_auth() self._store = hs.get_datastores().main self._pagination_handler = hs.get_pagination_handler() - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules async def on_DELETE( self, request: SynapseRequest, room_id: str diff --git a/synapse/server.py b/synapse/server.py index e597627a6d..c557c60482 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -42,7 +42,6 @@ from synapse.crypto.context_factory import RegularPolicyForHTTPS from synapse.crypto.keyring import Keyring from synapse.events.builder import EventBuilderFactory from synapse.events.presence_router import PresenceRouter -from synapse.events.third_party_rules import ThirdPartyEventRules from synapse.events.utils import EventClientSerializer from synapse.federation.federation_client import FederationClient from synapse.federation.federation_server import ( @@ -691,10 +690,6 @@ class HomeServer(metaclass=abc.ABCMeta): def get_stats_handler(self) -> StatsHandler: return StatsHandler(self) - @cache_in_self - def get_third_party_event_rules(self) -> ThirdPartyEventRules: - return ThirdPartyEventRules(self) - @cache_in_self def get_password_auth_provider(self) -> PasswordAuthProvider: return PasswordAuthProvider() diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 753ecc8d16..e5ba5a9706 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -22,7 +22,9 @@ from synapse.api.errors import SynapseError from synapse.api.room_versions import RoomVersion from synapse.config.homeserver import HomeServerConfig from synapse.events import EventBase -from synapse.events.third_party_rules import load_legacy_third_party_event_rules +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + load_legacy_third_party_event_rules, +) from synapse.rest import admin from synapse.rest.client import account, login, profile, room from synapse.server import HomeServer @@ -146,7 +148,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): return ev.type != "foo.bar.forbidden", None callback = Mock(spec=[], side_effect=check) - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [ + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ callback ] @@ -202,7 +204,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): ) -> Tuple[bool, Optional[JsonDict]]: raise NastyHackException(429, "message") - 
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + check + ] # Make a request channel = self.make_request( @@ -229,7 +233,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): ev.content = {"x": "y"} return True, None - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + check + ] # now send the event channel = self.make_request( @@ -253,7 +259,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): d["content"] = {"x": "y"} return True, d - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + check + ] # now send the event channel = self.make_request( @@ -289,7 +297,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): } return True, d - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + check + ] # Send an event, then edit it. channel = self.make_request( @@ -440,7 +450,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): ) return True, None - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [test_fn] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + test_fn + ] # Sometimes the bug might not happen the first time the event type is added # to the state but might happen when an event updates the state of the room for @@ -466,7 +478,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): def test_on_new_event(self) -> None: """Test that the on_new_event callback is called on new events""" on_new_event = Mock(make_awaitable(None)) - self.hs.get_third_party_event_rules()._on_new_event_callbacks.append( + self.hs.get_module_api_callbacks().third_party_event_rules._on_new_event_callbacks.append( on_new_event ) @@ -569,7 +581,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Register a mock callback. m = Mock(return_value=make_awaitable(None)) - self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append(m) + self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append( + m + ) # Change the display name. channel = self.make_request( @@ -628,7 +642,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Register a mock callback. m = Mock(return_value=make_awaitable(None)) - self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append(m) + self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append( + m + ) # Register an admin user. self.register_user("admin", "password", admin=True) @@ -667,7 +683,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. 
deactivation_mock = Mock(return_value=make_awaitable(None)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._on_user_deactivation_status_changed_callbacks.append( deactivation_mock, ) @@ -675,7 +691,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # deactivation code calls it in a way that let modules know the user is being # deactivated. profile_mock = Mock(return_value=make_awaitable(None)) - self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append( + self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append( profile_mock, ) @@ -725,7 +741,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mock callback. m = Mock(return_value=make_awaitable(None)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._on_user_deactivation_status_changed_callbacks.append(m) # Register an admin user. @@ -779,7 +795,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. deactivation_mock = Mock(return_value=make_awaitable(False)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._check_can_deactivate_user_callbacks.append( deactivation_mock, ) @@ -825,7 +841,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. deactivation_mock = Mock(return_value=make_awaitable(False)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._check_can_deactivate_user_callbacks.append( deactivation_mock, ) @@ -864,7 +880,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. shutdown_mock = Mock(return_value=make_awaitable(False)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._check_can_shutdown_room_callbacks.append( shutdown_mock, ) @@ -900,7 +916,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. threepid_bind_mock = Mock(return_value=make_awaitable(None)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._on_threepid_bind_callbacks.append(threepid_bind_mock) # Register an admin user. 
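For comparison with the mocked callbacks in these tests, a sketch of what a real `check_can_deactivate_user` callback might look like; the signature follows the third-party rules callback interface exercised above, and the "protected account" rule is purely illustrative:

```python
# Illustrative callback (not part of the patch): veto self-service
# deactivation of certain accounts while still allowing admins to proceed.
async def check_can_deactivate_user(user_id: str, by_admin: bool) -> bool:
    if user_id.startswith("@protected-") and not by_admin:
        return False
    return True
```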
@@ -947,8 +963,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): on_remove_user_third_party_identifier_callback_mock = Mock( return_value=make_awaitable(None) ) - third_party_rules = self.hs.get_third_party_event_rules() - third_party_rules.register_third_party_rules_callbacks( + self.hs.get_module_api().register_third_party_rules_callbacks( on_add_user_third_party_identifier=on_add_user_third_party_identifier_callback_mock, on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, ) @@ -1009,8 +1024,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): on_remove_user_third_party_identifier_callback_mock = Mock( return_value=make_awaitable(None) ) - third_party_rules = self.hs.get_third_party_event_rules() - third_party_rules.register_third_party_rules_callbacks( + self.hs.get_module_api().register_third_party_rules_callbacks( on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, ) diff --git a/tests/server.py b/tests/server.py index a49dc90e32..7296f0a552 100644 --- a/tests/server.py +++ b/tests/server.py @@ -73,11 +73,13 @@ from twisted.web.server import Request, Site from synapse.config.database import DatabaseConnectionConfig from synapse.config.homeserver import HomeServerConfig from synapse.events.presence_router import load_legacy_presence_router -from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.http.site import SynapseRequest from synapse.logging.context import ContextResourceUsage from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + load_legacy_third_party_event_rules, +) from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.database import LoggingDatabaseConnection From 83e7fa5eeeea3c7ba321679560d33cca6f4e8221 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 5 May 2023 13:27:51 +0100 Subject: [PATCH 14/42] Allow running Complement integration tests via podman (#15543) --- changelog.d/15543.misc | 1 + docs/development/contributing_guide.md | 1 + scripts-dev/complement.sh | 38 ++++++++++++++++++-------- 3 files changed, 28 insertions(+), 12 deletions(-) create mode 100644 changelog.d/15543.misc diff --git a/changelog.d/15543.misc b/changelog.d/15543.misc new file mode 100644 index 0000000000..ba1dc7597e --- /dev/null +++ b/changelog.d/15543.misc @@ -0,0 +1 @@ +Allow running Complement tests using [podman](https://podman.io/) by adding a `PODMAN` environment variable to `scripts-dev/complement.sh`. \ No newline at end of file diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 925dcd8933..56cf4ba81e 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -346,6 +346,7 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`. See the [worker documentation](../workers.md) for additional information on workers. - Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one. +- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker. 
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g: ```sh diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 1b1761202f..cba2799f15 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -11,6 +11,11 @@ # filepath of a local Complement checkout or by setting the COMPLEMENT_REF # environment variable to pull a different branch or commit. # +# To use the 'podman' command instead 'docker', set the PODMAN environment +# variable. Example: +# +# PODMAN=1 ./complement.sh +# # By default Synapse is run in monolith mode. This can be overridden by # setting the WORKERS environment variable. # @@ -30,7 +35,6 @@ # Exit if a line returns a non-zero exit code set -e - # Helper to emit annotations that collapse portions of the log in GitHub Actions echo_if_github() { if [[ -n "$GITHUB_WORKFLOW" ]]; then @@ -100,6 +104,16 @@ done # enable buildkit for the docker builds export DOCKER_BUILDKIT=1 +# Determine whether to use the docker or podman container runtime. +if [ -n "$PODMAN" ]; then + export CONTAINER_RUNTIME=podman + export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock + export BUILDAH_FORMAT=docker + export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal +else + export CONTAINER_RUNTIME=docker +fi + # Change to the repository root cd "$(dirname $0)/.." @@ -126,16 +140,16 @@ if [ -n "$use_editable_synapse" ]; then editable_mount="$(realpath .):/editable-src:z" if [ -n "$rebuild_editable_synapse" ]; then unset skip_docker_build - elif docker inspect complement-synapse-editable &>/dev/null; then + elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then # complement-synapse-editable already exists: see if we can still use it: # - The Rust module must still be importable; it will fail to import if the Rust source has changed. # - The Poetry lock file must be the same (otherwise we assume dependencies have changed) # First set up the module in the right place for an editable installation. - docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so + $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so - if (docker run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \ - && docker run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then + if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \ + && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then skip_docker_build=1 else echo "Editable Synapse image is stale. Will rebuild." @@ -149,25 +163,25 @@ if [ -z "$skip_docker_build" ]; then # Build a special image designed for use in development with editable # installs. - docker build -t synapse-editable \ + $CONTAINER_RUNTIME build -t synapse-editable \ -f "docker/editable.Dockerfile" . - docker build -t synapse-workers-editable \ + $CONTAINER_RUNTIME build -t synapse-workers-editable \ --build-arg FROM=synapse-editable \ -f "docker/Dockerfile-workers" . 
- docker build -t complement-synapse-editable \ + $CONTAINER_RUNTIME build -t complement-synapse-editable \ --build-arg FROM=synapse-workers-editable \ -f "docker/complement/Dockerfile" "docker/complement" # Prepare the Rust module - docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so + $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so else # Build the base Synapse image from the local checkout echo_if_github "::group::Build Docker image: matrixdotorg/synapse" - docker build -t matrixdotorg/synapse \ + $CONTAINER_RUNTIME build -t matrixdotorg/synapse \ --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \ --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \ -f "docker/Dockerfile" . @@ -175,12 +189,12 @@ if [ -z "$skip_docker_build" ]; then # Build the workers docker image (from the base Synapse image we just built). echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers" - docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . + $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . echo_if_github "::endgroup::" # Build the unified Complement image (from the worker Synapse image we just built). echo_if_github "::group::Build Docker image: complement/Dockerfile" - docker build -t complement-synapse \ + $CONTAINER_RUNTIME build -t complement-synapse \ -f "docker/complement/Dockerfile" "docker/complement" echo_if_github "::endgroup::" From e46d5f3586025a491d11a31ce2be4c540c38d404 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 5 May 2023 15:06:22 +0100 Subject: [PATCH 15/42] Factor out an `is_mine_server_name` method (#15542) Add an `is_mine_server_name` method, similar to `is_mine_id`. Ideally we would use this consistently, instead of sometimes comparing against `hs.hostname` and other times reaching into `hs.config.server.server_name`. Also fix a bug in the tests where `hs.hostname` would sometimes differ from `hs.config.server.server_name`. Signed-off-by: Sean Quah --- changelog.d/15542.misc | 1 + synapse/api/auth_blocking.py | 4 ++-- synapse/crypto/keyring.py | 4 ++-- synapse/federation/federation_base.py | 2 +- synapse/federation/federation_client.py | 4 ++-- synapse/federation/federation_server.py | 3 ++- synapse/federation/send_queue.py | 3 ++- synapse/federation/sender/__init__.py | 11 ++++++----- synapse/federation/transport/client.py | 4 ++-- synapse/federation/transport/server/_base.py | 5 ++++- synapse/handlers/event_auth.py | 5 +++-- synapse/handlers/federation.py | 3 ++- synapse/handlers/federation_event.py | 3 ++- synapse/handlers/profile.py | 4 ++-- synapse/handlers/sso.py | 3 ++- synapse/handlers/typing.py | 3 ++- synapse/rest/admin/media.py | 4 ++-- synapse/rest/client/room.py | 4 ++-- synapse/rest/media/download_resource.py | 4 ++-- synapse/rest/media/thumbnail_resource.py | 4 ++-- synapse/server.py | 4 ++++ synapse/storage/databases/main/room.py | 2 +- tests/unittest.py | 16 ++++++++++++++-- 23 files changed, 64 insertions(+), 36 deletions(-) create mode 100644 changelog.d/15542.misc diff --git a/changelog.d/15542.misc b/changelog.d/15542.misc new file mode 100644 index 0000000000..32e3d678a1 --- /dev/null +++ b/changelog.d/15542.misc @@ -0,0 +1 @@ +Factor out an `is_mine_server_name` method. 
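The `synapse/server.py` hunk adding the new helper is not shown above (it appears later in this patch, per the diffstat); presumably it is a simple hostname comparison mirroring `is_mine_id`, along the lines of:

```python
# Sketch of the new HomeServer helper assumed by the call sites below,
# e.g. `if self._is_mine_server_name(destination): ...`
def is_mine_server_name(self, server_name: str) -> bool:
    """Whether a server name refers to this homeserver."""
    return server_name == self.hostname
```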
diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py index 22348d2d86..fcf5b842c6 100644 --- a/synapse/api/auth_blocking.py +++ b/synapse/api/auth_blocking.py @@ -39,7 +39,7 @@ class AuthBlocking: self._mau_limits_reserved_threepids = ( hs.config.server.mau_limits_reserved_threepids ) - self._server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips async def check_auth_blocking( @@ -77,7 +77,7 @@ class AuthBlocking: if requester: if requester.authenticated_entity.startswith("@"): user_id = requester.authenticated_entity - elif requester.authenticated_entity == self._server_name: + elif self._is_mine_server_name(requester.authenticated_entity): # We never block the server from doing actions on behalf of # users. return diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index afdf6863d6..260aab3241 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -173,7 +173,7 @@ class Keyring: process_batch_callback=self._inner_fetch_key_requests, ) - self._hostname = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name # build a FetchKeyResult for each of our own keys, to shortcircuit the # fetcher. @@ -277,7 +277,7 @@ class Keyring: # If we are the originating server, short-circuit the key-fetch for any keys # we already have - if verify_request.server_name == self._hostname: + if self._is_mine_server_name(verify_request.server_name): for key_id in verify_request.key_ids: if key_id in self._local_verify_keys: found_keys[key_id] = self._local_verify_keys[key_id] diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 3df975958d..b77022b406 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -49,7 +49,7 @@ class FederationBase: def __init__(self, hs: "HomeServer"): self.hs = hs - self.server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self.keyring = hs.get_keyring() self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.store = hs.get_datastores().main diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 0b2d1a78f7..076b9287c6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -854,7 +854,7 @@ class FederationClient(FederationBase): for destination in destinations: # We don't want to ask our own server for information we don't have - if destination == self.server_name: + if self._is_mine_server_name(destination): continue try: @@ -1536,7 +1536,7 @@ class FederationClient(FederationBase): self, destinations: Iterable[str], room_id: str, event_dict: JsonDict ) -> None: for destination in destinations: - if destination == self.server_name: + if self._is_mine_server_name(destination): continue try: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index ca43c7bfc0..c590d8f96f 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -129,6 +129,7 @@ class FederationServer(FederationBase): def __init__(self, hs: "HomeServer"): super().__init__(hs) + self.server_name = hs.hostname self.handler = hs.get_federation_handler() self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self._federation_event_handler = hs.get_federation_event_handler() @@ -942,7 +943,7 @@ class 
FederationServer(FederationBase): authorising_server = get_domain_from_id( event.content[EventContentFields.AUTHORISING_USER] ) - if authorising_server != self.server_name: + if not self._is_mine_server_name(authorising_server): raise SynapseError( 400, f"Cannot authorise request from resident server: {authorising_server}", diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0b7c81677e..fb448f2155 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -68,6 +68,7 @@ class FederationRemoteSendQueue(AbstractFederationSender): self.clock = hs.get_clock() self.notifier = hs.get_notifier() self.is_mine_id = hs.is_mine_id + self.is_mine_server_name = hs.is_mine_server_name # We may have multiple federation sender instances, so we need to track # their positions separately. @@ -198,7 +199,7 @@ class FederationRemoteSendQueue(AbstractFederationSender): key: Optional[Hashable] = None, ) -> None: """As per FederationSender""" - if destination == self.server_name: + if self.is_mine_server_name(destination): logger.info("Not sending EDU to ourselves") return diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index edc4b1768c..f3bdc5a4d2 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -362,6 +362,7 @@ class FederationSender(AbstractFederationSender): self.clock = hs.get_clock() self.is_mine_id = hs.is_mine_id + self.is_mine_server_name = hs.is_mine_server_name self._presence_router: Optional["PresenceRouter"] = None self._transaction_manager = TransactionManager(hs) @@ -766,7 +767,7 @@ class FederationSender(AbstractFederationSender): domains = [ d for d in domains_set - if d != self.server_name + if not self.is_mine_server_name(d) and self._federation_shard_config.should_handle(self._instance_name, d) ] if not domains: @@ -832,7 +833,7 @@ class FederationSender(AbstractFederationSender): assert self.is_mine_id(state.user_id) for destination in destinations: - if destination == self.server_name: + if self.is_mine_server_name(destination): continue if not self._federation_shard_config.should_handle( self._instance_name, destination @@ -860,7 +861,7 @@ class FederationSender(AbstractFederationSender): content: content of EDU key: clobbering key for this edu """ - if destination == self.server_name: + if self.is_mine_server_name(destination): logger.info("Not sending EDU to ourselves") return @@ -897,7 +898,7 @@ class FederationSender(AbstractFederationSender): queue.send_edu(edu) def send_device_messages(self, destination: str, immediate: bool = True) -> None: - if destination == self.server_name: + if self.is_mine_server_name(destination): logger.warning("Not sending device update to ourselves") return @@ -919,7 +920,7 @@ class FederationSender(AbstractFederationSender): might have come back. 
""" - if destination == self.server_name: + if self.is_mine_server_name(destination): logger.warning("Not waking up ourselves") return diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index bc70b94f68..d2fa9976da 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -58,9 +58,9 @@ class TransportLayerClient: """Sends federation HTTP requests to other servers""" def __init__(self, hs: "HomeServer"): - self.server_name = hs.hostname self.client = hs.get_federation_http_client() self._faster_joins_enabled = hs.config.experimental.faster_joins_enabled + self._is_mine_server_name = hs.is_mine_server_name async def get_room_state_ids( self, destination: str, room_id: str, event_id: str @@ -235,7 +235,7 @@ class TransportLayerClient: transaction.transaction_id, ) - if transaction.destination == self.server_name: + if self._is_mine_server_name(transaction.destination): raise RuntimeError("Transport layer cannot send to itself!") # FIXME: This is only used by the tests. The actual json sent is diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index cdaf0d5de7..b6e9c58760 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -57,6 +57,7 @@ class Authenticator: self._clock = hs.get_clock() self.keyring = hs.get_keyring() self.server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self.store = hs.get_datastores().main self.federation_domain_whitelist = ( hs.config.federation.federation_domain_whitelist @@ -100,7 +101,9 @@ class Authenticator: json_request["signatures"].setdefault(origin, {})[key] = sig # if the origin_server sent a destination along it needs to match our own server_name - if destination is not None and destination != self.server_name: + if destination is not None and not self._is_mine_server_name( + destination + ): raise AuthenticationError( HTTPStatus.UNAUTHORIZED, "Destination mismatch in auth header", diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 0db0bd7304..3e37c0cbe2 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -29,7 +29,7 @@ from synapse.event_auth import ( ) from synapse.events import EventBase from synapse.events.builder import EventBuilder -from synapse.types import StateMap, StrCollection, get_domain_from_id +from synapse.types import StateMap, StrCollection if TYPE_CHECKING: from synapse.server import HomeServer @@ -47,6 +47,7 @@ class EventAuthHandler: self._store = hs.get_datastores().main self._state_storage_controller = hs.get_storage_controllers().state self._server_name = hs.hostname + self._is_mine_id = hs.is_mine_id async def check_auth_rules_from_context( self, @@ -247,7 +248,7 @@ class EventAuthHandler: if not await self.is_user_in_rooms(allowed_rooms, user_id): # If this is a remote request, the user might be in an allowed room # that we do not know about. 
- if get_domain_from_id(user_id) != self._server_name: + if not self._is_mine_id(user_id): for room_id in allowed_rooms: if not await self._store.is_host_joined(room_id, self._server_name): raise SynapseError( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 4ad808a5b4..19dec4812f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -141,6 +141,7 @@ class FederationHandler: self.server_name = hs.hostname self.keyring = hs.get_keyring() self.is_mine_id = hs.is_mine_id + self.is_mine_server_name = hs.is_mine_server_name self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.event_creation_handler = hs.get_event_creation_handler() self.event_builder_factory = hs.get_event_builder_factory() @@ -453,7 +454,7 @@ class FederationHandler: for dom in domains: # We don't want to ask our own server for information we don't have - if dom == self.server_name: + if self.is_mine_server_name(dom): continue try: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index fc15024166..06343d40e4 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -163,6 +163,7 @@ class FederationEventHandler: self._notifier = hs.get_notifier() self._is_mine_id = hs.is_mine_id + self._is_mine_server_name = hs.is_mine_server_name self._server_name = hs.hostname self._instance_name = hs.get_instance_name() @@ -688,7 +689,7 @@ class FederationEventHandler: server from invalid events (there is probably no point in trying to re-fetch invalid events from every other HS in the room.) """ - if dest == self._server_name: + if self._is_mine_server_name(dest): raise SynapseError(400, "Can't backfill from self.") events = await self._federation_client.backfill( diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 983b9b66fb..48f9858931 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -59,7 +59,7 @@ class ProfileHandler: self.max_avatar_size = hs.config.server.max_avatar_size self.allowed_avatar_mimetypes = hs.config.server.allowed_avatar_mimetypes - self.server_name = hs.config.server.server_name + self._is_mine_server_name = hs.is_mine_server_name self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules @@ -309,7 +309,7 @@ class ProfileHandler: else: server_name = host - if server_name == self.server_name: + if self._is_mine_server_name(server_name): media_info = await self.store.get_local_media(media_id) else: media_info = await self.store.get_cached_remote_media(server_name, media_id) diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index c28325323c..92c3742625 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -194,6 +194,7 @@ class SsoHandler: self._clock = hs.get_clock() self._store = hs.get_datastores().main self._server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self._registration_handler = hs.get_registration_handler() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() @@ -802,7 +803,7 @@ class SsoHandler: if profile["avatar_url"] is not None: server_name = profile["avatar_url"].split("/")[-2] media_id = profile["avatar_url"].split("/")[-1] - if server_name == self._server_name: + if self._is_mine_server_name(server_name): media = await self._media_repo.store.get_local_media(media_id) if media is not None and upload_name == media["upload_name"]: logger.info("skipping saving the user 
avatar") diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 39ae44ea95..7aeae5319c 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -68,6 +68,7 @@ class FollowerTypingHandler: self.server_name = hs.config.server.server_name self.clock = hs.get_clock() self.is_mine_id = hs.is_mine_id + self.is_mine_server_name = hs.is_mine_server_name self.federation = None if hs.should_send_federation(): @@ -153,7 +154,7 @@ class FollowerTypingHandler: member.room_id ) for domain in hosts: - if domain != self.server_name: + if not self.is_mine_server_name(domain): logger.debug("sending typing update to %s", domain) self.federation.build_and_send_edu( destination=domain, diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index c134ccfb3d..b7637dff0b 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -258,7 +258,7 @@ class DeleteMediaByID(RestServlet): def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.auth = hs.get_auth() - self.server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self.media_repository = hs.get_media_repository() async def on_DELETE( @@ -266,7 +266,7 @@ class DeleteMediaByID(RestServlet): ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - if self.server_name != server_name: + if not self._is_mine_server_name(server_name): raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local media") if await self.store.get_local_media(media_id) is None: diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 7699cc8d1b..951bd033f5 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -501,7 +501,7 @@ class PublicRoomListRestServlet(RestServlet): limit = None handler = self.hs.get_room_list_handler() - if server and server != self.hs.config.server.server_name: + if server and not self.hs.is_mine_server_name(server): # Ensure the server is valid. try: parse_and_validate_server_name(server) @@ -551,7 +551,7 @@ class PublicRoomListRestServlet(RestServlet): limit = None handler = self.hs.get_room_list_handler() - if server and server != self.hs.config.server.server_name: + if server and not self.hs.is_mine_server_name(server): # Ensure the server is valid. 
try: parse_and_validate_server_name(server) diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py index 8f270cf4cc..3c618ef60a 100644 --- a/synapse/rest/media/download_resource.py +++ b/synapse/rest/media/download_resource.py @@ -37,7 +37,7 @@ class DownloadResource(DirectServeJsonResource): def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"): super().__init__() self.media_repo = media_repo - self.server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name async def _async_render_GET(self, request: SynapseRequest) -> None: set_cors_headers(request) @@ -59,7 +59,7 @@ class DownloadResource(DirectServeJsonResource): b"no-referrer", ) server_name, media_id, name = parse_media_id(request) - if server_name == self.server_name: + if self._is_mine_server_name(server_name): await self.media_repo.get_local_media(request, media_id, name) else: allow_remote = parse_boolean(request, "allow_remote", default=True) diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index 4ee2a0dbda..a6396fb05a 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -59,7 +59,7 @@ class ThumbnailResource(DirectServeJsonResource): self.media_repo = media_repo self.media_storage = media_storage self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails - self.server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name async def _async_render_GET(self, request: SynapseRequest) -> None: set_cors_headers(request) @@ -71,7 +71,7 @@ class ThumbnailResource(DirectServeJsonResource): # TODO Parse the Accept header to get an prioritised list of thumbnail types. m_type = "image/png" - if server_name == self.server_name: + if self._is_mine_server_name(server_name): if self.dynamic_thumbnails: await self._select_or_generate_local_thumbnail( request, media_id, width, height, method, m_type diff --git a/synapse/server.py b/synapse/server.py index c557c60482..fd29c28173 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -377,6 +377,10 @@ class HomeServer(metaclass=abc.ABCMeta): return False return localpart_hostname[1] == self.hostname + def is_mine_server_name(self, server_name: str) -> bool: + """Determines whether a server name refers to this homeserver.""" + return server_name == self.hostname + @cache_in_self def get_clock(self) -> Clock: return Clock(self._reactor) diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index dd7dbb6901..ca8be8c80d 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -996,7 +996,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): If it is `None` media will be removed from quarantine """ logger.info("Quarantining media: %s/%s", server_name, media_id) - is_local = server_name == self.config.server.server_name + is_local = self.hs.is_mine_server_name(server_name) def _quarantine_media_by_id_txn(txn: LoggingTransaction) -> int: local_mxcs = [media_id] if is_local else [] diff --git a/tests/unittest.py b/tests/unittest.py index ee2f78ab01..b6fdf69635 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -566,7 +566,9 @@ class HomeserverTestCase(TestCase): client_ip, ) - def setup_test_homeserver(self, *args: Any, **kwargs: Any) -> HomeServer: + def setup_test_homeserver( + self, name: Optional[str] = None, **kwargs: Any + ) -> HomeServer: """ Set up the test homeserver, meant to be called by the overridable 
make_homeserver. It automatically passes through the test class's @@ -585,15 +587,25 @@ class HomeserverTestCase(TestCase): else: config = kwargs["config"] + # The server name can be specified using either the `name` argument or a config + # override. The `name` argument takes precedence over any config overrides. + if name is not None: + config["server_name"] = name + # Parse the config from a config dict into a HomeServerConfig config_obj = make_homeserver_config_obj(config) kwargs["config"] = config_obj + # The server name in the config is now `name`, if provided, or the `server_name` + # from a config override, or the default of "test". Whichever it is, we + # construct a homeserver with a matching name. + kwargs["name"] = config_obj.server.server_name + async def run_bg_updates() -> None: with LoggingContext("run_bg_updates"): self.get_success(stor.db_pool.updates.run_background_updates(False)) - hs = setup_test_homeserver(self.addCleanup, *args, **kwargs) + hs = setup_test_homeserver(self.addCleanup, **kwargs) stor = hs.get_datastores().main # Run the database background updates, when running against "master". From 7c95b65873c7a858388b9c99c7e9e15dc5ccb2b5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 5 May 2023 15:51:46 +0100 Subject: [PATCH 16/42] Clean up and clarify "Create or modify Account" Admin API documentation (#15544) --- changelog.d/15544.doc | 1 + docs/admin_api/user_admin_api.md | 83 +++++++++++++---------- synapse/handlers/profile.py | 4 +- synapse/rest/admin/users.py | 2 +- synapse/storage/databases/main/profile.py | 16 +++++ synapse/util/msisdn.py | 6 +- 6 files changed, 72 insertions(+), 40 deletions(-) create mode 100644 changelog.d/15544.doc diff --git a/changelog.d/15544.doc b/changelog.d/15544.doc new file mode 100644 index 0000000000..a6d1e96900 --- /dev/null +++ b/changelog.d/15544.doc @@ -0,0 +1 @@ +Clarify documentation of the "Create or modify account" Admin API. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 86c29ab380..6b952ba396 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -62,7 +62,7 @@ URL parameters: - `user_id`: fully-qualified user id: for example, `@user:server.com`. -## Create or modify Account +## Create or modify account This API allows an administrator to create or modify a user account with a specific `user_id`. @@ -78,28 +78,29 @@ with a body of: ```json { "password": "user_password", - "displayname": "User", + "logout_devices": false, + "displayname": "Alice Marigold", + "avatar_url": "mxc://example.com/abcde12345", "threepids": [ { "medium": "email", - "address": "" + "address": "alice@example.com" }, { "medium": "email", - "address": "" + "address": "alice@domain.org" } ], "external_ids": [ { - "auth_provider": "", - "external_id": "" + "auth_provider": "example", + "external_id": "12345" }, { - "auth_provider": "", - "external_id": "" + "auth_provider": "example2", + "external_id": "abc54321" } ], - "avatar_url": "", "admin": false, "deactivated": false, "user_type": null @@ -112,41 +113,51 @@ Returns HTTP status code: URL parameters: -- `user_id`: fully-qualified user id: for example, `@user:server.com`. +- `user_id` - A fully-qualified user id. For example, `@user:server.com`. Body parameters: -- `password` - string, optional. If provided, the user's password is updated and all +- `password` - **string**, optional. 
If provided, the user's password is updated and all devices are logged out, unless `logout_devices` is set to `false`. -- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't +- `logout_devices` - **bool**, optional, defaults to `true`. If set to `false`, devices aren't logged out even when `password` is provided. -- `displayname` - string, optional, defaults to the value of `user_id`. -- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn) - - `medium` - string. Kind of third-party ID, either `email` or `msisdn`. - - `address` - string. Value of third-party ID. - belonging to a user. -- `external_ids` - array, optional. Allow setting the identifier of the external identity - provider for SSO (Single sign-on). Details in the configuration manual under the - sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers). - - `auth_provider` - string. ID of the external identity provider. Value of `idp_id` - in the homeserver configuration. Note that no error is raised if the provided - value is not in the homeserver configuration. - - `external_id` - string, user ID in the external identity provider. -- `avatar_url` - string, optional, must be a +- `displayname` - **string**, optional. If set to an empty string (`""`), the user's display name + will be removed. +- `avatar_url` - **string**, optional. Must be a [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris). -- `admin` - bool, optional, defaults to `false`. -- `deactivated` - bool, optional. If unspecified, deactivation state will be left - unchanged on existing accounts and set to `false` for new accounts. - A user cannot be erased by deactivating with this API. For details on - deactivating users see [Deactivate Account](#deactivate-account). -- `user_type` - string or null, optional. If provided, the user type will be - adjusted. If `null` given, the user type will be cleared. Other - allowed options are: `bot` and `support`. + If set to an empty string (`""`), the user's avatar is removed. +- `threepids` - **array**, optional. If provided, the user's third-party IDs (email, msisdn) are + entirely replaced with the given list. Each item in the array is an object with the following + fields: + - `medium` - **string**, required. The type of third-party ID, either `email` or `msisdn` (phone number). + - `address` - **string**, required. The third-party ID itself, e.g. `alice@example.com` for `email` or + `447470274584` (for a phone number with country code "44") and `19254857364` (for a phone number + with country code "1") for `msisdn`. + Note: If a threepid is removed from a user via this option, Synapse will also attempt to remove + that threepid from any identity servers it is aware has a binding for it. +- `external_ids` - **array**, optional. Allow setting the identifier of the external identity + provider for SSO (Single sign-on). More details are in the configuration manual under the + sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers). + - `auth_provider` - **string**, required. The unique, internal ID of the external identity provider. + The same as `idp_id` from the homeserver configuration. Note that no error is raised if the + provided value is not in the homeserver configuration. + - `external_id` - **string**, required. 
An identifier for the user in the external identity provider.
+    When the user logs in to the identity provider, this must be the unique ID that they map to.
+- `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
+  granting them access to the Admin API, among other things.
+- `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
-If the user already exists then optional parameters default to the current value.
+  Note: the `password` field must also be set if both of the following are true:
+  - `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
+  - Users are allowed to set their password on this homeserver (both `password_config.enabled` and
+    `password_config.localdb_enabled` config options are set to `true`).
+  Users' passwords are wiped upon account deactivation, hence the need to set a new one here.
-In order to re-activate an account `deactivated` must be set to `false`. If
-users do not login via single-sign-on, a new `password` must be provided.
+  Note: a user cannot be erased with this API. For more details on
+  deactivating and erasing users see [Deactivate Account](#deactivate-account).
+- `user_type` - **string** or null, optional. If not provided, the user type will
+  not be changed. If `null` is given, the user type will be cleared.
+  Other allowed options are: `bot` and `support`.
 
 ## List Accounts
 
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 48f9858931..a9160c87e3 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -170,8 +170,8 @@ class ProfileHandler:
             displayname_to_set = None
 
         # If the admin changes the display name of a user, the requesting user cannot send
-        # the join event to update the displayname in the rooms.
-        # This must be done by the target user himself.
+        # the join event to update the display name in the rooms.
+        # This must be done by the target user themselves.
         if by_admin:
             requester = create_requester(
                 target_user,
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 331f225116..932333ae57 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -336,7 +336,7 @@ class UserRestServletV2(RestServlet):
                     HTTPStatus.CONFLICT, "External id is already in use."
                 )
 
-        if "avatar_url" in body and isinstance(body["avatar_url"], str):
+        if "avatar_url" in body:
             await self.profile_handler.set_avatar_url(
                 target_user, requester, body["avatar_url"], True
             )
diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py
index b109f8c07f..c4022d2427 100644
--- a/synapse/storage/databases/main/profile.py
+++ b/synapse/storage/databases/main/profile.py
@@ -85,6 +85,14 @@ class ProfileWorkerStore(SQLBaseStore):
     async def set_profile_displayname(
         self, user_id: UserID, new_displayname: Optional[str]
     ) -> None:
+        """
+        Set the display name of a user.
+
+        Args:
+            user_id: The user's ID.
+            new_displayname: The new display name. If this is None, the user's display
+                name is removed.
+        """
         user_localpart = user_id.localpart
         await self.db_pool.simple_upsert(
             table="profiles",
@@ -99,6 +107,14 @@ class ProfileWorkerStore(SQLBaseStore):
     async def set_profile_avatar_url(
         self, user_id: UserID, new_avatar_url: Optional[str]
     ) -> None:
+        """
+        Set the avatar of a user.
+
+        Args:
+            user_id: The user's ID.
+            new_avatar_url: The new avatar URL. If this is None, the user's avatar is
+                removed.
+ """ user_localpart = user_id.localpart await self.db_pool.simple_upsert( table="profiles", diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py index 1046224f15..3721a1558e 100644 --- a/synapse/util/msisdn.py +++ b/synapse/util/msisdn.py @@ -22,12 +22,16 @@ def phone_number_to_msisdn(country: str, number: str) -> str: Takes an ISO-3166-1 2 letter country code and phone number and returns an msisdn representing the canonical version of that phone number. + + As an example, if `country` is "GB" and `number` is "7470674927", this + function will return "447470674927". + Args: country: ISO-3166-1 2 letter country code number: Phone number in a national or international format Returns: - The canonical form of the phone number, as an msisdn + The canonical form of the phone number, as an msisdn. Raises: SynapseError if the number could not be parsed. """ From ad141efb474a02f14b5170e3d6606d06ffaa7d96 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 5 May 2023 15:51:58 +0100 Subject: [PATCH 17/42] Add `mdbook` to flake.nix (#15545) --- changelog.d/15532.misc | 2 +- changelog.d/15533.misc | 2 +- changelog.d/15545.misc | 1 + flake.nix | 3 +++ 4 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15545.misc diff --git a/changelog.d/15532.misc b/changelog.d/15532.misc index e58273f297..1ee700f829 100644 --- a/changelog.d/15532.misc +++ b/changelog.d/15532.misc @@ -1 +1 @@ -Install the `xmlsec` package and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. +Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. diff --git a/changelog.d/15533.misc b/changelog.d/15533.misc index 8ed23526ef..1ee700f829 100644 --- a/changelog.d/15533.misc +++ b/changelog.d/15533.misc @@ -1 +1 @@ -Install the `xmlsec` package and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. \ No newline at end of file +Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. diff --git a/changelog.d/15545.misc b/changelog.d/15545.misc new file mode 100644 index 0000000000..c7c0741f96 --- /dev/null +++ b/changelog.d/15545.misc @@ -0,0 +1 @@ + Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. \ No newline at end of file diff --git a/flake.nix b/flake.nix index 76243d0601..7351571e61 100644 --- a/flake.nix +++ b/flake.nix @@ -97,6 +97,9 @@ # Native dependencies for running Complement. olm + + # For building the Synapse documentation website. + mdbook ]; # Install Python and manage a virtualenv with Poetry. 
From a0f53afd62273767b0f54f227fd0020f64c3f6de Mon Sep 17 00:00:00 2001 From: Zdziszek <132405081+zdzichu-rks@users.noreply.github.com> Date: Fri, 5 May 2023 14:54:32 +0000 Subject: [PATCH 18/42] Handle `DNSNotImplementedError` in SRV resolver (#15523) Signed-off-by: Zdzichu --- changelog.d/15523.bugfix | 1 + synapse/http/federation/srv_resolver.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15523.bugfix diff --git a/changelog.d/15523.bugfix b/changelog.d/15523.bugfix new file mode 100644 index 0000000000..c00754019f --- /dev/null +++ b/changelog.d/15523.bugfix @@ -0,0 +1 @@ +Don't fail on federation over TOR where SRV queries are not supported. Contributed by Zdzichu. diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index de0e882b33..285baddeb7 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -22,7 +22,7 @@ import attr from twisted.internet.error import ConnectError from twisted.names import client, dns -from twisted.names.error import DNSNameError, DomainError +from twisted.names.error import DNSNameError, DNSNotImplementedError, DomainError from synapse.logging.context import make_deferred_yieldable @@ -145,6 +145,9 @@ class SrvResolver: # TODO: cache this. We can get the SOA out of the exception, and use # the negative-TTL value. return [] + except DNSNotImplementedError: + # For .onion homeservers this is unavailable, just fallback to host:8448 + return [] except DomainError as e: # We failed to resolve the name (other than a NameError) # Try something in the cache, else rereaise From 36df9c5e36cbad2a378d922085453726a21ae80c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 5 May 2023 12:13:50 -0400 Subject: [PATCH 19/42] Implement MSC4009 to widen the allowed Matrix ID grammar (#15536) Behind a configuration flag this adds + to the list of allowed characters in Matrix IDs. The main feature this enables is using full E.164 phone numbers as Matrix IDs. --- changelog.d/15536.feature | 1 + synapse/config/experimental.py | 3 +++ synapse/handlers/register.py | 27 ++++++++++++++------------- synapse/handlers/sso.py | 6 ++++-- synapse/types/__init__.py | 21 +++++++++++++++++++-- tests/handlers/test_register.py | 13 +++++++++++++ 6 files changed, 54 insertions(+), 17 deletions(-) create mode 100644 changelog.d/15536.feature diff --git a/changelog.d/15536.feature b/changelog.d/15536.feature new file mode 100644 index 0000000000..824c24575f --- /dev/null +++ b/changelog.d/15536.feature @@ -0,0 +1 @@ +Implement [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009) to expand the supported characters in Matrix IDs. 
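As a minimal, standalone sketch of what the `msc4009_e164_mxids` flag changes (mirroring the helper this patch adds to `synapse/types/__init__.py`; the example localpart is made up for illustration):

    import string

    # Default localpart alphabet, and the MSC4009 extension that also allows "+".
    mxid_localpart_allowed_characters = set("_-./=" + string.ascii_lowercase + string.digits)
    extended_mxid_localpart_allowed_characters = mxid_localpart_allowed_characters | {"+"}

    def contains_invalid_mxid_characters(localpart: str, use_extended_character_set: bool) -> bool:
        # Choose the extended (MSC4009) alphabet only when the experimental flag is on.
        allowed = (
            extended_mxid_localpart_allowed_characters
            if use_extended_character_set
            else mxid_localpart_allowed_characters
        )
        return any(c not in allowed for c in localpart)

    # An E.164-style localpart is rejected by default, but accepted with the flag enabled.
    assert contains_invalid_mxid_characters("+447700900123", use_extended_character_set=False)
    assert not contains_invalid_mxid_characters("+447700900123", use_extended_character_set=True)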
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index cab7ccf4b7..514d87cb2c 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -199,3 +199,6 @@ class ExperimentalConfig(Config): # MSC3970: Scope transaction IDs to devices self.msc3970_enabled = experimental.get("msc3970_enabled", False) + + # MSC4009: E.164 Matrix IDs + self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 61c4b833bd..c80946c2e9 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -46,7 +46,7 @@ from synapse.replication.http.register import ( ReplicationRegisterServlet, ) from synapse.spam_checker_api import RegistrationBehaviour -from synapse.types import RoomAlias, UserID, create_requester +from synapse.types import GUEST_USER_ID_PATTERN, RoomAlias, UserID, create_requester from synapse.types.state import StateFilter if TYPE_CHECKING: @@ -143,10 +143,15 @@ class RegistrationHandler: assigned_user_id: Optional[str] = None, inhibit_user_in_use_error: bool = False, ) -> None: - if types.contains_invalid_mxid_characters(localpart): + if types.contains_invalid_mxid_characters( + localpart, self.hs.config.experimental.msc4009_e164_mxids + ): + extra_chars = ( + "=_-./+" if self.hs.config.experimental.msc4009_e164_mxids else "=_-./" + ) raise SynapseError( 400, - "User ID can only contain characters a-z, 0-9, or '=_-./'", + f"User ID can only contain characters a-z, 0-9, or '{extra_chars}'", Codes.INVALID_USERNAME, ) @@ -195,16 +200,12 @@ class RegistrationHandler: errcode=Codes.FORBIDDEN, ) - if guest_access_token is None: - try: - int(localpart) - raise SynapseError( - 400, - "Numeric user IDs are reserved for guest users.", - errcode=Codes.INVALID_USERNAME, - ) - except ValueError: - pass + if guest_access_token is None and GUEST_USER_ID_PATTERN.fullmatch(localpart): + raise SynapseError( + 400, + "Numeric user IDs are reserved for guest users.", + errcode=Codes.INVALID_USERNAME, + ) async def register_user( self, diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 92c3742625..25fd2eb3a1 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -225,6 +225,8 @@ class SsoHandler: self._consent_at_registration = hs.config.consent.user_consent_at_registration + self._e164_mxids = hs.config.experimental.msc4009_e164_mxids + def register_identity_provider(self, p: SsoIdentityProvider) -> None: p_id = p.idp_id assert p_id not in self._identity_providers @@ -711,7 +713,7 @@ class SsoHandler: # Since the localpart is provided via a potentially untrusted module, # ensure the MXID is valid before registering. 
        if not attributes.localpart or contains_invalid_mxid_characters(
-            attributes.localpart
+            attributes.localpart, self._e164_mxids
         ):
             raise MappingException("localpart is invalid: %s" % (attributes.localpart,))
@@ -944,7 +946,7 @@ class SsoHandler:
                 localpart,
             )
 
-            if contains_invalid_mxid_characters(localpart):
+            if contains_invalid_mxid_characters(localpart, self._e164_mxids):
                 raise SynapseError(400, "localpart is invalid: %s" % (localpart,))
             user_id = UserID(localpart, self._server_name).to_string()
             user_infos = await self._store.get_users_by_id_case_insensitive(user_id)
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index 5cee9c3194..325219656a 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -335,18 +335,35 @@ class EventID(DomainSpecificString):
 mxid_localpart_allowed_characters = set(
     "_-./=" + string.ascii_lowercase + string.digits
 )
+# MSC4009 adds the + to the allowed characters.
+#
+# TODO If this was accepted, update the SSO code to support this, see the callers
+# of map_username_to_mxid_localpart.
+extended_mxid_localpart_allowed_characters = mxid_localpart_allowed_characters | {"+"}
+
+# Guest user IDs are purely numeric.
+GUEST_USER_ID_PATTERN = re.compile(r"^\d+$")
 
 
-def contains_invalid_mxid_characters(localpart: str) -> bool:
+def contains_invalid_mxid_characters(
+    localpart: str, use_extended_character_set: bool
+) -> bool:
     """Check for characters not allowed in an mxid or groupid localpart
 
     Args:
         localpart: the localpart to be checked
+        use_extended_character_set: True to use the extended allowed characters
+            from MSC4009.
 
     Returns:
         True if there are any naughty characters
     """
-    return any(c not in mxid_localpart_allowed_characters for c in localpart)
+    allowed_characters = (
+        extended_mxid_localpart_allowed_characters
+        if use_extended_character_set
+        else mxid_localpart_allowed_characters
+    )
+    return any(c not in allowed_characters for c in localpart)
 
 
 UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index aff1ec4758..73822b07a5 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -586,6 +586,19 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
         d = self.store.is_support_user(user_id)
         self.assertFalse(self.get_success(d))
 
+    def test_invalid_user_id(self) -> None:
+        invalid_user_id = "+abcd"
+        self.get_failure(
+            self.handler.register_user(localpart=invalid_user_id), SynapseError
+        )
+
+    @override_config({"experimental_features": {"msc4009_e164_mxids": True}})
+    def test_extended_user_ids(self) -> None:
+        """+ should be allowed according to MSC4009."""
+        valid_user_id = "+1234"
+        user_id = self.get_success(self.handler.register_user(localpart=valid_user_id))
+        self.assertEqual(user_id, valid_user_id)
+
     def test_invalid_user_id_length(self) -> None:
         invalid_user_id = "x" * 256
         self.get_failure(

From 28bceef84e489faf31d14ac1df7ffdb3e4126a9e Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 5 May 2023 15:18:47 -0400
Subject: [PATCH 20/42] Check appservices for devices during a /user/devices query. (#15539)

MSC3984 proxies /keys/query requests to appservices, but servers can
also request devices / keys from the /user/devices endpoint.

The formats are close enough that we can "proxy" that /user/devices to
appservices (by calling /keys/query) and then change the format of the
returned data before returning it over federation.
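A minimal sketch of the merge rule the handler change below applies when building the federation response (the device IDs and key values here are made up for illustration):

    # Devices already known to the local database.
    devices = [
        {"device_id": "abc", "keys": {"ed25519:abc": "key1"}},
        {"device_id": "def", "keys": {"ed25519:def": "stale"}},
    ]
    # Devices reported by the appservice via its /keys/query endpoint.
    appservice_devices = {
        "def": {"ed25519:def": "fresh"},
        "ghi": {"ed25519:ghi": "key3"},
    }

    # Appservice results take precedence: drop database rows the appservice also
    # returned, then append every appservice-reported device.
    devices = [d for d in devices if d["device_id"] not in appservice_devices]
    devices.extend(
        {"device_id": device_id, "keys": keys}
        for device_id, keys in appservice_devices.items()
    )
    # Result: "abc" comes from the database; "def" and "ghi" come from the appservice.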
--- changelog.d/15539.misc | 1 + synapse/handlers/device.py | 28 +++++++ tests/handlers/test_device.py | 135 +++++++++++++++++++++++++++++++++- 3 files changed, 163 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15539.misc diff --git a/changelog.d/15539.misc b/changelog.d/15539.misc new file mode 100644 index 0000000000..e5af5dee5c --- /dev/null +++ b/changelog.d/15539.misc @@ -0,0 +1 @@ +Proxy `/user/devices` federation queries to application services for [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984). diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index b9d3b7fbc6..5d12a39e26 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -75,10 +75,14 @@ class DeviceWorkerHandler: self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self.state = hs.get_state_handler() + self._appservice_handler = hs.get_application_service_handler() self._state_storage = hs.get_storage_controllers().state self._auth_handler = hs.get_auth_handler() self.server_name = hs.hostname self._msc3852_enabled = hs.config.experimental.msc3852_enabled + self._query_appservices_for_keys = ( + hs.config.experimental.msc3984_appservice_key_query + ) self.device_list_updater = DeviceListWorkerUpdater(hs) @@ -328,6 +332,30 @@ class DeviceWorkerHandler: user_id, "self_signing" ) + # Check if the application services have any results. + if self._query_appservices_for_keys: + # Query the appservice for all devices for this user. + query: Dict[str, Optional[List[str]]] = {user_id: None} + + # Query the appservices for any keys. + appservice_results = await self._appservice_handler.query_keys(query) + + # Merge results, overriding anything from the database. + appservice_devices = appservice_results.get("device_keys", {}).get( + user_id, {} + ) + + # Filter the database results to only those devices that the appservice has + # *not* responded with. + devices = [d for d in devices if d["device_id"] not in appservice_devices] + # Append the appservice response by wrapping each result in another dictionary. + devices.extend( + {"device_id": device_id, "keys": device} + for device_id, device in appservice_devices.items() + ) + + # TODO Handle cross-signing keys. + return { "user_id": user_id, "stream_id": stream_id, diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index ce7525e29c..ee48f9e546 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -15,15 +15,22 @@ # limitations under the License. 
from typing import Optional +from unittest import mock from twisted.test.proto_helpers import MemoryReactor +from synapse.api.constants import RoomEncryptionAlgorithms from synapse.api.errors import NotFoundError, SynapseError +from synapse.appservice import ApplicationService from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN, DeviceHandler from synapse.server import HomeServer +from synapse.storage.databases.main.appservice import _make_exclusive_regex +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest +from tests.test_utils import make_awaitable +from tests.unittest import override_config user1 = "@boris:aaa" user2 = "@theresa:bbb" @@ -31,7 +38,12 @@ user2 = "@theresa:bbb" class DeviceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server", federation_http_client=None) + self.appservice_api = mock.Mock() + hs = self.setup_test_homeserver( + "server", + federation_http_client=None, + application_service_api=self.appservice_api, + ) handler = hs.get_device_handler() assert isinstance(handler, DeviceHandler) self.handler = handler @@ -265,6 +277,127 @@ class DeviceTestCase(unittest.HomeserverTestCase): ) self.reactor.advance(1000) + @override_config({"experimental_features": {"msc3984_appservice_key_query": True}}) + def test_on_federation_query_user_devices_appservice(self) -> None: + """Test that querying of appservices for keys overrides responses from the database.""" + local_user = "@boris:" + self.hs.hostname + device_1 = "abc" + device_2 = "def" + device_3 = "ghi" + + # There are 3 devices: + # + # 1. One which is uploaded to the homeserver. + # 2. One which is uploaded to the homeserver, but a newer copy is returned + # by the appservice. + # 3. One which is only returned by the appservice. + device_key_1: JsonDict = { + "user_id": local_user, + "device_id": device_1, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + "keys": { + "ed25519:abc": "base64+ed25519+key", + "curve25519:abc": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:abc": "base64+signature"}}, + } + device_key_2a: JsonDict = { + "user_id": local_user, + "device_id": device_2, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + "keys": { + "ed25519:def": "base64+ed25519+key", + "curve25519:def": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:def": "base64+signature"}}, + } + + device_key_2b: JsonDict = { + "user_id": local_user, + "device_id": device_2, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + # The device ID is the same (above), but the keys are different. + "keys": { + "ed25519:xyz": "base64+ed25519+key", + "curve25519:xyz": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:xyz": "base64+signature"}}, + } + device_key_3: JsonDict = { + "user_id": local_user, + "device_id": device_3, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + "keys": { + "ed25519:jkl": "base64+ed25519+key", + "curve25519:jkl": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:jkl": "base64+signature"}}, + } + + # Upload keys for devices 1 & 2a. 
+ e2e_keys_handler = self.hs.get_e2e_keys_handler() + self.get_success( + e2e_keys_handler.upload_keys_for_user( + local_user, device_1, {"device_keys": device_key_1} + ) + ) + self.get_success( + e2e_keys_handler.upload_keys_for_user( + local_user, device_2, {"device_keys": device_key_2a} + ) + ) + + # Inject an appservice interested in this user. + appservice = ApplicationService( + token="i_am_an_app_service", + id="1234", + namespaces={"users": [{"regex": r"@boris:.+", "exclusive": True}]}, + # Note: this user does not have to match the regex above + sender="@as_main:test", + ) + self.hs.get_datastores().main.services_cache = [appservice] + self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex( + [appservice] + ) + + # Setup a response. + self.appservice_api.query_keys.return_value = make_awaitable( + { + "device_keys": { + local_user: {device_2: device_key_2b, device_3: device_key_3} + } + } + ) + + # Request all devices. + res = self.get_success( + self.handler.on_federation_query_user_devices(local_user) + ) + self.assertIn("devices", res) + res_devices = res["devices"] + for device in res_devices: + device["keys"].pop("unsigned", None) + self.assertEqual( + res_devices, + [ + {"device_id": device_1, "keys": device_key_1}, + {"device_id": device_2, "keys": device_key_2b}, + {"device_id": device_3, "keys": device_key_3}, + ], + ) + class DehydrationTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: From 9a87895b593d76a1e5b62dce40653863ff1d90ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 07:13:47 -0400 Subject: [PATCH 21/42] Bump types-requests from 2.29.0.0 to 2.30.0.0 (#15553) --- changelog.d/15553.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15553.misc diff --git a/changelog.d/15553.misc b/changelog.d/15553.misc new file mode 100644 index 0000000000..ca9eafd6c1 --- /dev/null +++ b/changelog.d/15553.misc @@ -0,0 +1 @@ +Bump types-requests from 2.29.0.0 to 2.30.0.0. 
diff --git a/poetry.lock b/poetry.lock index 8e82fddeba..596688b84c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3109,18 +3109,18 @@ files = [ [[package]] name = "types-requests" -version = "2.29.0.0" +version = "2.30.0.0" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.29.0.0.tar.gz", hash = "sha256:c86f4a955d943d2457120dbe719df24ef0924e11177164d10a0373cf311d7b4d"}, - {file = "types_requests-2.29.0.0-py3-none-any.whl", hash = "sha256:4cf6e323e856c779fbe8815bb977a5bf5d6c5034713e4c17ff2a9a20610f5b27"}, + {file = "types-requests-2.30.0.0.tar.gz", hash = "sha256:dec781054324a70ba64430ae9e62e7e9c8e4618c185a5cb3f87a6738251b5a31"}, + {file = "types_requests-2.30.0.0-py3-none-any.whl", hash = "sha256:c6cf08e120ca9f0dc4fa4e32c3f953c3fba222bcc1db6b97695bce8da1ba9864"}, ] [package.dependencies] -types-urllib3 = "<1.27" +types-urllib3 = "*" [[package]] name = "types-setuptools" From 245d34bdccbc3519011c00560b9bde0d6539ca97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 07:14:52 -0400 Subject: [PATCH 22/42] Bump ruff from 0.0.259 to 0.0.265 (#15551) --- changelog.d/15551.misc | 1 + poetry.lock | 38 +++++++++++++++++++------------------- pyproject.toml | 2 +- 3 files changed, 21 insertions(+), 20 deletions(-) create mode 100644 changelog.d/15551.misc diff --git a/changelog.d/15551.misc b/changelog.d/15551.misc new file mode 100644 index 0000000000..a8bedbe0e7 --- /dev/null +++ b/changelog.d/15551.misc @@ -0,0 +1 @@ +Bump ruff from 0.0.259 to 0.0.265. diff --git a/poetry.lock b/poetry.lock index 596688b84c..210bea3a27 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2323,29 +2323,29 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruff" -version = "0.0.259" +version = "0.0.265" description = "An extremely fast Python linter, written in Rust." 
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.259-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:f3938dc45e2a3f818e9cbd53007265c22246fbfded8837b2c563bf0ebde1a226"}, - {file = "ruff-0.0.259-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:22e1e35bf5f12072cd644d22afd9203641ccf258bc14ff91aa1c43dc14f6047d"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2fb20e89e85d147c85caa807707a1488bccc1f3854dc3d53533e89b52a0c5ff"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:49e903bcda19f6bb0725a962c058eb5d61f40d84ef52ed53b61939b69402ab4e"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71f0ef1985e9a6696fa97da8459917fa34bdaa2c16bd33bd5edead585b7d44f7"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7cfef26619cba184d59aa7fa17b48af5891d51fc0b755a9bc533478a10d4d066"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79b02fa17ec1fd8d306ae302cb47fb614b71e1f539997858243769bcbe78c6d9"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:428507fb321b386dda70d66cd1a8aa0abf51d7c197983d83bb9e4fa5ee60300b"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5fbaea9167f1852757f02133e5daacdb8c75b3431343205395da5b10499927a"}, - {file = "ruff-0.0.259-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:40ae87f2638484b7e8a7567b04a7af719f1c484c5bf132038b702bb32e1f6577"}, - {file = "ruff-0.0.259-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:29e2b77b7d5da6a7dd5cf9b738b511355c5734ece56f78e500d4b5bffd58c1a0"}, - {file = "ruff-0.0.259-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5b3c1beacf6037e7f0781d4699d9a2dd4ba2462f475be5b1f45cf84c4ba3c69d"}, - {file = "ruff-0.0.259-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:daaea322e7e85f4c13d82be9536309e1c4b8b9851bb0cbc7eeb15d490fd46bf9"}, - {file = "ruff-0.0.259-py3-none-win32.whl", hash = "sha256:38704f151323aa5858370a2f792e122cc25e5d1aabe7d42ceeab83da18f0b456"}, - {file = "ruff-0.0.259-py3-none-win_amd64.whl", hash = "sha256:aa9449b898287e621942cc71b9327eceb8f0c357e4065fecefb707ef2d978df8"}, - {file = "ruff-0.0.259-py3-none-win_arm64.whl", hash = "sha256:e4f39e18702de69faaaee3969934b92d7467285627f99a5b6ecd55a7d9f5d086"}, - {file = "ruff-0.0.259.tar.gz", hash = "sha256:8b56496063ab3bfdf72339a5fbebb8bd46e5c5fee25ef11a9f03b208fa0562ec"}, + {file = "ruff-0.0.265-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:30ddfe22de6ce4eb1260408f4480bbbce998f954dbf470228a21a9b2c45955e4"}, + {file = "ruff-0.0.265-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:a11bd0889e88d3342e7bc514554bb4461bf6cc30ec115821c2425cfaac0b1b6a"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a9b38bdb40a998cbc677db55b6225a6c4fadcf8819eb30695e1b8470942426b"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a8b44a245b60512403a6a03a5b5212da274d33862225c5eed3bcf12037eb19bb"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b279fa55ea175ef953208a6d8bfbcdcffac1c39b38cdb8c2bfafe9222add70bb"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:5028950f7af9b119d43d91b215d5044976e43b96a0d1458d193ef0dd3c587bf8"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4057eb539a1d88eb84e9f6a36e0a999e0f261ed850ae5d5817e68968e7b89ed9"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d586e69ab5cbf521a1910b733412a5735936f6a610d805b89d35b6647e2a66aa"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa17b13cd3f29fc57d06bf34c31f21d043735cc9a681203d634549b0e41047d1"}, + {file = "ruff-0.0.265-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:9ac13b11d9ad3001de9d637974ec5402a67cefdf9fffc3929ab44c2fcbb850a1"}, + {file = "ruff-0.0.265-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:62a9578b48cfd292c64ea3d28681dc16b1aa7445b7a7709a2884510fc0822118"}, + {file = "ruff-0.0.265-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d0f9967f84da42d28e3d9d9354cc1575f96ed69e6e40a7d4b780a7a0418d9409"}, + {file = "ruff-0.0.265-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1d5a8de2fbaf91ea5699451a06f4074e7a312accfa774ad9327cde3e4fda2081"}, + {file = "ruff-0.0.265-py3-none-win32.whl", hash = "sha256:9e9db5ccb810742d621f93272e3cc23b5f277d8d00c4a79668835d26ccbe48dd"}, + {file = "ruff-0.0.265-py3-none-win_amd64.whl", hash = "sha256:f54facf286103006171a00ce20388d88ed1d6732db3b49c11feb9bf3d46f90e9"}, + {file = "ruff-0.0.265-py3-none-win_arm64.whl", hash = "sha256:c78470656e33d32ddc54e8482b1b0fc6de58f1195586731e5ff1405d74421499"}, + {file = "ruff-0.0.265.tar.gz", hash = "sha256:53c17f0dab19ddc22b254b087d1381b601b155acfa8feed514f0d6a413d0ab3a"}, ] [[package]] @@ -3427,4 +3427,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "102eed4faa13eab195555ea070f235acd1e3f0ff9cf028afcac6c51b3e409071" +content-hash = "ef3a16dd66177f7141239e1a2d3e07cc14c08f1e4e0c5127184d022bc062da52" diff --git a/pyproject.toml b/pyproject.toml index caf69cc53f..a571cfbebd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -311,7 +311,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.259" +ruff = "0.0.265" # Typechecking mypy = "*" From db093df5ebbf71148d32797448d9eec8b3d8bdac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 07:15:10 -0400 Subject: [PATCH 23/42] Bump types-setuptools from 67.6.0.5 to 67.7.0.1 (#15549) --- changelog.d/15549.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15549.misc diff --git a/changelog.d/15549.misc b/changelog.d/15549.misc new file mode 100644 index 0000000000..70573688d1 --- /dev/null +++ b/changelog.d/15549.misc @@ -0,0 +1 @@ +Bump types-setuptools from 67.6.0.5 to 67.7.0.1. 
diff --git a/poetry.lock b/poetry.lock index 210bea3a27..1fccac6850 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3124,14 +3124,14 @@ types-urllib3 = "*" [[package]] name = "types-setuptools" -version = "67.6.0.5" +version = "67.7.0.1" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.6.0.5.tar.gz", hash = "sha256:3a708e66c7bdc620e4d0439f344c750c57a4340c895a4c3ed2d0fc4ae8eb9962"}, - {file = "types_setuptools-67.6.0.5-py3-none-any.whl", hash = "sha256:dae5a4a659dbb6dba57773440f6e2dbdd8ef282dc136a174a8a59bd33d949945"}, + {file = "types-setuptools-67.7.0.1.tar.gz", hash = "sha256:980a2651b2b019809817e1585071596b87fbafcb54433ff3b12445461db23790"}, + {file = "types_setuptools-67.7.0.1-py3-none-any.whl", hash = "sha256:471a4ecf6984ffada63ffcfa884bfcb62718bd2d1a1acf8ee5513ec99789ed5e"}, ] [[package]] From 0a18aa236df13592d45d47f6dc749c350c6bc2bb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 07:15:22 -0400 Subject: [PATCH 24/42] Bump sentry-sdk from 1.19.1 to 1.22.1 (#15550) --- changelog.d/15550.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15550.misc diff --git a/changelog.d/15550.misc b/changelog.d/15550.misc new file mode 100644 index 0000000000..58d5594e7a --- /dev/null +++ b/changelog.d/15550.misc @@ -0,0 +1 @@ +Bump sentry-sdk from 1.19.1 to 1.22.1. diff --git a/poetry.lock b/poetry.lock index 1fccac6850..cbc8ca8ab4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2382,19 +2382,19 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.19.1" +version = "1.22.1" description = "Python client for Sentry (https://sentry.io)" category = "main" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.19.1.tar.gz", hash = "sha256:7ae78bd921981a5010ab540d6bdf3b793659a4db8cccf7f16180702d48a80d84"}, - {file = "sentry_sdk-1.19.1-py2.py3-none-any.whl", hash = "sha256:885a11c69df23e53eb281d003b9ff15a5bdfa43d8a2a53589be52104a1b4582f"}, + {file = "sentry-sdk-1.22.1.tar.gz", hash = "sha256:052dff5069c6f0d836ee014323576824a9b40836fc003fb12489a1f19c60a3c9"}, + {file = "sentry_sdk-1.22.1-py2.py3-none-any.whl", hash = "sha256:c6c6946f8c927adb00af1c5ab6921df38775b2199b9003816d5935a1310352d5"}, ] [package.dependencies] certifi = "*" -urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} +urllib3 = {version = ">=1.26.11,<2.0.0", markers = "python_version >= \"3.6\""} [package.extras] aiohttp = ["aiohttp (>=3.5)"] From 058c6269f3a65d0da1315c042745f34bdcc24594 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 07:29:55 -0400 Subject: [PATCH 25/42] Bump serde from 1.0.160 to 1.0.162 (#15548) --- Cargo.lock | 8 ++++---- changelog.d/15548.misc | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15548.misc diff --git a/Cargo.lock b/Cargo.lock index 1085673c72..b03076d9fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.160" +version = "1.0.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" +checksum = "71b2f6e1ab5c2b98c05f0f35b236b22e8df7ead6ffbf51d7808da7f8817e7ab6" dependencies = [ 
"serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.160" +version = "1.0.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" +checksum = "a2a0814352fd64b58489904a44ea8d90cb1a91dcb6b4f5ebabc32c8318e93cb6" dependencies = [ "proc-macro2", "quote", diff --git a/changelog.d/15548.misc b/changelog.d/15548.misc new file mode 100644 index 0000000000..e05ddde438 --- /dev/null +++ b/changelog.d/15548.misc @@ -0,0 +1 @@ +Bump serde from 1.0.160 to 1.0.162. From 6b7da312213cb6a75af34e9f02d8e5f30cecf73c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 07:57:40 -0400 Subject: [PATCH 27/42] Bump hiredis from 2.2.2 to 2.2.3 (#15552) --- changelog.d/15552.misc | 1 + poetry.lock | 180 ++++++++++++++++++++--------------------- 2 files changed, 91 insertions(+), 90 deletions(-) create mode 100644 changelog.d/15552.misc diff --git a/changelog.d/15552.misc b/changelog.d/15552.misc new file mode 100644 index 0000000000..24972a2f8c --- /dev/null +++ b/changelog.d/15552.misc @@ -0,0 +1 @@ +Bump hiredis from 2.2.2 to 2.2.3. diff --git a/poetry.lock b/poetry.lock index cbc8ca8ab4..09d486ba51 100644 --- a/poetry.lock +++ b/poetry.lock @@ -629,101 +629,101 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\"" [[package]] name = "hiredis" -version = "2.2.2" +version = "2.2.3" description = "Python wrapper for hiredis" category = "main" optional = true python-versions = ">=3.7" files = [ - {file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:ba6123ff137275e2f4c31fc74b93813fcbb79160d43f5357163e09638c7743de"}, - {file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d995846acc8e3339fb7833cd19bf6f3946ff5157c8488a4df9c51cd119a36870"}, - {file = "hiredis-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82f869ca44bcafa37cd71cfa1429648fa354d6021dcd72f03a2f66bcb339c546"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa90a5ee7a7f30c3d72d3513914b8f51f953a71b8cbd52a241b6db6685e55645"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01e2e588392b5fdcc3a6aa0eb62a2eb2a142f829082fa4c3354228029d3aa1ce"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dac177a6ab8b4eb4d5e74978c29eef7cc9eef14086f814cb3893f7465578044"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb992e3f9753c5a0c637f333c2010d1ad702aebf2d730ee4d484f32b19bae97"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61c22fda5fc25d31bbced24a8322d33c5cb8cad9ba698634c16edb5b3e79a91"}, - {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9873898e26e50cd41415e9d1ea128bfdb60eb26abb4f5be28a4500fd7834dc0c"}, - {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2c18b00a382546e19bcda8b83dcca5b6e0dbc238d235723434405f48a18e8f77"}, - {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:8c3a6998f6f88d7ca4d082fd26525074df13162b274d7c64034784b6fdc56666"}, - {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0fc1f9a9791d028b2b8afa318ccff734c7fc8861d37a04ca9b3d27c9b05f9718"}, - {file = 
"hiredis-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f2cfd323f83985f2bed6ed013107873275025af270485b7d04c338bfb47bd14"}, - {file = "hiredis-2.2.2-cp310-cp310-win32.whl", hash = "sha256:55c7e9a9e05f8c0555bfba5c16d98492f8b6db650e56d0c35cc28aeabfc86020"}, - {file = "hiredis-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:eaff526c2fed31c971b0fa338a25237ae5513550ef75d0b85b9420ec778cca45"}, - {file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:688b9b7458b4f3f452fea6ed062c04fa1fd9a69d9223d95c6cb052581aba553b"}, - {file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:544d52fde3a8dac7854673eac20deca05214758193c01926ffbb0d57c6bf4ffe"}, - {file = "hiredis-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:990916e8b0b4eedddef787e73549b562f8c9e73a7fea82f9b8ff517806774ad0"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10dc34854e9acfb3e7cc4157606e2efcb497b1c6fca07bd6c3be34ae5e413f13"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c446a2007985ae49c2ecd946dd819dea72b931beb5f647ba08655a1a1e133fa8"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02b9f928dc6cd43ed0f0ffc1c75fb209fb180f004b7e2e19994805f998d247aa"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a355aff8dfa02ebfe67f0946dd706e490bddda9ea260afac9cdc43942310c53"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831461abe5b63e73719621a5f31d8fc175528a05dc09d5a8aa8ef565d6deefa4"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75349f7c8f77eb0fd33ede4575d1e5b0a902a8176a436bf03293d7fec4bd3894"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1eb39b34d15220095dc49ad1e1082580d35cd3b6d9741def52988b5075e4ff03"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a9b306f4e870747eea8b008dcba2e9f1e4acd12b333a684bc1cc120e633a280e"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:03dfb4ab7a2136ce1be305592553f102e1bd91a96068ab2778e3252aed20d9bc"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8bc89c7e33fecb083a199ade0131a34d20365a8c32239e218da57290987ca9a"}, - {file = "hiredis-2.2.2-cp311-cp311-win32.whl", hash = "sha256:ed44b3c711cecde920f238ac35f70ac08744f2079b6369655856e43944464a72"}, - {file = "hiredis-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:2e2f0ce3e8ab1314a52f562386220f6714fd24d7968a95528135ad04e88cc741"}, - {file = "hiredis-2.2.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:e7e61ab75b851aac2d6bc634d03738a242a6ef255a44178437b427c5ebac0a87"}, - {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9eb14339e399554bb436cc4628e8aaa3943adf7afcf34aba4cbd1e3e6b9ec7ec"}, - {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4ec57886f20f4298537cb1ab9dbda98594fb8d7c724c5fbf9a4b55329fd4a63"}, - {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a89f5afb9827eab07b9c8c585cd4dc95e5232c727508ae2c935d09531abe9e33"}, - {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3645590b9234cafd21c8ecfbf252ad9aa1d67629f4bdc98ba3627f48f8f7b5aa"}, - {file = 
"hiredis-2.2.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99350e89f52186146938bdba0b9c6cd68802c20346707d6ca8366f2d69d89b2f"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b5d290f3d8f7a05c4adbe6c355055b87c7081bfa1eccd1ae5491216307ee5f53"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c95be6f20377d5995ef41a98314542e194d2dc9c2579d8f130a1aea78d48fd42"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e4e2da61a04251121cb551f569c3250e6e27e95f2a80f8351c36822eda1f5d2b"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:ac7f8d68826f95a3652e44b0c12bfa74d3aa6531d47d5dbe6a2fbfc7979bc20f"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:359e662324318baadb768d3c4ade8c4bdcfbb313570eb01e15d75dc5db781815"}, - {file = "hiredis-2.2.2-cp37-cp37m-win32.whl", hash = "sha256:fd0ca35e2cf44866137cbb5ae7e439fab18a0b0e0e1cf51d45137622d59ec012"}, - {file = "hiredis-2.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c9488ffb10acc6b121c498875278b0a6715d193742dc92d21a281712169ac06d"}, - {file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:1570fe4f93bc1ea487fb566f2b863fd0ed146f643a4ea31e4e07036db9e0c7f8"}, - {file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8753c561b37cccbda7264c9b4486e206a6318c18377cd647beb3aa41a15a6beb"}, - {file = "hiredis-2.2.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a06d0dd84f10be6b15a92edbca2490b64917280f66d8267c63de99b6550308ad"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40ff3f1ec3a4046732e9e41df08dcb1a559847196755d295d43e32528aae39e6"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24d856e13c02bd9d28a189e47be70cbba6f2c2a4bd85a8cc98819db9e7e3e06"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ee9fe7cef505e8d925c70bebcc16bfab12aa7af922f948346baffd4730f7b00"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03ab1d545794bb0e09f3b1e2c8b3adcfacd84f6f2d402bfdcd441a98c0e9643c"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14dfccf4696d75395c587a5dafafb4f7aa0a5d55309341d10bc2e7f1eaa20771"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2ddc573809ca4374da1b24b48604f34f3d5f0911fcccfb1c403ff8d8ca31c232"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:24301ca2bf9b2f843b4c3015c90f161798fa3bbc5b95fd494785751b137dbbe2"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b083a69e158138ffa95740ff6984d328259387b5596908021b3ccb946469ff66"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8e16dc949cc2e9c5fbcd08de05b5fb61b89ff65738d772863c5c96248628830e"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:674f296c3c89cb53f97aa9ba2508d3f360ad481b9e0c0e3a59b342a15192adaf"}, - {file = "hiredis-2.2.2-cp38-cp38-win32.whl", hash = "sha256:20ecbf87aac4f0f33f9c55ae15cb73b485d256c57518c590b7d0c9c152150632"}, - {file = "hiredis-2.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:b11960237a3025bf248135e5b497dc4923e83d137eb798fbfe78b40d57c4b156"}, - {file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_universal2.whl", hash = 
"sha256:18103090b8eda9c529830e26594e88b0b1472055785f3ed29b8adc694d03862a"}, - {file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d1acb7c957e5343303b3862947df3232dc7395da320b3b9ae076dfaa56ad59dc"}, - {file = "hiredis-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4997f55e1208af95a8fbd0fa187b04c672fcec8f66e49b9ab7fcc45cc1657dc4"}, - {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:449e18506d22af40977abd0f5a8979f57f88d4562fe591478a3438d76a15133d"}, - {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a32a4474f7a4abdea954f3365608edee3f90f1de9fa05b81d214d4cad04c718a"}, - {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e86c800c6941698777fc58419216a66a7f76504f1cea72381d2ee206888e964d"}, - {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c73aa295c5369135247ff63aa1fbb116067485d0506cd787cc0c868e72bbee55"}, - {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e10a66680023bd5c5a3d605dae0844e3dde60eac5b79e39f51395a2aceaf634"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:03ab760fc96e0c5d36226eb727f30645bf6a53c97f14bfc0a4d0401bfc9b8af7"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:855d258e7f1aee3d7fbd5b1dc87790b1b5016e23d369a97b934a25ae7bc0171f"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ccc33d87866d213f84f857a98f69c13f94fbf99a3304e328869890c9e49c8d65"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:339af17bb9817f8acb127247c79a99cad63db6738c0fb2aec9fa3d4f35d2a250"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:57f73aa04d0b70ff436fb35fa7ea2b796aa7addbd7ebb8d1aa1f3d1b3e4439f1"}, - {file = "hiredis-2.2.2-cp39-cp39-win32.whl", hash = "sha256:e97d4e650b8d933a1229f341db92b610fc52b8d752490235977b63b81fbbc2cb"}, - {file = "hiredis-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:8d43a7bba66a800279e33229a206861be09c279e261eaa8db4824e59465f4848"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632d79fd02b03e8d9fbaebbe40bfe34b920c5d0a9c0ef6270752e0db85208175"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a5fefac31c84143782ec1ebc323c04e733a6e4bfebcef9907a34e47a465e648"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5155bc1710df8e21aa48c9b2f4d4e13e4987e1efff363a1ef9c84fae2cc6c145"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f220b71235d2deab1b4b22681c8aee444720d973b80f1b86a4e2a85f6bcf1e1"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1f1efbe9cc29a3af39cf7eed27225f951aed3f48a1149c7fb74529fb5ab86d4"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1f1c44242c18b1f02e6d1162f133d65d00e09cc10d9165dccc78662def72abc2"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0f444d9062f7e487ef42bab2fb2e290f1704afcbca48ad3ec23de63eef0fda"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ac15e7e1efca51b4695e540c80c328accb352c9608da7c2df82d1fa1a3c539ef"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20cfbc469400669a5999aa34ccba3872a1e34490ec3d5c84e8c0752c27977b7c"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:bae004a0b978bf62e38d0eef5ab9156f8101d01167b3ca7054bd0994b773e917"}, - {file = "hiredis-2.2.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1ce725542133dbdda9e8704867ef52651886bd1ef568c6fd997a27404381985"}, - {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6ea7532221c97fa6d79f7d19d452cd9d1141d759c54279cc4774ce24728f13"}, - {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7114961ed78d708142f6c6eb1d2ed65dc3da4b5ae8a4660ad889dd7fc891971"}, - {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b084fbc3e69f99865242f8e1ccd4ea2a34bf6a3983d015d61133377526c0ce2"}, - {file = "hiredis-2.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2d1ba0799f3487294f72b2157944d5c3a4fb33c99e2d495d63eab98c7ec7234b"}, - {file = "hiredis-2.2.2.tar.gz", hash = "sha256:9c270bd0567a9c60673284e000132f603bb4ecbcd707567647a68f85ef45c4d4"}, + {file = "hiredis-2.2.3-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:9a1a80a8fa767f2fdc3870316a54b84fe9fc09fa6ab6a2686783de6a228a4604"}, + {file = "hiredis-2.2.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3f006c28c885deb99b670a5a66f367a175ab8955b0374029bad7111f5357dcd4"}, + {file = "hiredis-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffaf841546905d90ff189de7397aa56413b1ce5e54547f17a98f0ebf3a3b0a3b"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cadb0ac7ba3babfd804e425946bec9717b320564a1390f163a54af9365a720a"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33bc4721632ef9708fa44e5df0066053fccc8e65410a2c48573192517a533b48"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:227c5b4bcb60f89008c275d596e4a7b6625a6b3c827b8a66ae582eace7051f71"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61995eb826009d99ed8590747bc0da683a5f4fbb4faa8788166bf3810845cd5c"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f969edc851efe23010e0f53a64269f2629a9364135e9ec81c842e8b2277d0c1"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27e560eefb57914d742a837f1da98d3b29cb22eff013c8023b7cf52ae6e051d"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3759f4789ae1913b7df278dfc9e8749205b7a106f888cd2903d19461e24a7697"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c6cb613148422c523945cdb8b6bed617856f2602fd8750e33773ede2616e55d5"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:1d274d5c511dfc03f83f997d3238eaa9b6ee3f982640979f509373cced891e98"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3b7fe075e91b9d9cff40eba4fb6a8eff74964d3979a39be9a9ef58b1b4cb3604"}, + {file = "hiredis-2.2.3-cp310-cp310-win32.whl", hash = 
"sha256:77924b0d32fd1f493d3df15d9609ddf9d94c31a364022a6bf6b525ce9da75bea"}, + {file = "hiredis-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:dcb0569dd5bfe6004658cd0f229efa699a3169dcb4f77bd72e188adda302063d"}, + {file = "hiredis-2.2.3-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:d115790f18daa99b5c11a506e48923b630ef712e9e4b40482af942c3d40638b8"}, + {file = "hiredis-2.2.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c3b8be557e08b234774925622e196f0ee36fe4eab66cd19df934d3efd8f3743"}, + {file = "hiredis-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f5446068197b35a11ccc697720c41879c8657e2e761aaa8311783aac84cef20"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa17a3b22b3726d54d7af20394f65d4a1735a842a4e0f557dc67a90f6965c4bc"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7df645b6b7800e8b748c217fbd6a4ca8361bcb9a1ae6206cc02377833ec8a1aa"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2fb9300959a0048138791f3d68359d61a788574ec9556bddf1fec07f2dbc5320"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d7e459fe7313925f395148d36d9b7f4f8dac65be06e45d7af356b187cef65fc"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8eceffca3941775b646cd585cd19b275d382de43cc3327d22f7c75d7b003d481"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b17baf702c6e5b4bb66e1281a3efbb1d749c9d06cdb92b665ad81e03118f78fc"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e43e2b5acaad09cf48c032f7e4926392bb3a3f01854416cf6d82ebff94d5467"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a7205497d7276a81fe92951a29616ef96562ed2f91a02066f72b6f93cb34b40e"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:126623b03c31cb6ac3e0d138feb6fcc36dd43dd34fc7da7b7a0c38b5d75bc896"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:071c5814b850574036506a8118034f97c3cbf2fe9947ff45a27b07a48da56240"}, + {file = "hiredis-2.2.3-cp311-cp311-win32.whl", hash = "sha256:d1be9e30e675f5bc1cb534633324578f6f0944a1bcffe53242cf632f554f83b6"}, + {file = "hiredis-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9a7c987e161e3c58f992c63b7e26fea7fe0777f3b975799d23d65bbb8cb5899"}, + {file = "hiredis-2.2.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:f2dcb8389fa3d453927b1299f46bdb38473c293c8269d5c777d33ea0e526b610"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2df98f5e071320c7d84e8bd07c0542acdd0a7519307fc31774d60e4b842ec4f"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a72e4a523cdfc521762137559c08dfa360a3caef63620be58c699d1717dac1"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9b9e5bde7030cae83aa900b5bd660decc65afd2db8c400f3c568c815a47ca2a"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2614f17e261f72efc2f19f5e5ff2ee19e2296570c0dcf33409e22be30710de"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:46525fbd84523cac75af5bf524bc74aaac848beaf31b142d2df8a787d9b4bbc4"}, + 
{file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d1a4ce40ba11da9382c14da31f4f9e88c18f7d294f523decd0fadfb81f51ad18"}, + {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5cda592405bbd29d53942e0389dc3fa77b49c362640210d7e94a10c14a677d4d"}, + {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:5e6674a017629284ef373b50496d9fb1a89b85a20a7fa100ecd109484ec748e5"}, + {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:e62ec131816c6120eff40dffe43424e140264a15fa4ab88c301bd6a595913af3"}, + {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17e938d9d3ee92e1adbff361706f1c36cc60eeb3e3eeca7a3a353eae344f4c91"}, + {file = "hiredis-2.2.3-cp37-cp37m-win32.whl", hash = "sha256:95d2305fd2a7b179cacb48b10f618872fc565c175f9f62b854e8d1acac3e8a9e"}, + {file = "hiredis-2.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8f9dbe12f011a9b784f58faecc171d22465bb532c310bd588d769ba79a59ef5a"}, + {file = "hiredis-2.2.3-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:5a4bcef114fc071d5f52c386c47f35aae0a5b43673197b9288a15b584da8fa3a"}, + {file = "hiredis-2.2.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:232d0a70519865741ba56e1dfefd160a580ae78c30a1517bad47b3cf95a3bc7d"}, + {file = "hiredis-2.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9076ce8429785c85f824650735791738de7143f61f43ae9ed83e163c0ca0fa44"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec58fb7c2062f835595c12f0f02dcda76d0eb0831423cc191d1e18c9276648de"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f2b34a6444b8f9c1e9f84bd2c639388e5d14f128afd14a869dfb3d9af893aa2"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:818dfd310aa1020a13cd08ee48e116dd8c3bb2e23b8161f8ac4df587dd5093d7"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d9ea6c8d4cbdeee2e0d43379ce2881e4af0454b00570677c59f33f2531cd38"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1eadbcd3de55ac42310ff82550d3302cb4efcd4e17d76646a17b6e7004bb42b"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:477c34c4489666dc73cb5e89dafe2617c3e13da1298917f73d55aac4696bd793"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:14824e457e4f5cda685c3345d125da13949bcf3bb1c88eb5d248c8d2c3dee08f"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:9cd32326dfa6ce87edf754153b0105aca64486bebe93b9600ccff74fa0b224df"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:51341e70b467004dcbec3a6ce8c478d2d6241e0f6b01e4c56764afd5022e1e9d"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2443659c76b226267e2a04dbbb21bc2a3f91aa53bdc0c22964632753ae43a247"}, + {file = "hiredis-2.2.3-cp38-cp38-win32.whl", hash = "sha256:4e3e3e31423f888d396b1fc1f936936e52af868ac1ec17dd15e3eeba9dd4de24"}, + {file = "hiredis-2.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:20f509e3a1a20d6e5f5794fc37ceb21f70f409101fcfe7a8bde783894d51b369"}, + {file = "hiredis-2.2.3-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:d20891e3f33803b26d54c77fd5745878497091e33f4bbbdd454cf6e71aee8890"}, + {file = "hiredis-2.2.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = 
"sha256:50171f985e17970f87d5a29e16603d1e5b03bdbf5c2691a37e6c912942a6b657"}, + {file = "hiredis-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9944a2cac25ffe049a7e89f306e11b900640837d1ef38d9be0eaa4a4e2b73a52"}, + {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a5c8019ff94988d56eb49b15de76fe83f6b42536d76edeb6565dbf7fe14b973"}, + {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a286ded34eb16501002e3713b3130c987366eee2ba0d58c33c72f27778e31676"}, + {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e974ad15eb32b1f537730dea70b93a4c3db7b026de3ad2b59da49c6f7454d"}, + {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08415ea74c1c29b9d6a4ca3dd0e810dc1af343c1d1d442e15ba133b11ab5be6a"}, + {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e17d04ea58ab8cf3f2dc52e875db16077c6357846006780086fff3189fb199d"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6ccdcb635dae85b006592f78e32d97f4bc7541cb27829d505f9c7fefcef48298"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69536b821dd1bc78058a6e7541743f8d82bf2d981b91280b14c4daa6cdc7faba"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:3753df5f873d473f055e1f8837bfad0bd3b277c86f3c9bf058c58f14204cd901"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6f88cafe46612b6fa68e6dea49e25bebf160598bba00101caa51cc8c1f18d597"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33ee3ea5cad3a8cb339352cd230b411eb437a2e75d7736c4899acab32056ccdb"}, + {file = "hiredis-2.2.3-cp39-cp39-win32.whl", hash = "sha256:b4f3d06dc16671b88a13ae85d8ca92534c0b637d59e49f0558d040a691246422"}, + {file = "hiredis-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4f674e309cd055ee7a48304ceb8cf43265d859faf4d7d01d270ce45e976ae9d3"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8f280ab4e043b089777b43b4227bdc2035f88da5072ab36588e0ccf77d45d058"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c2a551f3b8a26f7940d6ee10b837810201754b8d7e6f6b1391655370882c5a"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60c4e3c258eafaab21b174b17270a0cc093718d61cdbde8c03f85ec4bf835343"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc36a9dded458d4e37492fe3e619c6c83caae794d26ad925adbce61d592f8428"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:4ed68a3b1ccb4313d2a42546fd7e7439ad4745918a48b6c9bcaa61e1e3e42634"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3bf4b5bae472630c229518e4a814b1b68f10a3d9b00aeaec45f1a330f03a0251"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33a94d264e6e12a79d9bb8af333b01dc286b9f39c99072ab5fef94ce1f018e17"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fa6811a618653164f918b891a0fa07052bd71a799defa5c44d167cac5557b26"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:af33f370be90b48bbaf0dab32decbdcc522b1fa95d109020a963282086518a8e"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b9953d87418ac228f508d93898ab572775e4d3b0eeb886a1a7734553bcdaf291"}, + {file = "hiredis-2.2.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5e7bb4dd524f50b71c20ef5a12bd61da9b463f8894b18a06130942fe31509881"}, + {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89a258424158eb8b3ed9f65548d68998da334ef155d09488c5637723eb1cd697"}, + {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f4a65276f6ecdebe75f2a53f578fbc40e8d2860658420d5e0611c56bbf5054c"}, + {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:334f2738700b20faa04a0d813366fb16ed17287430a6b50584161d5ad31ca6d7"}, + {file = "hiredis-2.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d194decd9608f11c777946f596f31d5aacad13972a0a87829ae1e6f2d26c1885"}, + {file = "hiredis-2.2.3.tar.gz", hash = "sha256:e75163773a309e56a9b58165cf5a50e0f84b755f6ff863b2c01a38918fe92daa"}, ] [[package]] From 2bfe3f0b8193b62a92975b1f89f6b2e0eb643091 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 9 May 2023 07:23:27 -0400 Subject: [PATCH 28/42] Use account data constants in more places. (#15554) --- changelog.d/15554.misc | 1 + synapse/api/constants.py | 1 + synapse/handlers/read_marker.py | 5 +++-- synapse/handlers/sync.py | 12 ++++++------ synapse/rest/client/account_data.py | 3 ++- 5 files changed, 13 insertions(+), 9 deletions(-) create mode 100644 changelog.d/15554.misc diff --git a/changelog.d/15554.misc b/changelog.d/15554.misc new file mode 100644 index 0000000000..002e3f5315 --- /dev/null +++ b/changelog.d/15554.misc @@ -0,0 +1 @@ +Use account data constants in more places. 
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index c56b2f2561..cde9a2ecef 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -257,6 +257,7 @@ class AccountDataTypes: DIRECT: Final = "m.direct" IGNORED_USER_LIST: Final = "m.ignored_user_list" TAG: Final = "m.tag" + PUSH_RULES: Final = "m.push_rules" class HistoryVisibility: diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index 05122fd5a6..6d35e61880 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -15,6 +15,7 @@ import logging from typing import TYPE_CHECKING +from synapse.api.constants import ReceiptTypes from synapse.util.async_helpers import Linearizer if TYPE_CHECKING: @@ -42,7 +43,7 @@ class ReadMarkerHandler: async with self.read_marker_linearizer.queue((room_id, user_id)): existing_read_marker = await self.store.get_account_data_for_room_and_type( - user_id, room_id, "m.fully_read" + user_id, room_id, ReceiptTypes.FULLY_READ ) should_update = True @@ -56,5 +57,5 @@ class ReadMarkerHandler: if should_update: content = {"event_id": event_id} await self.account_data_handler.add_account_data_to_room( - user_id, room_id, "m.fully_read", content + user_id, room_id, ReceiptTypes.FULLY_READ, content ) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 64d298408d..cc05b0afa0 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1777,18 +1777,18 @@ class SyncHandler: if push_rules_changed: global_account_data = dict(global_account_data) - global_account_data["m.push_rules"] = await self.push_rules_for_user( - sync_config.user - ) + global_account_data[ + AccountDataTypes.PUSH_RULES + ] = await self.push_rules_for_user(sync_config.user) else: all_global_account_data = await self.store.get_global_account_data_for_user( user_id ) global_account_data = dict(all_global_account_data) - global_account_data["m.push_rules"] = await self.push_rules_for_user( - sync_config.user - ) + global_account_data[ + AccountDataTypes.PUSH_RULES + ] = await self.push_rules_for_user(sync_config.user) account_data_for_user = ( await sync_config.filter_collection.filter_global_account_data( diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index 43193ad086..8eebb21c76 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -15,6 +15,7 @@ import logging from typing import TYPE_CHECKING, Tuple +from synapse.api.constants import ReceiptTypes from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -166,7 +167,7 @@ class RoomAccountDataServlet(RestServlet): body = parse_json_object_from_request(request) - if account_data_type == "m.fully_read": + if account_data_type == ReceiptTypes.FULLY_READ: raise SynapseError( 405, "Cannot set m.fully_read through this API." From 4b4e0dc3cecbe9ad65c4728c1ec461321d15789f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 9 May 2023 10:34:10 -0400 Subject: [PATCH 29/42] Error if attempting to set m.push_rules account data, per MSC4010. (#15555) m.push_rules, like m.fully_read, is a special account data type that cannot be set using the normal /account_data endpoint. Return an error instead of allowing data that will not be used to be stored. 
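As a rough sketch of the client-visible effect of this change, in the style of the existing `HomeserverTestCase` tests; the user, access token and exact response body are assumed here rather than taken from this patch:

```python
# Sketch only: assumes self.user_id / self.token were set up in prepare(), and
# that the experimental MSC4010 flag is enabled. The 405 status mirrors the
# servlet change below.
@override_config({"experimental_features": {"msc4010_push_rules_account_data": True}})
def test_put_push_rules_account_data_is_rejected(self) -> None:
    channel = self.make_request(
        "PUT",
        f"/_matrix/client/v3/user/{self.user_id}/account_data/m.push_rules",
        {"global": {}},
        access_token=self.token,
    )
    # The write is refused up front instead of storing data that /sync would never read.
    self.assertEqual(channel.code, 405)
```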
--- changelog.d/15554.bugfix | 1 + changelog.d/15554.misc | 1 - changelog.d/15555.bugfix | 1 + synapse/config/experimental.py | 5 ++ synapse/handlers/push_rules.py | 16 +++++- synapse/handlers/sync.py | 12 +--- synapse/push/clientformat.py | 2 +- synapse/rest/client/account_data.py | 85 +++++++++++++++++++++++------ synapse/rest/client/push_rule.py | 7 +-- 9 files changed, 95 insertions(+), 35 deletions(-) create mode 100644 changelog.d/15554.bugfix delete mode 100644 changelog.d/15554.misc create mode 100644 changelog.d/15555.bugfix diff --git a/changelog.d/15554.bugfix b/changelog.d/15554.bugfix new file mode 100644 index 0000000000..0fd9de8c65 --- /dev/null +++ b/changelog.d/15554.bugfix @@ -0,0 +1 @@ +Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting the `"m.push_rules"` via account data. diff --git a/changelog.d/15554.misc b/changelog.d/15554.misc deleted file mode 100644 index 002e3f5315..0000000000 --- a/changelog.d/15554.misc +++ /dev/null @@ -1 +0,0 @@ -Use account data constants in more places. diff --git a/changelog.d/15555.bugfix b/changelog.d/15555.bugfix new file mode 100644 index 0000000000..0fd9de8c65 --- /dev/null +++ b/changelog.d/15555.bugfix @@ -0,0 +1 @@ +Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting the `"m.push_rules"` via account data. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 514d87cb2c..7af6dbcd09 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -202,3 +202,8 @@ class ExperimentalConfig(Config): # MSC4009: E.164 Matrix IDs self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False) + + # MSC4010: Do not allow setting m.push_rules account data. + self.msc4010_push_rules_account_data = experimental.get( + "msc4010_push_rules_account_data", False + ) diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py index 813f3aa2d5..7ed88a3611 100644 --- a/synapse/handlers/push_rules.py +++ b/synapse/handlers/push_rules.py @@ -11,14 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union import attr from synapse.api.errors import SynapseError, UnrecognizedRequestError +from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.push_rule import RuleNotFoundException from synapse.synapse_rust.push import get_base_rule_ids -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -115,6 +116,17 @@ class PushRulesHandler: stream_id = self._main_store.get_max_push_rules_stream_id() self._notifier.on_new_event("push_rules_key", stream_id, users=[user_id]) + async def push_rules_for_user( + self, user: UserID + ) -> Dict[str, Dict[str, List[Dict[str, Any]]]]: + """ + Push rules aren't really account data, but get formatted as such for /sync. + """ + user_id = user.to_string() + rules_raw = await self._main_store.get_push_rules_for_user(user_id) + rules = format_push_rules_for_user(user, rules_raw) + return rules + def check_actions(actions: List[Union[str, JsonDict]]) -> None: """Check if the given actions are spec compliant. 
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index cc05b0afa0..c010405be6 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -50,7 +50,6 @@ from synapse.logging.opentracing import ( start_active_span, trace, ) -from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.databases.main.event_push_actions import RoomNotifCounts from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary @@ -261,6 +260,7 @@ class SyncHandler: self.notifier = hs.get_notifier() self.presence_handler = hs.get_presence_handler() self._relations_handler = hs.get_relations_handler() + self._push_rules_handler = hs.get_push_rules_handler() self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() self.state = hs.get_state_handler() @@ -428,12 +428,6 @@ class SyncHandler: set_tag(SynapseTags.SYNC_RESULT, bool(sync_result)) return sync_result - async def push_rules_for_user(self, user: UserID) -> Dict[str, Dict[str, list]]: - user_id = user.to_string() - rules_raw = await self.store.get_push_rules_for_user(user_id) - rules = format_push_rules_for_user(user, rules_raw) - return rules - async def ephemeral_by_room( self, sync_result_builder: "SyncResultBuilder", @@ -1779,7 +1773,7 @@ class SyncHandler: global_account_data = dict(global_account_data) global_account_data[ AccountDataTypes.PUSH_RULES - ] = await self.push_rules_for_user(sync_config.user) + ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) else: all_global_account_data = await self.store.get_global_account_data_for_user( user_id @@ -1788,7 +1782,7 @@ class SyncHandler: global_account_data = dict(all_global_account_data) global_account_data[ AccountDataTypes.PUSH_RULES - ] = await self.push_rules_for_user(sync_config.user) + ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) account_data_for_user = ( await sync_config.filter_collection.filter_global_account_data( diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 222afbdcc8..88b52c26a0 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -22,7 +22,7 @@ from synapse.types import UserID def format_push_rules_for_user( user: UserID, ruleslist: FilteredPushRules -) -> Dict[str, Dict[str, list]]: +) -> Dict[str, Dict[str, List[Dict[str, Any]]]]: """Converts a list of rawrules and a enabled map into nested dictionaries to match the Matrix client-server format for push rules""" diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index 8eebb21c76..b1f9e9dc9b 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -13,9 +13,9 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING, Optional, Tuple -from synapse.api.constants import ReceiptTypes +from synapse.api.constants import AccountDataTypes, ReceiptTypes from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -30,6 +30,23 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +def _check_can_set_account_data_type(account_data_type: str) -> None: + """The fully read marker and push rules cannot be directly set via /account_data.""" + if account_data_type == ReceiptTypes.FULLY_READ: + raise SynapseError( + 405, + "Cannot set m.fully_read through this API." + " Use /rooms/!roomId:server.name/read_markers", + Codes.BAD_JSON, + ) + elif account_data_type == AccountDataTypes.PUSH_RULES: + raise SynapseError( + 405, + "Cannot set m.push_rules through this API. Use /pushrules", + Codes.BAD_JSON, + ) + + class AccountDataServlet(RestServlet): """ PUT /user/{user_id}/account_data/{account_dataType} HTTP/1.1 @@ -47,6 +64,7 @@ class AccountDataServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main self.handler = hs.get_account_data_handler() + self._push_rules_handler = hs.get_push_rules_handler() async def on_PUT( self, request: SynapseRequest, user_id: str, account_data_type: str @@ -55,6 +73,10 @@ class AccountDataServlet(RestServlet): if user_id != requester.user.to_string(): raise AuthError(403, "Cannot add account data for other users.") + # Raise an error if the account data type cannot be set directly. + if self._hs.config.experimental.msc4010_push_rules_account_data: + _check_can_set_account_data_type(account_data_type) + body = parse_json_object_from_request(request) # If experimental support for MSC3391 is enabled, then providing an empty dict @@ -78,19 +100,28 @@ class AccountDataServlet(RestServlet): if user_id != requester.user.to_string(): raise AuthError(403, "Cannot get account data for other users.") - event = await self.store.get_global_account_data_by_type_for_user( - user_id, account_data_type - ) + # Push rules are stored in a separate table and must be queried separately. + if ( + self._hs.config.experimental.msc4010_push_rules_account_data + and account_data_type == AccountDataTypes.PUSH_RULES + ): + account_data: Optional[ + JsonDict + ] = await self._push_rules_handler.push_rules_for_user(requester.user) + else: + account_data = await self.store.get_global_account_data_by_type_for_user( + user_id, account_data_type + ) - if event is None: + if account_data is None: raise NotFoundError("Account data not found") # If experimental support for MSC3391 is enabled, then this endpoint should # return a 404 if the content for an account data type is an empty dict. 
- if self._hs.config.experimental.msc3391_enabled and event == {}: + if self._hs.config.experimental.msc3391_enabled and account_data == {}: raise NotFoundError("Account data not found") - return 200, event + return 200, account_data class UnstableAccountDataServlet(RestServlet): @@ -109,6 +140,7 @@ class UnstableAccountDataServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() + self._hs = hs self.auth = hs.get_auth() self.handler = hs.get_account_data_handler() @@ -122,6 +154,10 @@ class UnstableAccountDataServlet(RestServlet): if user_id != requester.user.to_string(): raise AuthError(403, "Cannot delete account data for other users.") + # Raise an error if the account data type cannot be set directly. + if self._hs.config.experimental.msc4010_push_rules_account_data: + _check_can_set_account_data_type(account_data_type) + await self.handler.remove_account_data_for_user(user_id, account_data_type) return 200, {} @@ -165,9 +201,10 @@ class RoomAccountDataServlet(RestServlet): Codes.INVALID_PARAM, ) - body = parse_json_object_from_request(request) - - if account_data_type == ReceiptTypes.FULLY_READ: + # Raise an error if the account data type cannot be set directly. + if self._hs.config.experimental.msc4010_push_rules_account_data: + _check_can_set_account_data_type(account_data_type) + elif account_data_type == ReceiptTypes.FULLY_READ: raise SynapseError( 405, "Cannot set m.fully_read through this API." @@ -175,6 +212,8 @@ class RoomAccountDataServlet(RestServlet): Codes.BAD_JSON, ) + body = parse_json_object_from_request(request) + # If experimental support for MSC3391 is enabled, then providing an empty dict # as the value for an account data type should be functionally equivalent to # calling the DELETE method on the same type. @@ -209,19 +248,26 @@ class RoomAccountDataServlet(RestServlet): Codes.INVALID_PARAM, ) - event = await self.store.get_account_data_for_room_and_type( - user_id, room_id, account_data_type - ) + # Room-specific push rules are not currently supported. + if ( + self._hs.config.experimental.msc4010_push_rules_account_data + and account_data_type == AccountDataTypes.PUSH_RULES + ): + account_data: Optional[JsonDict] = {} + else: + account_data = await self.store.get_account_data_for_room_and_type( + user_id, room_id, account_data_type + ) - if event is None: + if account_data is None: raise NotFoundError("Room account data not found") # If experimental support for MSC3391 is enabled, then this endpoint should # return a 404 if the content for an account data type is an empty dict. - if self._hs.config.experimental.msc3391_enabled and event == {}: + if self._hs.config.experimental.msc3391_enabled and account_data == {}: raise NotFoundError("Room account data not found") - return 200, event + return 200, account_data class UnstableRoomAccountDataServlet(RestServlet): @@ -241,6 +287,7 @@ class UnstableRoomAccountDataServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() + self._hs = hs self.auth = hs.get_auth() self.handler = hs.get_account_data_handler() @@ -262,6 +309,10 @@ class UnstableRoomAccountDataServlet(RestServlet): Codes.INVALID_PARAM, ) + # Raise an error if the account data type cannot be set directly. 
+ if self._hs.config.experimental.msc4010_push_rules_account_data: + _check_can_set_account_data_type(account_data_type) + await self.handler.remove_account_data_for_room( user_id, room_id, account_data_type ) diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index 1147b6f8ec..5c9fece3ba 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -28,7 +28,6 @@ from synapse.http.servlet import ( parse_string, ) from synapse.http.site import SynapseRequest -from synapse.push.clientformat import format_push_rules_for_user from synapse.push.rulekinds import PRIORITY_CLASS_MAP from synapse.rest.client._base import client_patterns from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException @@ -146,14 +145,12 @@ class PushRuleRestServlet(RestServlet): async def on_GET(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester.user.to_string() # we build up the full structure and then decide which bits of it # to send which means doing unnecessary work sometimes but is # is probably not going to make a whole lot of difference - rules_raw = await self.store.get_push_rules_for_user(user_id) - - rules = format_push_rules_for_user(requester.user, rules_raw) + rules = await self._push_rules_handler.push_rules_for_user(requester.user) path_parts = path.split("/")[1:] From 64a11fb61fac47f652858d7e2109d077874135e0 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 9 May 2023 18:13:48 +0200 Subject: [PATCH 30/42] 1.83.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index f055772ca0..9c200bfb7b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.83.0 (2023-05-09) +=========================== + +No significant changes since 1.83.0rc1. + + Synapse 1.83.0rc1 (2023-05-02) ============================== diff --git a/debian/changelog b/debian/changelog index a50fda69a8..15ff7e82c3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.83.0) stable; urgency=medium + + * New Synapse release 1.83.0. + + -- Synapse Packaging team Tue, 09 May 2023 18:13:37 +0200 + matrix-synapse-py3 (1.83.0~rc1) stable; urgency=medium * New Synapse release 1.83.0rc1. diff --git a/pyproject.toml b/pyproject.toml index caf69cc53f..346acfb048 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.83.0rc1" +version = "1.83.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From ab4535b6082db97e8c48a69ea6674fe3b7c5e956 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 9 May 2023 12:08:51 -0600 Subject: [PATCH 31/42] Add config option to prevent media downloads from listed domains. (#15197) This stops media (and thumbnails) from being accessed from the listed domains. It does not delete any already locally cached media, but will prevent accessing it. Note that admin APIs are unaffected by this change. 
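A condensed illustration of the resulting behaviour, written as a fragment of a `HomeserverTestCase` method; the full test file added below covers this case and the thumbnail variant, and the media ID here is arbitrary:

```python
# Sketch only: with prevent_media_downloads_from: ["evil.com"] configured, a
# download of previously cached media from that origin now returns 404, the same
# response a client would get for media that was never fetched at all.
channel = self.make_request(
    "GET",
    "/_matrix/media/v3/download/evil.com/doesnotmatter",
    shorthand=False,
)
self.assertEqual(channel.code, 404)
```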
--- changelog.d/15197.feature | 1 + .../configuration/config_documentation.md | 24 +++ synapse/config/repository.py | 4 + synapse/media/media_repository.py | 9 ++ synapse/rest/media/thumbnail_resource.py | 9 ++ tests/rest/media/test_domain_blocking.py | 139 ++++++++++++++++++ 6 files changed, 186 insertions(+) create mode 100644 changelog.d/15197.feature create mode 100644 tests/rest/media/test_domain_blocking.py diff --git a/changelog.d/15197.feature b/changelog.d/15197.feature new file mode 100644 index 0000000000..c8a6f114e8 --- /dev/null +++ b/changelog.d/15197.feature @@ -0,0 +1 @@ +Add an option to prevent media downloads from configured domains. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 14c21f73fe..6dd1a639ed 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1768,6 +1768,30 @@ Example configuration: max_image_pixels: 35M ``` --- +### `prevent_media_downloads_from` + +A list of domains to never download media from. Media from these +domains that is already downloaded will not be deleted, but will be +inaccessible to users. This option does not affect admin APIs trying +to download/operate on media. + +This will not prevent the listed domains from accessing media themselves. +It simply prevents users on this server from downloading media originating +from the listed servers. + +This will have no effect on media originating from the local server. +This only affects media downloaded from other Matrix servers, to +block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist). + +Defaults to an empty list (nothing blocked). + +Example configuration: +```yaml +prevent_media_downloads_from: + - evil.example.org + - evil2.example.org +``` +--- ### `dynamic_thumbnails` Whether to generate new thumbnails on the fly to precisely match diff --git a/synapse/config/repository.py b/synapse/config/repository.py index ecb3edbe3a..655f06505b 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -137,6 +137,10 @@ class ContentRepositoryConfig(Config): self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M")) self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M")) + self.prevent_media_downloads_from = config.get( + "prevent_media_downloads_from", [] + ) + self.media_store_path = self.ensure_directory( config.get("media_store_path", "media_store") ) diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index b81e3c2b0c..e81c987b10 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -93,6 +93,7 @@ class MediaRepository: self.federation_domain_whitelist = ( hs.config.federation.federation_domain_whitelist ) + self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from # List of StorageProviders where we should search for media and # potentially upload to. @@ -276,6 +277,14 @@ class MediaRepository: ): raise FederationDeniedError(server_name) + # Don't let users download media from domains listed in the config, even + # if we might have the media to serve. This is Trust & Safety tooling to + # block some servers' media from being accessible to local users. + # See `prevent_media_downloads_from` config docs for more info. 
+ if server_name in self.prevent_media_downloads_from: + respond_404(request) + return + self.mark_recently_accessed(server_name, media_id) # We linearize here to ensure that we don't try and download remote diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index a6396fb05a..661e604b85 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -60,6 +60,7 @@ class ThumbnailResource(DirectServeJsonResource): self.media_storage = media_storage self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails self._is_mine_server_name = hs.is_mine_server_name + self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from async def _async_render_GET(self, request: SynapseRequest) -> None: set_cors_headers(request) @@ -82,6 +83,14 @@ class ThumbnailResource(DirectServeJsonResource): ) self.media_repo.mark_recently_accessed(None, media_id) else: + # Don't let users download media from configured domains, even if it + # is already downloaded. This is Trust & Safety tooling to make some + # media inaccessible to local users. + # See `prevent_media_downloads_from` config docs for more info. + if server_name in self.prevent_media_downloads_from: + respond_404(request) + return + if self.dynamic_thumbnails: await self._select_or_generate_remote_thumbnail( request, server_name, media_id, width, height, method, m_type diff --git a/tests/rest/media/test_domain_blocking.py b/tests/rest/media/test_domain_blocking.py new file mode 100644 index 0000000000..9beeeab843 --- /dev/null +++ b/tests/rest/media/test_domain_blocking.py @@ -0,0 +1,139 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict + +from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource + +from synapse.media._base import FileInfo +from synapse.server import HomeServer +from synapse.util import Clock + +from tests import unittest +from tests.test_utils import SMALL_PNG +from tests.unittest import override_config + + +class MediaDomainBlockingTests(unittest.HomeserverTestCase): + remote_media_id = "doesnotmatter" + remote_server_name = "evil.com" + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + # Inject a piece of media. We'll use this to ensure we're returning a sane + # response when we're not supposed to block it, distinguishing a media block + # from a regular 404. 
+ file_id = "abcdefg12345" + file_info = FileInfo(server_name=self.remote_server_name, file_id=file_id) + with hs.get_media_repository().media_storage.store_into_file(file_info) as ( + f, + fname, + finish, + ): + f.write(SMALL_PNG) + self.get_success(finish()) + + self.get_success( + self.store.store_cached_remote_media( + origin=self.remote_server_name, + media_id=self.remote_media_id, + media_type="image/png", + media_length=1, + time_now_ms=clock.time_msec(), + upload_name="test.png", + filesystem_id=file_id, + ) + ) + + def create_resource_dict(self) -> Dict[str, Resource]: + # We need to manually set the resource tree to include media, the + # default only does `/_matrix/client` APIs. + return {"/_matrix/media": self.hs.get_media_repository_resource()} + + @override_config( + { + # Disable downloads from the domain we'll be trying to download from. + # Should result in a 404. + "prevent_media_downloads_from": ["evil.com"] + } + ) + def test_cannot_download_blocked_media(self) -> None: + """ + Tests to ensure that remote media which is blocked cannot be downloaded. + """ + response = self.make_request( + "GET", + f"/_matrix/media/v3/download/evil.com/{self.remote_media_id}", + shorthand=False, + ) + self.assertEqual(response.code, 404) + + @override_config( + { + # Disable downloads from a domain we won't be requesting downloads from. + # This proves we haven't broken anything. + "prevent_media_downloads_from": ["not-listed.com"] + } + ) + def test_remote_media_normally_unblocked(self) -> None: + """ + Tests to ensure that remote media is normally able to be downloaded + when no domain block is in place. + """ + response = self.make_request( + "GET", + f"/_matrix/media/v3/download/evil.com/{self.remote_media_id}", + shorthand=False, + ) + self.assertEqual(response.code, 200) + + @override_config( + { + # Disable downloads from the domain we'll be trying to download from. + # Should result in a 404. + "prevent_media_downloads_from": ["evil.com"], + "dynamic_thumbnails": True, + } + ) + def test_cannot_download_blocked_media_thumbnail(self) -> None: + """ + Same test as test_cannot_download_blocked_media but for thumbnails. + """ + response = self.make_request( + "GET", + f"/_matrix/media/v3/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100", + shorthand=False, + content={"width": 100, "height": 100}, + ) + self.assertEqual(response.code, 404) + + @override_config( + { + # Disable downloads from a domain we won't be requesting downloads from. + # This proves we haven't broken anything. + "prevent_media_downloads_from": ["not-listed.com"], + "dynamic_thumbnails": True, + } + ) + def test_remote_media_thumbnail_normally_unblocked(self) -> None: + """ + Same test as test_remote_media_normally_unblocked but for thumbnails. + """ + response = self.make_request( + "GET", + f"/_matrix/media/v3/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100", + shorthand=False, + ) + self.assertEqual(response.code, 200) From d3bd03559b14272dd68499ab7cff4b190858b285 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Tue, 9 May 2023 13:25:20 -0500 Subject: [PATCH 32/42] HTTP Replication Client (#15470) Separate out a HTTP client for replication in preparation for also supporting using UNIX sockets. The major difference from the base class is that this does not use treq to handle HTTP requests. 
--- changelog.d/15470.misc | 1 + synapse/http/client.py | 133 +++++++++++++++++++++++++- synapse/http/replicationagent.py | 150 ++++++++++++++++++++++++++++++ synapse/replication/http/_base.py | 2 +- synapse/server.py | 13 ++- tests/test_state.py | 1 + 6 files changed, 297 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15470.misc create mode 100644 synapse/http/replicationagent.py diff --git a/changelog.d/15470.misc b/changelog.d/15470.misc new file mode 100644 index 0000000000..0af0b499c6 --- /dev/null +++ b/changelog.d/15470.misc @@ -0,0 +1 @@ +Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. diff --git a/synapse/http/client.py b/synapse/http/client.py index 91fe474f36..c9479c81ff 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -74,8 +74,9 @@ from twisted.web.iweb import ( from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri from synapse.http.proxyagent import ProxyAgent +from synapse.http.replicationagent import ReplicationAgent from synapse.http.types import QueryParams -from synapse.logging.context import make_deferred_yieldable +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.types import ISynapseReactor from synapse.util import json_decoder @@ -819,6 +820,136 @@ class SimpleHttpClient(BaseHttpClient): ) +class ReplicationClient(BaseHttpClient): + """Client for connecting to replication endpoints via HTTP and HTTPS. + + Attributes: + agent: The custom Twisted Agent used for constructing the connection. + """ + + def __init__( + self, + hs: "HomeServer", + ): + """ + Args: + hs: The HomeServer instance to pass in + """ + super().__init__(hs) + + # Use a pool, but a very small one. + pool = HTTPConnectionPool(self.reactor) + pool.maxPersistentPerHost = 5 + pool.cachedConnectionTimeout = 2 * 60 + + self.agent: IAgent = ReplicationAgent( + hs.get_reactor(), + contextFactory=hs.get_http_client_context_factory(), + pool=pool, + ) + + async def request( + self, + method: str, + uri: str, + data: Optional[bytes] = None, + headers: Optional[Headers] = None, + ) -> IResponse: + """ + Make a request, differs from BaseHttpClient.request in that it does not use treq. + + Args: + method: HTTP method to use. + uri: URI to query. + data: Data to send in the request body, if applicable. + headers: Request headers. + + Returns: + Response object, once the headers have been read. + + Raises: + RequestTimedOutError if the request times out before the headers are read + + """ + outgoing_requests_counter.labels(method).inc() + + logger.debug("Sending request %s %s", method, uri) + + with start_active_span( + "outgoing-replication-request", + tags={ + tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT, + tags.HTTP_METHOD: method, + tags.HTTP_URL: uri, + }, + finish_on_close=True, + ): + try: + body_producer = None + if data is not None: + body_producer = QuieterFileBodyProducer( + BytesIO(data), + cooperator=self._cooperator, + ) + + # Skip the fancy treq stuff, we don't need cookie handling, redirects, + # or buffered response bodies. + method_bytes = method.encode("ascii") + uri_bytes = uri.encode("ascii") + + # To preserve the logging context, the timeout is treated + # in a similar way to `defer.gatherResults`: + # * Each logging context-preserving fork is wrapped in + # `run_in_background`. 
In this case there is only one, + # since the timeout fork is not logging-context aware. + # * The `Deferred` that joins the forks back together is + # wrapped in `make_deferred_yieldable` to restore the + # logging context regardless of the path taken. + # (The logic/comments for this came from MatrixFederationHttpClient) + request_deferred = run_in_background( + self.agent.request, + method_bytes, + uri_bytes, + headers, + bodyProducer=body_producer, + ) + + # we use our own timeout mechanism rather than twisted's as a workaround + # for https://twistedmatrix.com/trac/ticket/9534. + # (Updated url https://github.com/twisted/twisted/issues/9534) + request_deferred = timeout_deferred( + request_deferred, + 60, + self.hs.get_reactor(), + ) + + # turn timeouts into RequestTimedOutErrors + request_deferred.addErrback(_timeout_to_request_timed_out_error) + + response = await make_deferred_yieldable(request_deferred) + + incoming_responses_counter.labels(method, response.code).inc() + logger.info( + "Received response to %s %s: %s", + method, + uri, + response.code, + ) + return response + except Exception as e: + incoming_responses_counter.labels(method, "ERR").inc() + logger.info( + "Error sending request to %s %s: %s %s", + method, + uri, + type(e).__name__, + e.args[0], + ) + set_tag(tags.ERROR, True) + set_tag("error_reason", e.args[0]) + raise + + def _timeout_to_request_timed_out_error(f: Failure) -> Failure: if f.check(twisted_error.TimeoutError, twisted_error.ConnectingCancelledError): # The TCP connection has its own timeout (set by the 'connectTimeout' param diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py new file mode 100644 index 0000000000..5ecd08be0f --- /dev/null +++ b/synapse/http/replicationagent.py @@ -0,0 +1,150 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Optional + +from zope.interface import implementer + +from twisted.internet import defer +from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS +from twisted.internet.interfaces import IStreamClientEndpoint +from twisted.python.failure import Failure +from twisted.web.client import URI, HTTPConnectionPool, _AgentBase +from twisted.web.error import SchemeNotSupported +from twisted.web.http_headers import Headers +from twisted.web.iweb import ( + IAgent, + IAgentEndpointFactory, + IBodyProducer, + IPolicyForHTTPS, + IResponse, +) + +from synapse.types import ISynapseReactor + +logger = logging.getLogger(__name__) + + +@implementer(IAgentEndpointFactory) +class ReplicationEndpointFactory: + """Connect to a given TCP socket""" + + def __init__( + self, + reactor: ISynapseReactor, + context_factory: IPolicyForHTTPS, + ) -> None: + self.reactor = reactor + self.context_factory = context_factory + + def endpointForURI(self, uri: URI) -> IStreamClientEndpoint: + """ + This part of the factory decides what kind of endpoint is being connected to. 
+ + Args: + uri: The pre-parsed URI object containing all the uri data + + Returns: The correct client endpoint object + """ + if uri.scheme in (b"http", b"https"): + endpoint = HostnameEndpoint(self.reactor, uri.host, uri.port) + if uri.scheme == b"https": + endpoint = wrapClientTLS( + self.context_factory.creatorForNetloc(uri.host, uri.port), endpoint + ) + return endpoint + else: + raise SchemeNotSupported(f"Unsupported scheme: {uri.scheme!r}") + + +@implementer(IAgent) +class ReplicationAgent(_AgentBase): + """ + Client for connecting to replication endpoints via HTTP and HTTPS. + + Much of this code is copied from Twisted's twisted.web.client.Agent. + """ + + def __init__( + self, + reactor: ISynapseReactor, + contextFactory: IPolicyForHTTPS, + connectTimeout: Optional[float] = None, + bindAddress: Optional[bytes] = None, + pool: Optional[HTTPConnectionPool] = None, + ): + """ + Create a ReplicationAgent. + + Args: + reactor: A reactor for this Agent to place outgoing connections. + contextFactory: A factory for TLS contexts, to control the + verification parameters of OpenSSL. The default is to use a + BrowserLikePolicyForHTTPS, so unless you have special + requirements you can leave this as-is. + connectTimeout: The amount of time that this Agent will wait + for the peer to accept a connection. + bindAddress: The local address for client sockets to bind to. + pool: An HTTPConnectionPool instance, or None, in which + case a non-persistent HTTPConnectionPool instance will be + created. + """ + _AgentBase.__init__(self, reactor, pool) + endpoint_factory = ReplicationEndpointFactory(reactor, contextFactory) + self._endpointFactory = endpoint_factory + + def request( + self, + method: bytes, + uri: bytes, + headers: Optional[Headers] = None, + bodyProducer: Optional[IBodyProducer] = None, + ) -> "defer.Deferred[IResponse]": + """ + Issue a request to the server indicated by the given uri. + + An existing connection from the connection pool may be used or a new + one may be created. + + Currently, HTTP and HTTPS schemes are supported in uri. + + This is copied from twisted.web.client.Agent, except: + + * It uses a different pool key (combining the host & port). + * It does not call _ensureValidURI(...) since it breaks on some + UNIX paths. + + See: twisted.web.iweb.IAgent.request + """ + parsedURI = URI.fromBytes(uri) + try: + endpoint = self._endpointFactory.endpointForURI(parsedURI) + except SchemeNotSupported: + return defer.fail(Failure()) + + # This sets the Pool key to be: + # (http(s), ) + key = (parsedURI.scheme, parsedURI.netloc) + + # _requestWithEndpoint comes from _AgentBase class + return self._requestWithEndpoint( + key, + endpoint, + method, + parsedURI, + headers, + bodyProducer, + parsedURI.originForm, + ) diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 8c2c54c07a..23129962e9 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -194,7 +194,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): the `instance_map` config). 
""" clock = hs.get_clock() - client = hs.get_simple_http_client() + client = hs.get_replication_client() local_instance_name = hs.get_instance_name() # The value of these option should match the replication listener settings diff --git a/synapse/server.py b/synapse/server.py index fd29c28173..b307295789 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -107,7 +107,11 @@ from synapse.handlers.stats import StatsHandler from synapse.handlers.sync import SyncHandler from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler from synapse.handlers.user_directory import UserDirectoryHandler -from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient +from synapse.http.client import ( + InsecureInterceptableContextFactory, + ReplicationClient, + SimpleHttpClient, +) from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.media.media_repository import MediaRepository from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager @@ -471,6 +475,13 @@ class HomeServer(metaclass=abc.ABCMeta): ) return MatrixFederationHttpClient(self, tls_client_options_factory) + @cache_in_self + def get_replication_client(self) -> ReplicationClient: + """ + An HTTP client for HTTP replication. + """ + return ReplicationClient(self) + @cache_in_self def get_room_creation_handler(self) -> RoomCreationHandler: return RoomCreationHandler(self) diff --git a/tests/test_state.py b/tests/test_state.py index b20a26e1ff..2029d3d60a 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -228,6 +228,7 @@ class StateTestCase(unittest.TestCase): "get_macaroon_generator", "get_instance_name", "get_simple_http_client", + "get_replication_client", "hostname", ] ) From 86d541f37c1bc9197a6f561b31f3aa359740b4bd Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Tue, 9 May 2023 22:02:36 +0300 Subject: [PATCH 33/42] Stabilize MSC2659 support for AS ping endpoint. (#15528) --- changelog.d/15528.feature | 1 + synapse/api/errors.py | 8 ++++---- synapse/appservice/api.py | 2 +- synapse/config/experimental.py | 3 --- synapse/rest/client/appservice_ping.py | 10 ++++------ synapse/rest/client/versions.py | 2 +- 6 files changed, 11 insertions(+), 15 deletions(-) create mode 100644 changelog.d/15528.feature diff --git a/changelog.d/15528.feature b/changelog.d/15528.feature new file mode 100644 index 0000000000..aae9fa1ecf --- /dev/null +++ b/changelog.d/15528.feature @@ -0,0 +1 @@ +Stabilize support for [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. 
diff --git a/synapse/api/errors.py b/synapse/api/errors.py index f2d6f9ab2d..8c7c94b045 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -108,10 +108,10 @@ class Codes(str, Enum): USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL" - AS_PING_URL_NOT_SET = "FI.MAU.MSC2659_URL_NOT_SET" - AS_PING_BAD_STATUS = "FI.MAU.MSC2659_BAD_STATUS" - AS_PING_CONNECTION_TIMEOUT = "FI.MAU.MSC2659_CONNECTION_TIMEOUT" - AS_PING_CONNECTION_FAILED = "FI.MAU.MSC2659_CONNECTION_FAILED" + AS_PING_URL_NOT_SET = "M_URL_NOT_SET" + AS_PING_BAD_STATUS = "M_BAD_STATUS" + AS_PING_CONNECTION_TIMEOUT = "M_CONNECTION_TIMEOUT" + AS_PING_CONNECTION_FAILED = "M_CONNECTION_FAILED" # Attempt to send a second annotation with the same event type & annotation key # MSC2677 diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 024098e9cb..5fb3d5083d 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -326,7 +326,7 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None await self.post_json_get_json( - uri=f"{service.url}{APP_SERVICE_UNSTABLE_PREFIX}/fi.mau.msc2659/ping", + uri=f"{service.url}{APP_SERVICE_PREFIX}/ping", post_json={"transaction_id": txn_id}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 7af6dbcd09..6e453bd963 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -189,9 +189,6 @@ class ExperimentalConfig(Config): # MSC3967: Do not require UIA when first uploading cross signing keys self.msc3967_enabled = experimental.get("msc3967_enabled", False) - # MSC2659: Application service ping endpoint - self.msc2659_enabled = experimental.get("msc2659_enabled", False) - # MSC3981: Recurse relations self.msc3981_recurse_relations = experimental.get( "msc3981_recurse_relations", False diff --git a/synapse/rest/client/appservice_ping.py b/synapse/rest/client/appservice_ping.py index 31466a4ad4..3f553d14d1 100644 --- a/synapse/rest/client/appservice_ping.py +++ b/synapse/rest/client/appservice_ping.py @@ -39,9 +39,8 @@ logger = logging.getLogger(__name__) class AppservicePingRestServlet(RestServlet): PATTERNS = client_patterns( - "/fi.mau.msc2659/appservice/(?P[^/]*)/ping", - unstable=True, - releases=(), + "/appservice/(?P[^/]*)/ping", + releases=("v1",), ) def __init__(self, hs: "HomeServer"): @@ -107,9 +106,8 @@ class AppservicePingRestServlet(RestServlet): duration = time.monotonic() - start - return HTTPStatus.OK, {"duration": int(duration * 1000)} + return HTTPStatus.OK, {"duration_ms": int(duration * 1000)} def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.experimental.msc2659_enabled: - AppservicePingRestServlet(hs).register(http_server) + AppservicePingRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 59aed66464..5c98916ec2 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -111,7 +111,7 @@ class VersionsRestServlet(RestServlet): # Allows moderators to fetch redacted event content as described in MSC2815 "fi.mau.msc2815": self.config.experimental.msc2815_enabled, # Adds a ping endpoint for appservices to check HS->AS connection - "fi.mau.msc2659": self.config.experimental.msc2659_enabled, + "fi.mau.msc2659.stable": True, # TODO: remove when "v1.7" is added above # Adds support for login token requests as per MSC3882 "org.matrix.msc3882": 
self.config.experimental.msc3882_enabled, # Adds support for remotely enabling/disabling pushers, as per MSC3881 From 7e6ad62c49104e5a17d1d35fe4c743b9fd7cd31d Mon Sep 17 00:00:00 2001 From: Reto Schneider Date: Wed, 10 May 2023 15:00:27 +0200 Subject: [PATCH 34/42] Fix documented path to largest rooms statistics endpoint. (#15560) --- changelog.d/15560.doc | 1 + docs/admin_api/statistics.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15560.doc diff --git a/changelog.d/15560.doc b/changelog.d/15560.doc new file mode 100644 index 0000000000..a552391886 --- /dev/null +++ b/changelog.d/15560.doc @@ -0,0 +1 @@ +Fix path to the `statistics/database/rooms` admin API in documentation. diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md index 2bd417e900..59f07311eb 100644 --- a/docs/admin_api/statistics.md +++ b/docs/admin_api/statistics.md @@ -100,7 +100,7 @@ it should be enough to get a rough idea of where database disk space is going. The API is: ``` -GET /_synapse/admin/v1/statistics/statistics/database/rooms +GET /_synapse/admin/v1/statistics/database/rooms ``` A response body like the following is returned: From 722ccc30b5b66592099c39c3622e48fcf552d2e2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 11 May 2023 10:38:32 +0100 Subject: [PATCH 35/42] Add an unstable feature flag for MSC3391 to the /versions endpoint (#15562) --- changelog.d/15562.misc | 1 + synapse/rest/client/versions.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/15562.misc diff --git a/changelog.d/15562.misc b/changelog.d/15562.misc new file mode 100644 index 0000000000..eeeb553d8f --- /dev/null +++ b/changelog.d/15562.misc @@ -0,0 +1 @@ +Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. \ No newline at end of file diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 5c98916ec2..2d2be6ef38 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -125,6 +125,8 @@ class VersionsRestServlet(RestServlet): "org.matrix.msc3912": self.config.experimental.msc3912_enabled, # Adds support for unstable "intentional mentions" behaviour. "org.matrix.msc3952_intentional_mentions": self.config.experimental.msc3952_intentional_mentions, + # Adds support for deleting account data. + "org.matrix.msc3391": self.config.experimental.msc3391_enabled, }, }, ) From e4f545c452df817daa2f22dfda906f3451d98351 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Thu, 11 May 2023 05:30:56 -0500 Subject: [PATCH 36/42] Remove `worker_replication_*` settings (#15491) * Add master to the instance_map as part of Complement, have ReplicationEndpoint look at instance_map for master. * Fix typo in drive by. * Remove unnecessary worker_replication_* bits from unit tests and add master to instance_map(hopefully in the right place) * Several updates: 1. Switch from master to main for naming the main process in the instance_map. Add useful constants for easier adjustment of names in the future. 2. Add backwards compatibility for worker_replication_* to allow time to transition to new style. Make sure to prioritize declaring main directly on the instance_map. 3. Clean up old comments/commented out code. 4. Adjust unit tests to match with new code. 5. 
Adjust Complement setup infrastructure to only add main to the instance_map if workers are used and remove now unused options from the worker.yaml template. * Initial Docs upload * Changelog * Missed some commented out code that can go now * Remove TODO comment that no longer holds true. * Fix links in docs * More docs * Remove debug logging * Apply suggestions from code review Co-authored-by: reivilibre * Apply suggestions from code review Co-authored-by: reivilibre * Update version to latest, include completeish before/after examples in upgrade notes. * Fix up and docs too --------- Co-authored-by: reivilibre --- changelog.d/15491.misc | 1 + docker/conf-workers/worker.yaml.j2 | 4 - docker/configure_workers_and_start.py | 15 +++- .../workers/generic_worker.yaml | 4 - docs/upgrade.md | 78 +++++++++++++++++++ .../configuration/config_documentation.md | 20 +++-- docs/workers.md | 41 +++++++--- synapse/config/workers.py | 78 +++++++++++++++---- synapse/replication/http/_base.py | 16 ++-- tests/module_api/test_api.py | 1 + tests/replication/_base.py | 8 +- tests/replication/test_auth.py | 3 - tests/replication/test_client_reader_shard.py | 2 - .../test_sharded_event_persister.py | 1 + 14 files changed, 206 insertions(+), 66 deletions(-) create mode 100644 changelog.d/15491.misc diff --git a/changelog.d/15491.misc b/changelog.d/15491.misc new file mode 100644 index 0000000000..98f88dbf19 --- /dev/null +++ b/changelog.d/15491.misc @@ -0,0 +1 @@ +Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. diff --git a/docker/conf-workers/worker.yaml.j2 b/docker/conf-workers/worker.yaml.j2 index 42131afc95..44c6e413cf 100644 --- a/docker/conf-workers/worker.yaml.j2 +++ b/docker/conf-workers/worker.yaml.j2 @@ -6,10 +6,6 @@ worker_app: "{{ app }}" worker_name: "{{ name }}" -# The replication listener on the main synapse process. -worker_replication_host: 127.0.0.1 -worker_replication_http_port: 9093 - worker_listeners: - type: http port: {{ port }} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 4beec3daaf..79b5b87397 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -69,6 +69,9 @@ import yaml from jinja2 import Environment, FileSystemLoader MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 +MAIN_PROCESS_INSTANCE_NAME = "main" +MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1" +MAIN_PROCESS_REPLICATION_PORT = 9093 # A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced # during processing with the name of the worker. @@ -719,8 +722,8 @@ def generate_worker_files( # shared config file. listeners = [ { - "port": 9093, - "bind_address": "127.0.0.1", + "port": MAIN_PROCESS_REPLICATION_PORT, + "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS, "type": "http", "resources": [{"names": ["replication"]}], } @@ -870,6 +873,14 @@ def generate_worker_files( workers_in_use = len(requested_worker_types) > 0 + # If there are workers, add the main process to the instance_map too. 
+ if workers_in_use: + instance_map = shared_config.setdefault("instance_map", {}) + instance_map[MAIN_PROCESS_INSTANCE_NAME] = { + "host": MAIN_PROCESS_LOCALHOST_ADDRESS, + "port": MAIN_PROCESS_REPLICATION_PORT, + } + # Shared homeserver config convert( "/conf/shared.yaml.j2", diff --git a/docs/systemd-with-workers/workers/generic_worker.yaml b/docs/systemd-with-workers/workers/generic_worker.yaml index a858f99ed1..db6436ee6e 100644 --- a/docs/systemd-with-workers/workers/generic_worker.yaml +++ b/docs/systemd-with-workers/workers/generic_worker.yaml @@ -1,10 +1,6 @@ worker_app: synapse.app.generic_worker worker_name: generic_worker1 -# The replication listener on the main synapse process. -worker_replication_host: 127.0.0.1 -worker_replication_http_port: 9093 - worker_listeners: - type: http port: 8083 diff --git a/docs/upgrade.md b/docs/upgrade.md index 0886b03115..0625de8afb 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,84 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.84.0 + +## Deprecation of `worker_replication_*` configuration settings + +When using workers, +* `worker_replication_host` +* `worker_replication_http_port` +* `worker_replication_http_tls` + +can now be removed from individual worker YAML configuration ***if*** you add the main process to the `instance_map` in the shared YAML configuration, +using the name `main`. + +### Before: +Shared YAML +```yaml +instance_map: + generic_worker1: + host: localhost + port: 5678 + tls: false +``` +Worker YAML +```yaml +worker_app: synapse.app.generic_worker +worker_name: generic_worker1 + +worker_replication_host: localhost +worker_replication_http_port: 3456 +worker_replication_http_tls: false + +worker_listeners: + - type: http + port: 1234 + resources: + - names: [client, federation] + - type: http + port: 5678 + resources: + - names: [replication] + +worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml +``` +### After: +Shared YAML +```yaml +instance_map: + main: + host: localhost + port: 3456 + tls: false + generic_worker1: + host: localhost + port: 5678 + tls: false +``` +Worker YAML +```yaml +worker_app: synapse.app.generic_worker +worker_name: generic_worker1 + +worker_listeners: + - type: http + port: 1234 + resources: + - names: [client, federation] + - type: http + port: 5678 + resources: + - names: [replication] + +worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml + +``` +Notes: +* `tls` is optional but mirrors the functionality of `worker_replication_http_tls` + + + # Upgrading to v1.81.0 ## Application service path & authentication deprecations diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 6dd1a639ed..dc965b4119 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3884,15 +3884,20 @@ federation_sender_instances: ### `instance_map` When using workers this should be a map from [`worker_name`](#worker_name) to the -HTTP replication listener of the worker, if configured. +HTTP replication listener of the worker, if configured, and to the main process. Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs a HTTP replication listener, and that listener should be included in the `instance_map`. -(The main process also needs an HTTP replication listener, but it should not be -listed in the `instance_map`.) 
+The main process also needs an entry on the `instance_map`, and it should be listed under +`main` **if even one other worker exists**. Ensure the port matches with what is declared +inside the `listener` block for a `replication` listener. + Example configuration: ```yaml instance_map: + main: + host: localhost + port: 8030 worker1: host: localhost port: 8034 @@ -4024,6 +4029,7 @@ worker_name: generic_worker1 ``` --- ### `worker_replication_host` +*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.* The HTTP replication endpoint that it should talk to on the main Synapse process. The main Synapse process defines this with a `replication` resource in @@ -4035,6 +4041,7 @@ worker_replication_host: 127.0.0.1 ``` --- ### `worker_replication_http_port` +*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.* The HTTP replication port that it should talk to on the main Synapse process. The main Synapse process defines this with a `replication` resource in @@ -4046,6 +4053,7 @@ worker_replication_http_port: 9093 ``` --- ### `worker_replication_http_tls` +*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.* Whether TLS should be used for talking to the HTTP replication port on the main Synapse process. @@ -4071,9 +4079,9 @@ A worker can handle HTTP requests. To do so, a `worker_listeners` option must be declared, in the same way as the [`listeners` option](#listeners) in the shared config. -Workers declared in [`stream_writers`](#stream_writers) will need to include a -`replication` listener here, in order to accept internal HTTP requests from -other workers. +Workers declared in [`stream_writers`](#stream_writers) and [`instance_map`](#instance_map) + will need to include a `replication` listener here, in order to accept internal HTTP +requests from other workers. Example configuration: ```yaml diff --git a/docs/workers.md b/docs/workers.md index 765f03c263..991814c0bc 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -87,12 +87,18 @@ shared configuration file. ### Shared configuration -Normally, only a couple of changes are needed to make an existing configuration -file suitable for use with workers. First, you need to enable an +Normally, only a few changes are needed to make an existing configuration +file suitable for use with workers: +* First, you need to enable an ["HTTP replication listener"](usage/configuration/config_documentation.md#listeners) -for the main process; and secondly, you need to enable -[redis-based replication](usage/configuration/config_documentation.md#redis). -Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret) +for the main process +* Secondly, you need to enable +[redis-based replication](usage/configuration/config_documentation.md#redis) +* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map) +with the `main` process defined, as well as the relevant connection information from +it's HTTP `replication` listener (defined in step 1 above). Note that the `host` defined +is the address the worker needs to look for the `main` process at, not necessarily the same address that is bound to. 
+* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret) can be used to authenticate HTTP traffic between workers. For example: ```yaml @@ -111,6 +117,11 @@ worker_replication_secret: "" redis: enabled: true + +instance_map: + main: + host: 'localhost' + port: 9093 ``` See the [configuration manual](usage/configuration/config_documentation.md) @@ -130,13 +141,13 @@ In the config file for each worker, you must specify: * The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)). The currently available worker applications are listed [below](#available-worker-applications). * A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)). - * The HTTP replication endpoint that it should talk to on the main synapse process - ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and - [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option with an `http` listener. * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer. + * **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process + ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and + [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map` For example: @@ -417,11 +428,14 @@ effects of bursts of events from that bridge on events sent by normal users. Additionally, the writing of specific streams (such as events) can be moved off of the main process to a particular worker. -To enable this, the worker must have a -[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured, -have a [`worker_name`](usage/configuration/config_documentation.md#worker_name) +To enable this, the worker must have: +* An [HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured, +* Have a [`worker_name`](usage/configuration/config_documentation.md#worker_name) and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map) -config. The same worker can handle multiple streams, but unless otherwise documented, +config. +* Have the main process declared on the [`instance_map`](usage/configuration/config_documentation.md#instance_map) as well. + +Note: The same worker can handle multiple streams, but unless otherwise documented, each stream can only have a single writer. 
For example, to move event persistence off to a dedicated worker, the shared @@ -429,6 +443,9 @@ configuration would include: ```yaml instance_map: + main: + host: localhost + port: 8030 event_persister1: host: localhost port: 8034 diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 95b4047f1d..d2311cc857 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -39,6 +39,19 @@ The '%s' configuration option is deprecated and will be removed in a future Synapse version. Please use ``%s: name_of_worker`` instead. """ +_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """ +Missing data for a worker to connect to main process. Please include '%s' in the +`instance_map` declared in your shared yaml configuration, or optionally(as a deprecated +solution) in every worker's yaml as various `worker_replication_*` settings as defined +in workers documentation here: +`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration` +""" +# This allows for a handy knob when it's time to change from 'master' to +# something with less 'history' +MAIN_PROCESS_INSTANCE_NAME = "master" +# Use this to adjust what the main process is known as in the yaml instance_map +MAIN_PROCESS_INSTANCE_MAP_NAME = "main" + logger = logging.getLogger(__name__) @@ -161,27 +174,15 @@ class WorkerConfig(Config): raise ConfigError("worker_log_config must be a string") self.worker_log_config = worker_log_config - # The host used to connect to the main synapse - self.worker_replication_host = config.get("worker_replication_host", None) - # The port on the main synapse for TCP replication if "worker_replication_port" in config: raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",)) - # The port on the main synapse for HTTP replication endpoint - self.worker_replication_http_port = config.get("worker_replication_http_port") - - # The tls mode on the main synapse for HTTP replication endpoint. - # For backward compatibility this defaults to False. - self.worker_replication_http_tls = config.get( - "worker_replication_http_tls", False - ) - # The shared secret used for authentication when connecting to the main synapse. self.worker_replication_secret = config.get("worker_replication_secret", None) self.worker_name = config.get("worker_name", self.worker_app) - self.instance_name = self.worker_name or "master" + self.instance_name = self.worker_name or MAIN_PROCESS_INSTANCE_NAME # FIXME: Remove this check after a suitable amount of time. self.worker_main_http_uri = config.get("worker_main_http_uri", None) @@ -215,12 +216,55 @@ class WorkerConfig(Config): ) # A map from instance name to host/port of their HTTP replication endpoint. + # Check if the main process is declared. Inject it into the map if it's not, + # based first on if a 'main' block is declared then on 'worker_replication_*' + # data. If both are available, default to instance_map. The main process + # itself doesn't need this data as it would never have to talk to itself. + instance_map: Dict[str, Any] = config.get("instance_map", {}) + + if instance_map and self.instance_name is not MAIN_PROCESS_INSTANCE_NAME: + # The host used to connect to the main synapse + main_host = config.get("worker_replication_host", None) + + # The port on the main synapse for HTTP replication endpoint + main_port = config.get("worker_replication_http_port") + + # The tls mode on the main synapse for HTTP replication endpoint. + # For backward compatibility this defaults to False. 
+ main_tls = config.get("worker_replication_http_tls", False) + + # For now, accept 'main' in the instance_map, but the replication system + # expects 'master', force that into being until it's changed later. + if MAIN_PROCESS_INSTANCE_MAP_NAME in instance_map: + instance_map[MAIN_PROCESS_INSTANCE_NAME] = instance_map[ + MAIN_PROCESS_INSTANCE_MAP_NAME + ] + del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME] + + # This is the backwards compatibility bit that handles the + # worker_replication_* bits using setdefault() to not overwrite anything. + elif main_host is not None and main_port is not None: + instance_map.setdefault( + MAIN_PROCESS_INSTANCE_NAME, + { + "host": main_host, + "port": main_port, + "tls": main_tls, + }, + ) + + else: + # If we've gotten here, it means that the main process is not on the + # instance_map and that not enough worker_replication_* variables + # were declared in the worker's yaml. + raise ConfigError( + _MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA + % MAIN_PROCESS_INSTANCE_MAP_NAME + ) + self.instance_map: Dict[ str, InstanceLocationConfig - ] = parse_and_validate_mapping( - config.get("instance_map", {}), - InstanceLocationConfig, - ) + ] = parse_and_validate_mapping(instance_map, InstanceLocationConfig) # Map from type of streams to source, c.f. WriterLocations. writers = config.get("stream_writers") or {} diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 23129962e9..dc7820f963 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -25,6 +25,7 @@ from twisted.internet.error import ConnectError, DNSLookupError from twisted.web.server import Request from synapse.api.errors import HttpResponseException, SynapseError +from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME from synapse.http import RequestTimedOutError from synapse.http.server import HttpServer from synapse.http.servlet import parse_json_object_from_request @@ -197,11 +198,6 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): client = hs.get_replication_client() local_instance_name = hs.get_instance_name() - # The value of these option should match the replication listener settings - master_host = hs.config.worker.worker_replication_host - master_port = hs.config.worker.worker_replication_http_port - master_tls = hs.config.worker.worker_replication_http_tls - instance_map = hs.config.worker.instance_map outgoing_gauge = _pending_outgoing_requests.labels(cls.NAME) @@ -213,7 +209,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): ) @trace_with_opname("outgoing_replication_request") - async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: + async def send_request( + *, instance_name: str = MAIN_PROCESS_INSTANCE_NAME, **kwargs: Any + ) -> Any: # We have to pull these out here to avoid circular dependencies... 
streams = hs.get_replication_command_handler().get_streams_to_replicate() replication = hs.get_replication_data_handler() @@ -221,11 +219,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): with outgoing_gauge.track_inprogress(): if instance_name == local_instance_name: raise Exception("Trying to send HTTP request to self") - if instance_name == "master": - host = master_host - port = master_port - tls = master_tls - elif instance_name in instance_map: + if instance_name in instance_map: host = instance_map[instance_name].host port = instance_map[instance_name].port tls = instance_map[instance_name].tls diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 758b4bc38b..bff7114cd8 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -837,6 +837,7 @@ class ModuleApiWorkerTestCase(BaseModuleApiTestCase, BaseMultiWorkerStreamTestCa conf = super().default_config() conf["stream_writers"] = {"presence": ["presence_writer"]} conf["instance_map"] = { + "main": {"host": "testserv", "port": 8765}, "presence_writer": {"host": "testserv", "port": 1001}, } return conf diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 0f1a8a145f..eb9b1f1cd9 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -110,8 +110,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): def _get_worker_hs_config(self) -> dict: config = self.default_config() config["worker_app"] = "synapse.app.generic_worker" - config["worker_replication_host"] = "testserv" - config["worker_replication_http_port"] = "8765" + config["instance_map"] = {"main": {"host": "testserv", "port": 8765}} return config def _build_replication_data_handler(self) -> "TestReplicationDataHandler": @@ -249,6 +248,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): """ base = super().default_config() base["redis"] = {"enabled": True} + base["instance_map"] = {"main": {"host": "testserv", "port": 8765}} return base def setUp(self) -> None: @@ -310,7 +310,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): def make_worker_hs( self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any ) -> HomeServer: - """Make a new worker HS instance, correctly connecting replcation + """Make a new worker HS instance, correctly connecting replication stream to the master HS. 
Args: @@ -388,8 +388,6 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): def _get_worker_hs_config(self) -> dict: config = self.default_config() - config["worker_replication_host"] = "testserv" - config["worker_replication_http_port"] = "8765" return config def replicate(self) -> None: diff --git a/tests/replication/test_auth.py b/tests/replication/test_auth.py index 98602371e4..f7bca0063d 100644 --- a/tests/replication/test_auth.py +++ b/tests/replication/test_auth.py @@ -43,9 +43,6 @@ class WorkerAuthenticationTestCase(BaseMultiWorkerStreamTestCase): def _get_worker_hs_config(self) -> dict: config = self.default_config() config["worker_app"] = "synapse.app.generic_worker" - config["worker_replication_host"] = "testserv" - config["worker_replication_http_port"] = "8765" - return config def _test_register(self) -> FakeChannel: diff --git a/tests/replication/test_client_reader_shard.py b/tests/replication/test_client_reader_shard.py index eca5033761..a18859099f 100644 --- a/tests/replication/test_client_reader_shard.py +++ b/tests/replication/test_client_reader_shard.py @@ -29,8 +29,6 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): def _get_worker_hs_config(self) -> dict: config = self.default_config() config["worker_app"] = "synapse.app.generic_worker" - config["worker_replication_host"] = "testserv" - config["worker_replication_http_port"] = "8765" return config def test_register_single_worker(self) -> None: diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index 7f9cc67e73..4623d737fb 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -50,6 +50,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): conf = super().default_config() conf["stream_writers"] = {"events": ["worker1", "worker2"]} conf["instance_map"] = { + "main": {"host": "testserv", "port": 8765}, "worker1": {"host": "testserv", "port": 1001}, "worker2": {"host": "testserv", "port": 1002}, } From 5bf9ec9e3e2c54448708f5d534aa50a68d680cc0 Mon Sep 17 00:00:00 2001 From: V02460 Date: Thu, 11 May 2023 13:40:55 +0200 Subject: [PATCH 37/42] Require at least poetry-core v1.2.0 (#15566) Signed-off-by: Kai A. Hiller --- changelog.d/15566.bugfix | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15566.bugfix diff --git a/changelog.d/15566.bugfix b/changelog.d/15566.bugfix new file mode 100644 index 0000000000..07fac520f6 --- /dev/null +++ b/changelog.d/15566.bugfix @@ -0,0 +1 @@ +Require at least poetry-core v1.2.0. diff --git a/pyproject.toml b/pyproject.toml index 684f65b4a0..640c6c4601 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -368,7 +368,7 @@ furo = ">=2022.12.7,<2024.0.0" # system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.0.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"] +requires = ["poetry-core>=1.2.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"] build-backend = "poetry.core.masonry.api" From 2611433b70fc30c436f6b9b950a3bcc533b3df5b Mon Sep 17 00:00:00 2001 From: Roel ter Maat Date: Thu, 11 May 2023 14:02:51 +0200 Subject: [PATCH 38/42] Add redis SSL configuration options (#15312) * Add SSL options to redis config * fix lint issues * Add documentation and changelog file * add missing . 
at the end of the changelog * Move client context factory to new file * Rename ssl to tls and fix typo * fix lint issues * Added when redis attributes were added --- changelog.d/15312.feature | 1 + contrib/docker_compose_workers/README.md | 4 +++ .../configuration/config_documentation.md | 11 ++++++ synapse/config/redis.py | 6 ++++ synapse/replication/tcp/context.py | 34 +++++++++++++++++++ synapse/replication/tcp/handler.py | 29 ++++++++++++---- synapse/replication/tcp/redis.py | 27 +++++++++++---- 7 files changed, 98 insertions(+), 14 deletions(-) create mode 100644 changelog.d/15312.feature create mode 100644 synapse/replication/tcp/context.py diff --git a/changelog.d/15312.feature b/changelog.d/15312.feature new file mode 100644 index 0000000000..e4e972cfeb --- /dev/null +++ b/changelog.d/15312.feature @@ -0,0 +1 @@ +Add redis TLS configuration options. \ No newline at end of file diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md index d3cdfe5614..ebb225fba6 100644 --- a/contrib/docker_compose_workers/README.md +++ b/contrib/docker_compose_workers/README.md @@ -70,6 +70,10 @@ redis: port: 6379 # dbid: # password: + # use_tls: True + # certificate_file: + # private_key_file: + # ca_file: ``` This assumes that your Redis service is called `redis` in your Docker Compose file. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index dc965b4119..93b132b6e4 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3981,9 +3981,16 @@ This setting has the following sub-options: localhost and 6379 * `password`: Optional password if configured on the Redis instance. * `dbid`: Optional redis dbid if needs to connect to specific redis logical db. +* `use_tls`: Whether to use tls connection. Defaults to false. +* `certificate_file`: Optional path to the certificate file +* `private_key_file`: Optional path to the private key file +* `ca_file`: Optional path to the CA certificate file. Use this one or: +* `ca_path`: Optional path to the folder containing the CA certificate file _Added in Synapse 1.78.0._ + _Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_ + Example configuration: ```yaml redis: @@ -3992,6 +3999,10 @@ redis: port: 6379 password: dbid: + #use_tls: True + #certificate_file: + #private_key_file: + #ca_file: ``` --- ## Individual worker configuration diff --git a/synapse/config/redis.py b/synapse/config/redis.py index e6a75be434..636cb450b8 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -35,3 +35,9 @@ class RedisConfig(Config): self.redis_port = redis_config.get("port", 6379) self.redis_dbid = redis_config.get("dbid", None) self.redis_password = redis_config.get("password") + + self.redis_use_tls = redis_config.get("use_tls", False) + self.redis_certificate = redis_config.get("certificate_file", None) + self.redis_private_key = redis_config.get("private_key_file", None) + self.redis_ca_file = redis_config.get("ca_file", None) + self.redis_ca_path = redis_config.get("ca_path", None) diff --git a/synapse/replication/tcp/context.py b/synapse/replication/tcp/context.py new file mode 100644 index 0000000000..4688b2200b --- /dev/null +++ b/synapse/replication/tcp/context.py @@ -0,0 +1,34 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from OpenSSL.SSL import Context +from twisted.internet import ssl + +from synapse.config.redis import RedisConfig + + +class ClientContextFactory(ssl.ClientContextFactory): + def __init__(self, redis_config: RedisConfig): + self.redis_config = redis_config + + def getContext(self) -> Context: + ctx = super().getContext() + if self.redis_config.redis_certificate: + ctx.use_certificate_file(self.redis_config.redis_certificate) + if self.redis_config.redis_private_key: + ctx.use_privatekey_file(self.redis_config.redis_private_key) + if self.redis_config.redis_ca_file: + ctx.load_verify_locations(cafile=self.redis_config.redis_ca_file) + elif self.redis_config.redis_ca_path: + ctx.load_verify_locations(capath=self.redis_config.redis_ca_path) + return ctx diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 2290b3e6fe..233ad61d49 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -46,6 +46,7 @@ from synapse.replication.tcp.commands import ( UserIpCommand, UserSyncCommand, ) +from synapse.replication.tcp.context import ClientContextFactory from synapse.replication.tcp.protocol import IReplicationConnection from synapse.replication.tcp.streams import ( STREAMS_MAP, @@ -348,13 +349,27 @@ class ReplicationCommandHandler: outbound_redis_connection, channel_names=self._channels_to_subscribe_to, ) - hs.get_reactor().connectTCP( - hs.config.redis.redis_host, - hs.config.redis.redis_port, - self._factory, - timeout=30, - bindAddress=None, - ) + + reactor = hs.get_reactor() + redis_config = hs.config.redis + if hs.config.redis.redis_use_tls: + ssl_context_factory = ClientContextFactory(hs.config.redis) + reactor.connectSSL( + redis_config.redis_host, + redis_config.redis_port, + self._factory, + ssl_context_factory, + timeout=30, + bindAddress=None, + ) + else: + reactor.connectTCP( + redis_config.redis_host, + redis_config.redis_port, + self._factory, + timeout=30, + bindAddress=None, + ) def get_streams(self) -> Dict[str, Stream]: """Get a map from stream name to all streams.""" diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index dfc061eb5e..c8f4bf8b27 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -35,6 +35,7 @@ from synapse.replication.tcp.commands import ( ReplicateCommand, parse_command_from_line, ) +from synapse.replication.tcp.context import ClientContextFactory from synapse.replication.tcp.protocol import ( IReplicationConnection, tcp_inbound_commands_counter, @@ -386,12 +387,24 @@ def lazyConnection( factory.continueTrying = reconnect reactor = hs.get_reactor() - reactor.connectTCP( - host, - port, - factory, - timeout=30, - bindAddress=None, - ) + + if hs.config.redis.redis_use_tls: + ssl_context_factory = ClientContextFactory(hs.config.redis) + reactor.connectSSL( + host, + port, + factory, + ssl_context_factory, + timeout=30, + bindAddress=None, + ) + else: + reactor.connectTCP( + host, + port, 
+ factory, + timeout=30, + bindAddress=None, + ) return factory.handler From 5a7742a83308743dfd5e9a1fab751e345757aab7 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 11 May 2023 17:22:47 +0100 Subject: [PATCH 39/42] Allow `pip install` to use setuptools_rust 1.6.0 (#15570) * Allow `pip install` to use setuptools_rust 1.6.0 This was bumped by dependabot in #15512, but we didn't bump also raise the version guard here. I don't know how we can avoid this happening in the future. Closes #15461. Spotted in [1] by @landryb. [1]: https://github.com/matrix-org/synapse/issues/15461#issuecomment-1543513934 * Changelog --- changelog.d/15570.misc | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15570.misc diff --git a/changelog.d/15570.misc b/changelog.d/15570.misc new file mode 100644 index 0000000000..ee04509981 --- /dev/null +++ b/changelog.d/15570.misc @@ -0,0 +1 @@ +Allow `pip install` to use setuptools_rust 1.6.0 when building Synapse. diff --git a/pyproject.toml b/pyproject.toml index 640c6c4601..6bbc914af6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -368,7 +368,7 @@ furo = ">=2022.12.7,<2024.0.0" # system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.2.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"] +requires = ["poetry-core>=1.2.0,<=1.5.0", "setuptools_rust>=1.3,<=1.6.0"] build-backend = "poetry.core.masonry.api" From d19d1edbcf78a58da3483ecf51f107fedb1f3fd0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 11 May 2023 11:50:46 -0500 Subject: [PATCH 40/42] Print full startup/initialization error (#15569) I found the error in the **Before** really vague and obtuse and didn't realize port `5432` corresponded to the Postgres port until searching the codebase. It says to check the logs but that wasn't my first instinct. It's just more obvious if we just print the full thing which gives context of the error type and the traceback to the relevant area of code. #### Before ``` $ poetry run python -m synapse.app.homeserver -c homeserver.yaml ********************************************************************************** Error during initialisation: connection to server at "localhost" (::1), port 5432 failed: Connection refused Is the server running on that host and accepting TCP/IP connections? connection to server at "localhost" (127.0.0.1), port 5432 failed: Connection refused Is the server running on that host and accepting TCP/IP connections? There may be more information in the logs. 
********************************************************************************** ``` #### After ```sh $ poetry run python -m synapse.app.homeserver -c homeserver.yaml ********************************************************************************** Error during initialisation: Traceback (most recent call last): File "/home/eric/Documents/github/element/synapse/synapse/app/homeserver.py", line 352, in setup hs.setup() File "/home/eric/Documents/github/element/synapse/synapse/server.py", line 337, in setup self.datastores = Databases(self.DATASTORE_CLASS, self) File "/home/eric/Documents/github/element/synapse/synapse/storage/databases/__init__.py", line 65, in __init__ with make_conn(database_config, engine, "startup") as db_conn: File "/home/eric/Documents/github/element/synapse/synapse/storage/database.py", line 161, in make_conn native_db_conn = engine.module.connect(**db_params) File "/home/eric/.cache/pypoetry/virtualenvs/matrix-synapse-xCtC9ulO-py3.10/lib/python3.10/site-packages/psycopg2/__init__.py", line 122, in connect conn = _connect(dsn, connection_factory=connection_factory, **kwasync) psycopg2.OperationalError: connection to server at "localhost" (::1), port 5432 failed: Connection refused Is the server running on that host and accepting TCP/IP connections? connection to server at "localhost" (127.0.0.1), port 5432 failed: Connection refused Is the server running on that host and accepting TCP/IP connections? There may be more information in the logs. ********************************************************************************** ``` --- changelog.d/15569.feature | 1 + synapse/app/_base.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15569.feature diff --git a/changelog.d/15569.feature b/changelog.d/15569.feature new file mode 100644 index 0000000000..b58af8ad55 --- /dev/null +++ b/changelog.d/15569.feature @@ -0,0 +1 @@ +Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 7f83b34d89..4dfcf484fa 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -21,6 +21,7 @@ import socket import sys import traceback import warnings +from textwrap import indent from typing import ( TYPE_CHECKING, Any, @@ -212,8 +213,12 @@ def handle_startup_exception(e: Exception) -> NoReturn: # Exceptions that occur between setting up the logging and forking or starting # the reactor are written to the logs, followed by a summary to stderr. logger.exception("Exception during startup") + + error_string = "".join(traceback.format_exception(e)) + indented_error_string = indent(error_string, " ") + quit_with_error( - f"Error during initialisation:\n {e}\nThere may be more information in the logs." + f"Error during initialisation:\n{indented_error_string}\nThere may be more information in the logs." 
) From 7c76514f1e412d49dbde7070841de1a68400ccde Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 11 May 2023 19:24:32 +0100 Subject: [PATCH 41/42] Deal with more GHA deprecations (#15576) * Bump netlify PR * Manually cache mypy cache dir cache cache cache cache cache cache cache cache cache cache * Changelog --- .github/workflows/docs-pr-netlify.yaml | 2 +- .github/workflows/tests.yml | 15 ++++++++------- changelog.d/15576.misc | 1 + 3 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15576.misc diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index d613dd9e26..928bcae8cf 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -22,7 +22,7 @@ jobs: path: book - name: 📤 Deploy to Netlify - uses: matrix-org/netlify-pr-preview@v1 + uses: matrix-org/netlify-pr-preview@v2 with: path: book owner: ${{ github.event.workflow_run.head_repository.owner.login }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4333f55a53..e128fd54f7 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -107,14 +107,15 @@ jobs: uses: dtolnay/rust-toolchain@1.58.1 - uses: Swatinem/rust-cache@v2 - # NB: I have two concerns with this action: - # 1. We occasionally see odd mypy problems that aren't reproducible - # locally with clean caches. I suspect some dodgy caching behaviour. - # 2. The action uses GHA machinery that's deprecated - # (https://github.com/AustinScola/mypy-cache-github-action/issues/277) - # It may be simpler to use actions/cache ourselves to restore .mypy_cache. + # Cribbed from + # https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17 - name: Restore/persist mypy's cache - uses: AustinScola/mypy-cache-github-action@df56268388422ee282636ee2c7a9cc55ec644a41 + uses: actions/cache@v3 + with: + path: | + .mypy_cache + key: mypy-cache-${{ github.context.sha }} + restore-keys: mypy-cache- - name: Run mypy run: poetry run mypy diff --git a/changelog.d/15576.misc b/changelog.d/15576.misc new file mode 100644 index 0000000000..9df71999d9 --- /dev/null +++ b/changelog.d/15576.misc @@ -0,0 +1 @@ +Deal with upcoming Github Actions deprecations. From 08297f2f18667634ad990fd4af469d6c68c70c7d Mon Sep 17 00:00:00 2001 From: helix-loop <81391886+helix-loop@users.noreply.github.com> Date: Fri, 12 May 2023 11:32:09 +0200 Subject: [PATCH 42/42] Add pkg-config package to Stage 0 (#15567) --- changelog.d/15567.docker | 1 + docker/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15567.docker diff --git a/changelog.d/15567.docker b/changelog.d/15567.docker new file mode 100644 index 0000000000..8995bc1bd7 --- /dev/null +++ b/changelog.d/15567.docker @@ -0,0 +1 @@ +Add pkg-config package to Stage 0 to be able to build Dockerfile on ppc64le architecture. diff --git a/docker/Dockerfile b/docker/Dockerfile index 3d07bcd71f..6107dced43 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -37,7 +37,7 @@ RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update -qq && apt-get install -yqq \ - build-essential curl git libffi-dev libssl-dev \ + build-essential curl git libffi-dev libssl-dev pkg-config \ && rm -rf /var/lib/apt/lists/* # Install rust and ensure its in the PATH.
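As a standalone illustration of the startup-error reporting introduced in the "Print full startup/initialization error" patch above, the same `traceback.format_exception` plus `textwrap.indent` pattern can be reproduced in isolation. This is a minimal sketch, not part of any patch in this series, and it assumes Python 3.10+, where `traceback.format_exception` accepts a single exception object as used in that change.

```python
# Minimal sketch (not part of this patch series) of the error formatting
# used by handle_startup_exception: render the full traceback and indent
# it so the summary printed to stderr carries the whole stack trace.
# Assumes Python 3.10+, where traceback.format_exception(exc) accepts a
# single exception instance.
import traceback
from textwrap import indent


def format_startup_error(e: Exception) -> str:
    error_string = "".join(traceback.format_exception(e))
    indented_error_string = indent(error_string, "    ")
    return (
        f"Error during initialisation:\n{indented_error_string}\n"
        "There may be more information in the logs."
    )


try:
    raise ConnectionRefusedError(
        'connection to server at "localhost", port 5432 failed'
    )
except Exception as exc:
    print(format_startup_error(exc))
```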