From 706456de1f63a4b9796592fd452da7ce30735274 Mon Sep 17 00:00:00 2001 From: Jason Robinson Date: Tue, 26 Apr 2022 17:31:52 +0300 Subject: [PATCH 01/87] Mark Dockerfile as requiring BuildKit (#12541) Co-authored-by: David Robertson --- changelog.d/12541.docker | 1 + docker/Dockerfile | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/12541.docker diff --git a/changelog.d/12541.docker b/changelog.d/12541.docker new file mode 100644 index 0000000000..c3b9c31657 --- /dev/null +++ b/changelog.d/12541.docker @@ -0,0 +1 @@ +Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. diff --git a/docker/Dockerfile b/docker/Dockerfile index 4523c60645..ccc6a9f778 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # Dockerfile to build the matrixdotorg/synapse docker images. # # Note that it uses features which are only available in BuildKit - see From 63ba9ba38b47bc20074e2c83492c4d113cfe21b8 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Tue, 26 Apr 2022 20:14:21 +0100 Subject: [PATCH 02/87] Bound ephemeral events by key (#12544) Co-authored-by: Brad Murray Co-authored-by: Andrew Morgan --- changelog.d/12544.bugfix | 1 + synapse/handlers/appservice.py | 4 +- synapse/handlers/receipts.py | 4 +- tests/handlers/test_appservice.py | 82 +++++++++++++++++++++++++++++++ 4 files changed, 87 insertions(+), 4 deletions(-) create mode 100644 changelog.d/12544.bugfix diff --git a/changelog.d/12544.bugfix b/changelog.d/12544.bugfix new file mode 100644 index 0000000000..b5169cd831 --- /dev/null +++ b/changelog.d/12544.bugfix @@ -0,0 +1 @@ +Fix a bug where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 1b57840506..b3894666cc 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -416,7 +416,7 @@ class ApplicationServicesHandler: return typing async def _handle_receipts( - self, service: ApplicationService, new_token: Optional[int] + self, service: ApplicationService, new_token: int ) -> List[JsonDict]: """ Return the latest read receipts that the given application service should receive. @@ -447,7 +447,7 @@ class ApplicationServicesHandler: receipts_source = self.event_sources.sources.receipt receipts, _ = await receipts_source.get_new_events_as( - service=service, from_key=from_key + service=service, from_key=from_key, to_key=new_token ) return receipts diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 6250bb3bdf..cfe860decc 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -239,13 +239,14 @@ class ReceiptEventSource(EventSource[int, JsonDict]): return events, to_key async def get_new_events_as( - self, from_key: int, service: ApplicationService + self, from_key: int, to_key: int, service: ApplicationService ) -> Tuple[List[JsonDict], int]: """Returns a set of new read receipt events that an appservice may be interested in. 
Args: from_key: the stream position at which events should be fetched from + to_key: the stream position up to which events should be fetched to service: The appservice which may be interested Returns: @@ -255,7 +256,6 @@ class ReceiptEventSource(EventSource[int, JsonDict]): * The current read receipt stream token. """ from_key = int(from_key) - to_key = self.get_current_key() if from_key == to_key: return [], to_key diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 8c72cf6b30..5b0cd1ab86 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -411,6 +411,88 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): "exclusive_as_user", "password", self.exclusive_as_user_device_id ) + def test_sending_read_receipt_batches_to_application_services(self): + """Tests that a large batch of read receipts are sent correctly to + interested application services. + """ + # Register an application service that's interested in a certain user + # and room prefix + interested_appservice = self._register_application_service( + namespaces={ + ApplicationService.NS_USERS: [ + { + "regex": "@exclusive_as_user:.+", + "exclusive": True, + } + ], + ApplicationService.NS_ROOMS: [ + { + "regex": "!fakeroom_.*", + "exclusive": True, + } + ], + }, + ) + + # "Complete" a transaction. + # All this really does for us is make an entry in the application_services_state + # database table, which tracks the current stream_token per stream ID per AS. + self.get_success( + self.hs.get_datastores().main.complete_appservice_txn( + 0, + interested_appservice, + ) + ) + + # Now, pretend that we receive a large burst of read receipts (300 total) that + # all come in at once. + for i in range(300): + self.get_success( + # Insert a fake read receipt into the database + self.hs.get_datastores().main.insert_receipt( + # We have to use unique room ID + user ID combinations here, as the db query + # is an upsert. + room_id=f"!fakeroom_{i}:test", + receipt_type="m.read", + user_id=self.local_user, + event_ids=[f"$eventid_{i}"], + data={}, + ) + ) + + # Now notify the appservice handler that 300 read receipts have all arrived + # at once. What will it do! + # note: stream tokens start at 2 + for stream_token in range(2, 303): + self.get_success( + self.hs.get_application_service_handler()._notify_interested_services_ephemeral( + services=[interested_appservice], + stream_key="receipt_key", + new_token=stream_token, + users=[self.exclusive_as_user], + ) + ) + + # Using our txn send mock, we can see what the AS received. After iterating over every + # transaction, we'd like to see all 300 read receipts accounted for. + # No more, no less. 
+ all_ephemeral_events = [] + for call in self.send_mock.call_args_list: + ephemeral_events = call[0][2] + all_ephemeral_events += ephemeral_events + + # Ensure that no duplicate events were sent + self.assertEqual(len(all_ephemeral_events), 300) + + # Check that the ephemeral event is a read receipt with the expected structure + latest_read_receipt = all_ephemeral_events[-1] + self.assertEqual(latest_read_receipt["type"], "m.receipt") + + event_id = list(latest_read_receipt["content"].keys())[0] + self.assertEqual( + latest_read_receipt["content"][event_id]["m.read"], {self.local_user: {}} + ) + @unittest.override_config( {"experimental_features": {"msc2409_to_device_messages_enabled": True}} ) From b76f1a4d5f918def1f643910939b80e9e035e07f Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 27 Apr 2022 14:05:00 +0200 Subject: [PATCH 03/87] Add some type hints to datastore (#12485) --- changelog.d/12485.misc | 1 + synapse/storage/databases/main/__init__.py | 21 +++-- synapse/storage/databases/main/appservice.py | 4 +- synapse/storage/databases/main/deviceinbox.py | 79 +++++++++++++------ synapse/storage/databases/main/devices.py | 51 +++++++----- .../storage/databases/main/group_server.py | 4 +- synapse/storage/databases/main/keys.py | 15 ++-- .../databases/main/media_repository.py | 13 ++- synapse/storage/databases/main/presence.py | 19 ++++- synapse/storage/databases/main/pusher.py | 49 ++++++++---- synapse/storage/databases/main/state.py | 4 +- synapse/storage/databases/main/ui_auth.py | 12 +-- 12 files changed, 188 insertions(+), 84 deletions(-) create mode 100644 changelog.d/12485.misc diff --git a/changelog.d/12485.misc b/changelog.d/12485.misc new file mode 100644 index 0000000000..e793d08e5e --- /dev/null +++ b/changelog.d/12485.misc @@ -0,0 +1 @@ +Add some type hints to datastore. \ No newline at end of file diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 951031af50..5895b89202 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -15,12 +15,17 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple, cast from synapse.config.homeserver import HomeServerConfig -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.storage.databases.main.stats import UserSortOrder -from synapse.storage.engines import PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine +from synapse.storage.types import Cursor from synapse.storage.util.id_generators import ( IdGenerator, MultiWriterIdGenerator, @@ -266,7 +271,9 @@ class DataStore( A tuple of a list of mappings from user to information and a count of total users. 
""" - def get_users_paginate_txn(txn): + def get_users_paginate_txn( + txn: LoggingTransaction, + ) -> Tuple[List[JsonDict], int]: filters = [] args = [self.hs.config.server.server_name] @@ -301,7 +308,7 @@ class DataStore( """ sql = "SELECT COUNT(*) as total_users " + sql_base txn.execute(sql, args) - count = txn.fetchone()[0] + count = cast(Tuple[int], txn.fetchone())[0] sql = f""" SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, @@ -338,7 +345,9 @@ class DataStore( ) -def check_database_before_upgrade(cur, database_engine, config: HomeServerConfig): +def check_database_before_upgrade( + cur: Cursor, database_engine: BaseDatabaseEngine, config: HomeServerConfig +) -> None: """Called before upgrading an existing database to check that it is broadly sane compared with the configuration. """ diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index fa732edcca..945707b0ec 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -14,7 +14,7 @@ # limitations under the License. import logging import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Pattern, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Pattern, Tuple, cast from synapse.appservice import ( ApplicationService, @@ -83,7 +83,7 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore): txn.execute( "SELECT COALESCE(max(txn_id), 0) FROM application_services_txns" ) - return txn.fetchone()[0] # type: ignore + return cast(Tuple[int], txn.fetchone())[0] self._as_txn_seq_gen = build_sequence_generator( db_conn, diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index b4a1b041b1..599b418383 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -14,7 +14,17 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple, cast +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, + cast, +) from synapse.logging import issue9533_logger from synapse.logging.opentracing import log_kv, set_tag, trace @@ -118,7 +128,13 @@ class DeviceInboxWorkerStore(SQLBaseStore): prefilled_cache=device_outbox_prefill, ) - def process_replication_rows(self, stream_name, instance_name, token, rows): + def process_replication_rows( + self, + stream_name: str, + instance_name: str, + token: int, + rows: Iterable[ToDeviceStream.ToDeviceStreamRow], + ) -> None: if stream_name == ToDeviceStream.NAME: # If replication is happening than postgres must be being used. assert isinstance(self._device_inbox_id_gen, MultiWriterIdGenerator) @@ -134,7 +150,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): ) return super().process_replication_rows(stream_name, instance_name, token, rows) - def get_to_device_stream_token(self): + def get_to_device_stream_token(self) -> int: return self._device_inbox_id_gen.get_current_token() async def get_messages_for_user_devices( @@ -301,7 +317,9 @@ class DeviceInboxWorkerStore(SQLBaseStore): if not user_ids_to_query: return {}, to_stream_id - def get_device_messages_txn(txn: LoggingTransaction): + def get_device_messages_txn( + txn: LoggingTransaction, + ) -> Tuple[Dict[Tuple[str, str], List[JsonDict]], int]: # Build a query to select messages from any of the given devices that # are between the given stream id bounds. 
@@ -428,7 +446,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): log_kv({"message": "No changes in cache since last check"}) return 0 - def delete_messages_for_device_txn(txn): + def delete_messages_for_device_txn(txn: LoggingTransaction) -> int: sql = ( "DELETE FROM device_inbox" " WHERE user_id = ? AND device_id = ?" @@ -455,15 +473,14 @@ class DeviceInboxWorkerStore(SQLBaseStore): @trace async def get_new_device_msgs_for_remote( - self, destination, last_stream_id, current_stream_id, limit - ) -> Tuple[List[dict], int]: + self, destination: str, last_stream_id: int, current_stream_id: int, limit: int + ) -> Tuple[List[JsonDict], int]: """ Args: - destination(str): The name of the remote server. - last_stream_id(int|long): The last position of the device message stream + destination: The name of the remote server. + last_stream_id: The last position of the device message stream that the server sent up to. - current_stream_id(int|long): The current position of the device - message stream. + current_stream_id: The current position of the device message stream. Returns: A list of messages for the device and where in the stream the messages got to. """ @@ -485,7 +502,9 @@ class DeviceInboxWorkerStore(SQLBaseStore): return [], last_stream_id @trace - def get_new_messages_for_remote_destination_txn(txn): + def get_new_messages_for_remote_destination_txn( + txn: LoggingTransaction, + ) -> Tuple[List[JsonDict], int]: sql = ( "SELECT stream_id, messages_json FROM device_federation_outbox" " WHERE destination = ?" @@ -527,7 +546,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): up_to_stream_id: Where to delete messages up to. """ - def delete_messages_for_remote_destination_txn(txn): + def delete_messages_for_remote_destination_txn(txn: LoggingTransaction) -> None: sql = ( "DELETE FROM device_federation_outbox" " WHERE destination = ?" @@ -566,7 +585,9 @@ class DeviceInboxWorkerStore(SQLBaseStore): if last_id == current_id: return [], current_id, False - def get_all_new_device_messages_txn(txn): + def get_all_new_device_messages_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Tuple[int, tuple]], int, bool]: # We limit like this as we might have multiple rows per stream_id, and # we want to make sure we always get all entries for any stream_id # we return. @@ -607,8 +628,8 @@ class DeviceInboxWorkerStore(SQLBaseStore): @trace async def add_messages_to_device_inbox( self, - local_messages_by_user_then_device: dict, - remote_messages_by_destination: dict, + local_messages_by_user_then_device: Dict[str, Dict[str, JsonDict]], + remote_messages_by_destination: Dict[str, JsonDict], ) -> int: """Used to send messages from this server. @@ -624,7 +645,9 @@ class DeviceInboxWorkerStore(SQLBaseStore): assert self._can_write_to_device - def add_messages_txn(txn, now_ms, stream_id): + def add_messages_txn( + txn: LoggingTransaction, now_ms: int, stream_id: int + ) -> None: # Add the local messages directly to the local inbox. 
self._add_messages_to_local_device_inbox_txn( txn, stream_id, local_messages_by_user_then_device @@ -677,11 +700,16 @@ class DeviceInboxWorkerStore(SQLBaseStore): return self._device_inbox_id_gen.get_current_token() async def add_messages_from_remote_to_device_inbox( - self, origin: str, message_id: str, local_messages_by_user_then_device: dict + self, + origin: str, + message_id: str, + local_messages_by_user_then_device: Dict[str, Dict[str, JsonDict]], ) -> int: assert self._can_write_to_device - def add_messages_txn(txn, now_ms, stream_id): + def add_messages_txn( + txn: LoggingTransaction, now_ms: int, stream_id: int + ) -> None: # Check if we've already inserted a matching message_id for that # origin. This can happen if the origin doesn't receive our # acknowledgement from the first time we received the message. @@ -727,8 +755,11 @@ class DeviceInboxWorkerStore(SQLBaseStore): return stream_id def _add_messages_to_local_device_inbox_txn( - self, txn, stream_id, messages_by_user_then_device - ): + self, + txn: LoggingTransaction, + stream_id: int, + messages_by_user_then_device: Dict[str, Dict[str, JsonDict]], + ) -> None: assert self._can_write_to_device local_by_user_then_device = {} @@ -840,8 +871,10 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): self._remove_dead_devices_from_device_inbox, ) - async def _background_drop_index_device_inbox(self, progress, batch_size): - def reindex_txn(conn): + async def _background_drop_index_device_inbox( + self, progress: JsonDict, batch_size: int + ) -> int: + def reindex_txn(conn: LoggingDatabaseConnection) -> None: txn = conn.cursor() txn.execute("DROP INDEX IF EXISTS device_inbox_stream_id") txn.close() diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 483dd80406..2df4dd4ed4 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -25,6 +25,7 @@ from typing import ( Optional, Set, Tuple, + cast, ) from synapse.api.errors import Codes, StoreError @@ -136,7 +137,9 @@ class DeviceWorkerStore(SQLBaseStore): Number of devices of this users. """ - def count_devices_by_users_txn(txn, user_ids): + def count_devices_by_users_txn( + txn: LoggingTransaction, user_ids: List[str] + ) -> int: sql = """ SELECT count(*) FROM devices @@ -149,7 +152,7 @@ class DeviceWorkerStore(SQLBaseStore): ) txn.execute(sql + clause, args) - return txn.fetchone()[0] + return cast(Tuple[int], txn.fetchone())[0] if not user_ids: return 0 @@ -468,7 +471,7 @@ class DeviceWorkerStore(SQLBaseStore): """ txn.execute(sql, (destination, from_stream_id, now_stream_id, limit)) - return list(txn) + return cast(List[Tuple[str, str, int, Optional[str]]], txn.fetchall()) async def _get_device_update_edus_by_remote( self, @@ -549,7 +552,7 @@ class DeviceWorkerStore(SQLBaseStore): async def _get_last_device_update_for_remote_user( self, destination: str, user_id: str, from_stream_id: int ) -> int: - def f(txn): + def f(txn: LoggingTransaction) -> int: prev_sent_id_sql = """ SELECT coalesce(max(stream_id), 0) as stream_id FROM device_lists_outbound_last_success @@ -767,7 +770,7 @@ class DeviceWorkerStore(SQLBaseStore): if not user_ids_to_check: return set() - def _get_users_whose_devices_changed_txn(txn): + def _get_users_whose_devices_changed_txn(txn: LoggingTransaction) -> Set[str]: changes = set() stream_id_where_clause = "stream_id > ?" 
@@ -966,7 +969,9 @@ class DeviceWorkerStore(SQLBaseStore): async def mark_remote_user_device_list_as_unsubscribed(self, user_id: str) -> None: """Mark that we no longer track device lists for remote user.""" - def _mark_remote_user_device_list_as_unsubscribed_txn(txn): + def _mark_remote_user_device_list_as_unsubscribed_txn( + txn: LoggingTransaction, + ) -> None: self.db_pool.simple_delete_txn( txn, table="device_lists_remote_extremeties", @@ -1004,7 +1009,7 @@ class DeviceWorkerStore(SQLBaseStore): ) def _store_dehydrated_device_txn( - self, txn, user_id: str, device_id: str, device_data: str + self, txn: LoggingTransaction, user_id: str, device_id: str, device_data: str ) -> Optional[str]: old_device_id = self.db_pool.simple_select_one_onecol_txn( txn, @@ -1081,7 +1086,7 @@ class DeviceWorkerStore(SQLBaseStore): """ yesterday = self._clock.time_msec() - prune_age - def _prune_txn(txn): + def _prune_txn(txn: LoggingTransaction) -> None: # look for (user, destination) pairs which have an update older than # the cutoff. # @@ -1204,8 +1209,10 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): "drop_device_lists_outbound_last_success_non_unique_idx", ) - async def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size): - def f(conn): + async def _drop_device_list_streams_non_unique_indexes( + self, progress: JsonDict, batch_size: int + ) -> int: + def f(conn: LoggingDatabaseConnection) -> None: txn = conn.cursor() txn.execute("DROP INDEX IF EXISTS device_lists_remote_cache_id") txn.execute("DROP INDEX IF EXISTS device_lists_remote_extremeties_id") @@ -1217,7 +1224,9 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): ) return 1 - async def _remove_duplicate_outbound_pokes(self, progress, batch_size): + async def _remove_duplicate_outbound_pokes( + self, progress: JsonDict, batch_size: int + ) -> int: # for some reason, we have accumulated duplicate entries in # device_lists_outbound_pokes, which makes prune_outbound_device_list_pokes less # efficient. @@ -1230,7 +1239,7 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): {"stream_id": 0, "destination": "", "user_id": "", "device_id": ""}, ) - def _txn(txn): + def _txn(txn: LoggingTransaction) -> int: clause, args = make_tuple_comparison_clause( [(x, last_row[x]) for x in KEY_COLS] ) @@ -1602,7 +1611,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): context = get_active_span_text_map() - def add_device_changes_txn(txn, stream_ids): + def add_device_changes_txn( + txn: LoggingTransaction, stream_ids: List[int] + ) -> None: self._add_device_change_to_stream_txn( txn, user_id, @@ -1635,8 +1646,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): txn: LoggingTransaction, user_id: str, device_ids: Collection[str], - stream_ids: List[str], - ): + stream_ids: List[int], + ) -> None: txn.call_after( self._device_list_stream_cache.entity_has_changed, user_id, @@ -1720,7 +1731,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): user_id: str, device_ids: Iterable[str], room_ids: Collection[str], - stream_ids: List[str], + stream_ids: List[int], context: Dict[str, str], ) -> None: """Record the user in the room has updated their device.""" @@ -1775,7 +1786,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): LIMIT ? 
""" - def get_uncoverted_outbound_room_pokes_txn(txn): + def get_uncoverted_outbound_room_pokes_txn( + txn: LoggingTransaction, + ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]: txn.execute(sql, (limit,)) return [ @@ -1808,7 +1821,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): Marks the associated row in `device_lists_changes_in_room` as handled. """ - def add_device_list_outbound_pokes_txn(txn, stream_ids: List[int]): + def add_device_list_outbound_pokes_txn( + txn: LoggingTransaction, stream_ids: List[int] + ) -> None: if hosts: self._add_device_outbound_poke_to_stream_txn( txn, diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py index 0aef121d83..04efad9e9a 100644 --- a/synapse/storage/databases/main/group_server.py +++ b/synapse/storage/databases/main/group_server.py @@ -522,7 +522,9 @@ class GroupServerWorkerStore(SQLBaseStore): desc="get_joined_groups", ) - async def get_all_groups_for_user(self, user_id, now_token) -> List[JsonDict]: + async def get_all_groups_for_user( + self, user_id: str, now_token: int + ) -> List[JsonDict]: def _get_all_groups_for_user_txn(txn: LoggingTransaction) -> List[JsonDict]: sql = """ SELECT group_id, type, membership, u.content diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 6990f3ed1d..0a19f607bd 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -15,11 +15,12 @@ import itertools import logging -from typing import Dict, Iterable, List, Optional, Tuple +from typing import Any, Dict, Iterable, List, Optional, Tuple from signedjson.key import decode_verify_key_bytes from synapse.storage._base import SQLBaseStore +from synapse.storage.database import LoggingTransaction from synapse.storage.keys import FetchKeyResult from synapse.storage.types import Cursor from synapse.util.caches.descriptors import cached, cachedList @@ -35,7 +36,9 @@ class KeyStore(SQLBaseStore): """Persistence for signature verification keys""" @cached() - def _get_server_verify_key(self, server_name_and_key_id): + def _get_server_verify_key( + self, server_name_and_key_id: Tuple[str, str] + ) -> FetchKeyResult: raise NotImplementedError() @cachedList( @@ -179,19 +182,21 @@ class KeyStore(SQLBaseStore): async def get_server_keys_json( self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]] - ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[dict]]: + ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[Dict[str, Any]]]: """Retrieve the key json for a list of server_keys and key ids. If no keys are found for a given server, key_id and source then that server, key_id, and source triplet entry will be an empty list. The JSON is returned as a byte array so that it can be efficiently used in an HTTP response. Args: - server_keys (list): List of (server_name, key_id, source) triplets. + server_keys: List of (server_name, key_id, source) triplets. 
Returns: A mapping from (server_name, key_id, source) triplets to a list of dicts """ - def _get_server_keys_json_txn(txn): + def _get_server_keys_json_txn( + txn: LoggingTransaction, + ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[Dict[str, Any]]]: results = {} for server_name, key_id, from_server in server_keys: keyvalues = {"server_name": server_name} diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 322ed05390..40ac377ca9 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -388,7 +388,14 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): return await self.db_pool.runInteraction("get_url_cache", get_url_cache_txn) async def store_url_cache( - self, url, response_code, etag, expires_ts, og, media_id, download_ts + self, + url: str, + response_code: int, + etag: Optional[str], + expires_ts: int, + og: Optional[str], + media_id: str, + download_ts: int, ) -> None: await self.db_pool.simple_insert( "local_media_repository_url_cache", @@ -441,7 +448,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): ) async def get_cached_remote_media( - self, origin, media_id: str + self, origin: str, media_id: str ) -> Optional[Dict[str, Any]]: return await self.db_pool.simple_select_one( "remote_media_cache", @@ -608,7 +615,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): ) async def delete_remote_media(self, media_origin: str, media_id: str) -> None: - def delete_remote_media_txn(txn): + def delete_remote_media_txn(txn: LoggingTransaction) -> None: self.db_pool.simple_delete_txn( txn, "remote_media_cache", diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index d3c4611686..b47c511450 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple, cast +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Tuple, cast from synapse.api.presence import PresenceState, UserPresenceState from synapse.replication.tcp.streams import PresenceStream @@ -103,7 +103,9 @@ class PresenceStore(PresenceBackgroundUpdateStore): prefilled_cache=presence_cache_prefill, ) - async def update_presence(self, presence_states) -> Tuple[int, int]: + async def update_presence( + self, presence_states: List[UserPresenceState] + ) -> Tuple[int, int]: assert self._can_persist_presence stream_ordering_manager = self._presence_id_gen.get_next_mult( @@ -121,7 +123,10 @@ class PresenceStore(PresenceBackgroundUpdateStore): return stream_orderings[-1], self._presence_id_gen.get_current_token() def _update_presence_txn( - self, txn: LoggingTransaction, stream_orderings, presence_states + self, + txn: LoggingTransaction, + stream_orderings: List[int], + presence_states: List[UserPresenceState], ) -> None: for stream_id, state in zip(stream_orderings, presence_states): txn.call_after( @@ -405,7 +410,13 @@ class PresenceStore(PresenceBackgroundUpdateStore): self._presence_on_startup = [] return active_on_startup - def process_replication_rows(self, stream_name, instance_name, token, rows) -> None: + def process_replication_rows( + self, + stream_name: str, + instance_name: str, + token: int, + rows: Iterable[Any], + ) -> None: if stream_name == PresenceStream.NAME: self._presence_id_gen.advance(instance_name, token) for row in rows: diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index cf64cd63a4..91286c9b65 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -14,11 +14,25 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Optional, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, + cast, +) from synapse.push import PusherConfig, ThrottleParams from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict from synapse.util import json_encoder @@ -117,7 +131,7 @@ class PusherWorkerStore(SQLBaseStore): return self._decode_pushers_rows(ret) async def get_all_pushers(self) -> Iterator[PusherConfig]: - def get_pushers(txn): + def get_pushers(txn: LoggingTransaction) -> Iterator[PusherConfig]: txn.execute("SELECT * FROM pushers") rows = self.db_pool.cursor_to_dict(txn) @@ -152,7 +166,9 @@ class PusherWorkerStore(SQLBaseStore): if last_id == current_id: return [], current_id, False - def get_all_updated_pushers_rows_txn(txn): + def get_all_updated_pushers_rows_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Tuple[int, tuple]], int, bool]: sql = """ SELECT id, user_name, app_id, pushkey FROM pushers @@ -160,10 +176,13 @@ class PusherWorkerStore(SQLBaseStore): ORDER BY id ASC LIMIT ? 
""" txn.execute(sql, (last_id, current_id, limit)) - updates = [ - (stream_id, (user_name, app_id, pushkey, False)) - for stream_id, user_name, app_id, pushkey in txn - ] + updates = cast( + List[Tuple[int, tuple]], + [ + (stream_id, (user_name, app_id, pushkey, False)) + for stream_id, user_name, app_id, pushkey in txn + ], + ) sql = """ SELECT stream_id, user_id, app_id, pushkey @@ -192,12 +211,12 @@ class PusherWorkerStore(SQLBaseStore): ) @cached(num_args=1, max_entries=15000) - async def get_if_user_has_pusher(self, user_id: str): + async def get_if_user_has_pusher(self, user_id: str) -> None: # This only exists for the cachedList decorator raise NotImplementedError() async def update_pusher_last_stream_ordering( - self, app_id, pushkey, user_id, last_stream_ordering + self, app_id: str, pushkey: str, user_id: str, last_stream_ordering: int ) -> None: await self.db_pool.simple_update_one( "pushers", @@ -291,7 +310,7 @@ class PusherWorkerStore(SQLBaseStore): last_user = progress.get("last_user", "") - def _delete_pushers(txn) -> int: + def _delete_pushers(txn: LoggingTransaction) -> int: sql = """ SELECT name FROM users @@ -339,7 +358,7 @@ class PusherWorkerStore(SQLBaseStore): last_pusher = progress.get("last_pusher", 0) - def _delete_pushers(txn) -> int: + def _delete_pushers(txn: LoggingTransaction) -> int: sql = """ SELECT p.id, access_token FROM pushers AS p @@ -396,7 +415,7 @@ class PusherWorkerStore(SQLBaseStore): last_pusher = progress.get("last_pusher", 0) - def _delete_pushers(txn) -> int: + def _delete_pushers(txn: LoggingTransaction) -> int: sql = """ SELECT p.id, p.user_name, p.app_id, p.pushkey @@ -502,7 +521,7 @@ class PusherStore(PusherWorkerStore): async def delete_pusher_by_app_id_pushkey_user_id( self, app_id: str, pushkey: str, user_id: str ) -> None: - def delete_pusher_txn(txn, stream_id): + def delete_pusher_txn(txn: LoggingTransaction, stream_id: int) -> None: self._invalidate_cache_and_stream( # type: ignore[attr-defined] txn, self.get_if_user_has_pusher, (user_id,) ) @@ -547,7 +566,7 @@ class PusherStore(PusherWorkerStore): # account. pushers = list(await self.get_pushers_by_user_id(user_id)) - def delete_pushers_txn(txn, stream_ids): + def delete_pushers_txn(txn: LoggingTransaction, stream_ids: List[int]) -> None: self._invalidate_cache_and_stream( # type: ignore[attr-defined] txn, self.get_if_user_has_pusher, (user_id,) ) diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index e653841fe5..5e340bd03d 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -370,10 +370,10 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def _update_state_for_partial_state_event_txn( self, - txn, + txn: LoggingTransaction, event: EventBase, context: EventContext, - ): + ) -> None: # we shouldn't have any outliers here assert not event.internal_metadata.is_outlier() diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 2d339b6008..f38bedbbcd 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -131,7 +131,7 @@ class UIAuthWorkerStore(SQLBaseStore): session_id: str, stage_type: str, result: Union[str, bool, JsonDict], - ): + ) -> None: """ Mark a session stage as completed. 
@@ -200,7 +200,9 @@ class UIAuthWorkerStore(SQLBaseStore): desc="set_ui_auth_client_dict", ) - async def set_ui_auth_session_data(self, session_id: str, key: str, value: Any): + async def set_ui_auth_session_data( + self, session_id: str, key: str, value: Any + ) -> None: """ Store a key-value pair into the sessions data associated with this request. This data is stored server-side and cannot be modified by @@ -223,7 +225,7 @@ class UIAuthWorkerStore(SQLBaseStore): def _set_ui_auth_session_data_txn( self, txn: LoggingTransaction, session_id: str, key: str, value: Any - ): + ) -> None: # Get the current value. result = cast( Dict[str, Any], @@ -275,7 +277,7 @@ class UIAuthWorkerStore(SQLBaseStore): session_id: str, user_agent: str, ip: str, - ): + ) -> None: """Add the given user agent / IP to the tracking table""" await self.db_pool.simple_upsert( table="ui_auth_sessions_ips", @@ -318,7 +320,7 @@ class UIAuthWorkerStore(SQLBaseStore): def _delete_old_ui_auth_sessions_txn( self, txn: LoggingTransaction, expiration_time: int - ): + ) -> None: # Get the expired sessions. sql = "SELECT session_id FROM ui_auth_sessions WHERE creation_time <= ?" txn.execute(sql, [expiration_time]) From e8d1ec0e92da96a01b6c723fcdb4eac27f801e87 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 27 Apr 2022 13:57:53 +0100 Subject: [PATCH 04/87] Add option to enable token registration without requiring 3pids (#12526) --- changelog.d/12526.feature | 1 + docs/sample_config.yaml | 6 ++++++ synapse/config/registration.py | 9 +++++++++ synapse/handlers/ui_auth/checkers.py | 4 +++- synapse/rest/client/register.py | 7 ++++++- 5 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12526.feature diff --git a/changelog.d/12526.feature b/changelog.d/12526.feature new file mode 100644 index 0000000000..c01596282c --- /dev/null +++ b/changelog.d/12526.feature @@ -0,0 +1 @@ +Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b8d8c0dbf0..67184c6b1a 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1323,6 +1323,12 @@ oembed: # #registration_requires_token: true +# Allow users to submit a token during registration to bypass any required 3pid +# steps configured in `registrations_require_3pid`. +# Defaults to false, requiring that registration tokens (if enabled) complete a 3pid flow. +# +#enable_registration_token_3pid_bypass: false + # If set, allows registration of standard or admin accounts by anyone who # has the shared secret, even if registration is otherwise disabled. # diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 39e9acb62a..70eb7e6a97 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -43,6 +43,9 @@ class RegistrationConfig(Config): self.registration_requires_token = config.get( "registration_requires_token", False ) + self.enable_registration_token_3pid_bypasss = config.get( + "enable_registration_token_3pid_bypasss", False + ) self.registration_shared_secret = config.get("registration_shared_secret") self.bcrypt_rounds = config.get("bcrypt_rounds", 12) @@ -309,6 +312,12 @@ class RegistrationConfig(Config): # #registration_requires_token: true + # Allow users to submit a token during registration to bypass any required 3pid + # steps configured in `registrations_require_3pid`. 
+ # Defaults to false, requiring that registration tokens (if enabled) complete a 3pid flow. + # + #enable_registration_token_3pid_bypass: false + # If set, allows registration of standard or admin accounts by anyone who # has the shared secret, even if registration is otherwise disabled. # diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 472b029af3..e2a441066d 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -256,7 +256,9 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs - self._enabled = bool(hs.config.registration.registration_requires_token) + self._enabled = bool( + hs.config.registration.registration_requires_token + ) or bool(hs.config.registration.enable_registration_token_3pid_bypasss) self.store = hs.get_datastores().main def is_enabled(self) -> bool: diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 70baf50fa4..13ef6b35a0 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -929,6 +929,10 @@ def _calculate_registration_flows( # always let users provide both MSISDN & email flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY]) + # Add a flow that doesn't require any 3pids, if the config requests it. + if config.registration.enable_registration_token_3pid_bypasss: + flows.append([LoginType.REGISTRATION_TOKEN]) + # Prepend m.login.terms to all flows if we're requiring consent if config.consent.user_consent_at_registration: for flow in flows: @@ -942,7 +946,8 @@ def _calculate_registration_flows( # Prepend registration token to all flows if we're requiring a token if config.registration.registration_requires_token: for flow in flows: - flow.insert(0, LoginType.REGISTRATION_TOKEN) + if LoginType.REGISTRATION_TOKEN not in flow: + flow.insert(0, LoginType.REGISTRATION_TOKEN) return flows From 8a23bde82352f18d7d6f098e3f3f0ce7099efb86 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 27 Apr 2022 09:00:07 -0400 Subject: [PATCH 05/87] Consistently use collections.abc.Mapping to check frozendict. (#12564) --- changelog.d/12564.misc | 1 + synapse/events/utils.py | 7 ++++--- synapse/handlers/relations.py | 4 ++-- synapse/storage/databases/main/state.py | 5 ++--- synapse/util/frozenutils.py | 3 ++- 5 files changed, 11 insertions(+), 9 deletions(-) create mode 100644 changelog.d/12564.misc diff --git a/changelog.d/12564.misc b/changelog.d/12564.misc new file mode 100644 index 0000000000..207c322464 --- /dev/null +++ b/changelog.d/12564.misc @@ -0,0 +1 @@ +Consistently check if an object is a `frozendict`. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index f8d3ba5456..a6c48308b3 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -27,7 +27,6 @@ from typing import ( ) import attr -from frozendict import frozendict from synapse.api.constants import EventContentFields, EventTypes, RelationTypes from synapse.api.errors import Codes, SynapseError @@ -204,7 +203,9 @@ def _copy_field(src: JsonDict, dst: JsonDict, field: List[str]) -> None: key_to_move = field.pop(-1) sub_dict = src for sub_field in field: # e.g. 
sub_field => "content" - if sub_field in sub_dict and type(sub_dict[sub_field]) in [dict, frozendict]: + if sub_field in sub_dict and isinstance( + sub_dict[sub_field], collections.abc.Mapping + ): sub_dict = sub_dict[sub_field] else: return @@ -622,7 +623,7 @@ def validate_canonicaljson(value: Any) -> None: # Note that Infinity, -Infinity, and NaN are also considered floats. raise SynapseError(400, "Bad JSON value: float", Codes.BAD_JSON) - elif isinstance(value, (dict, frozendict)): + elif isinstance(value, collections.abc.Mapping): for v in value.values(): validate_canonicaljson(v) diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 5efb561273..b5dc9f74b3 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import collections.abc import logging from typing import ( TYPE_CHECKING, @@ -24,7 +25,6 @@ from typing import ( ) import attr -from frozendict import frozendict from synapse.api.constants import RelationTypes from synapse.api.errors import SynapseError @@ -380,7 +380,7 @@ class RelationsHandler: # Do not bundle aggregations for an event which represents an edit or an # annotation. It does not make sense for them to have related events. relates_to = event.content.get("m.relates_to") - if isinstance(relates_to, (dict, frozendict)): + if isinstance(relates_to, collections.abc.Mapping): relation_type = relates_to.get("rel_type") if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE): continue diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 5e340bd03d..18ae8aee29 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -12,11 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import collections.abc import logging from typing import TYPE_CHECKING, Collection, Dict, Iterable, Optional, Set, Tuple -from frozendict import frozendict - from synapse.api.constants import EventTypes, Membership from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion @@ -160,7 +159,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): predecessor = create_event.content.get("predecessor", None) # Ensure the key is a dictionary - if not isinstance(predecessor, (dict, frozendict)): + if not isinstance(predecessor, collections.abc.Mapping): return None # The keys must be strings since the data is JSON. diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 9c405eb4d7..7223af1a36 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import collections.abc from typing import Any from frozendict import frozendict @@ -35,7 +36,7 @@ def freeze(o: Any) -> Any: def unfreeze(o: Any) -> Any: - if isinstance(o, (dict, frozendict)): + if isinstance(o, collections.abc.Mapping): return {k: unfreeze(v) for k, v in o.items()} if isinstance(o, (bytes, str)): From 646324437543c096e737777c81b4fe4b45c3e1a7 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 27 Apr 2022 14:03:44 +0100 Subject: [PATCH 06/87] Remove unused `# type: ignore`s (#12531) Over time we've begun to use newer versions of mypy, typeshed, stub packages---and of course we've improved our own annotations. This makes some type ignore comments no longer necessary. I have removed them. There was one exception: a module that imports `select.epoll`. The ignore is redundant on Linux, but I've kept it ignored for those of us who work on the source tree using not-Linux. (#11771) I'm more interested in the config line which enforces this. I want unused ignores to be reported, because I think it's useful feedback when annotating to know when you've fixed a problem you had to previously ignore. * Installing extras before typechecking Lacking an easy way to install all extras generically, let's bite the bullet and make install the hand-maintained `all` extra before typechecking. Now that https://github.com/matrix-org/backend-meta/pull/6 is merged to the release/v1 branch. --- .github/workflows/tests.yml | 8 ++------ changelog.d/12531.misc | 1 + mypy.ini | 6 ++++++ stubs/sortedcontainers/sorteddict.pyi | 4 +--- synapse/app/_base.py | 4 ++-- synapse/config/server.py | 6 ++---- synapse/federation/federation_server.py | 4 ++-- synapse/federation/transport/client.py | 10 +++++----- synapse/handlers/auth.py | 2 +- synapse/handlers/oidc.py | 2 +- synapse/handlers/search.py | 6 +++--- synapse/http/server.py | 4 ++-- synapse/module_api/__init__.py | 4 +++- .../databases/main/monthly_active_users.py | 6 +++--- synapse/storage/prepare_database.py | 4 ++-- synapse/util/caches/ttlcache.py | 2 +- tests/handlers/test_register.py | 9 +++------ tests/module_api/test_account_data_manager.py | 17 +++++++++++------ .../replication/test_federation_sender_shard.py | 8 ++++---- tests/test_utils/__init__.py | 8 ++++---- tests/test_utils/logging_setup.py | 2 +- 21 files changed, 60 insertions(+), 57 deletions(-) create mode 100644 changelog.d/12531.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index cad4cb6d77..efa35b71df 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -20,13 +20,9 @@ jobs: - run: scripts-dev/config-lint.sh lint: - # This does a vanilla `poetry install` - no extras. I'm slightly anxious - # that we might skip some typechecks on code that uses extras. However, - # I think the right way to fix this is to mark any extras needed for - # typechecking as development dependencies. To detect this, we ought to - # turn up mypy's strictness: disallow unknown imports and be accept fewer - # uses of `Any`. uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1" + with: + typechecking-extras: "all" lint-crlf: runs-on: ubuntu-latest diff --git a/changelog.d/12531.misc b/changelog.d/12531.misc new file mode 100644 index 0000000000..412fc9b6dc --- /dev/null +++ b/changelog.d/12531.misc @@ -0,0 +1 @@ +Remove unused `# type: ignore`s. 
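The mypy.ini hunk below turns on `warn_unused_ignores`, which is what produces the feedback loop the commit message describes. A minimal sketch of the effect, using a hypothetical module that is not part of this patch:

    # example.py -- hypothetical, not from this series.
    x: int = 5
    y: int = x + 1  # type: ignore  # stale: this line type-checks fine

    # mypy previously accepted the stale comment silently; with
    # warn_unused_ignores = True it now reports something like:
    #   example.py:3: error: Unused "type: ignore" comment
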
diff --git a/mypy.ini b/mypy.ini index a663bf6975..280f1e898e 100644 --- a/mypy.ini +++ b/mypy.ini @@ -7,6 +7,7 @@ show_error_codes = True show_traceback = True mypy_path = stubs warn_unreachable = True +warn_unused_ignores = True local_partial_types = True no_implicit_optional = True @@ -134,6 +135,11 @@ disallow_untyped_defs = True [mypy-synapse.metrics.*] disallow_untyped_defs = True +[mypy-synapse.metrics._reactor_metrics] +# This module imports select.epoll. That exists on Linux, but doesn't on macOS. +# See https://github.com/matrix-org/synapse/pull/11771. +warn_unused_ignores = False + [mypy-synapse.module_api.*] disallow_untyped_defs = True diff --git a/stubs/sortedcontainers/sorteddict.pyi b/stubs/sortedcontainers/sorteddict.pyi index e18d617281..3a4f9c3076 100644 --- a/stubs/sortedcontainers/sorteddict.pyi +++ b/stubs/sortedcontainers/sorteddict.pyi @@ -115,9 +115,7 @@ class SortedKeysView(KeysView[_KT_co], Sequence[_KT_co]): def __getitem__(self, index: slice) -> List[_KT_co]: ... def __delitem__(self, index: Union[int, slice]) -> None: ... -class SortedItemsView( # type: ignore - ItemsView[_KT_co, _VT_co], Sequence[Tuple[_KT_co, _VT_co]] -): +class SortedItemsView(ItemsView[_KT_co, _VT_co], Sequence[Tuple[_KT_co, _VT_co]]): def __iter__(self) -> Iterator[Tuple[_KT_co, _VT_co]]: ... @overload def __getitem__(self, index: int) -> Tuple[_KT_co, _VT_co]: ... diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 37321f9133..d28b87a3f4 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -48,7 +48,6 @@ from twisted.logger import LoggingFile, LogLevel from twisted.protocols.tls import TLSMemoryBIOFactory from twisted.python.threadpool import ThreadPool -import synapse from synapse.api.constants import MAX_PDU_SIZE from synapse.app import check_bind_error from synapse.app.phone_stats_home import start_phone_stats_home @@ -60,6 +59,7 @@ from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.logging.context import PreserveLoggingContext +from synapse.logging.opentracing import init_tracer from synapse.metrics import install_gc_manager, register_threadpool from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.metrics.jemalloc import setup_jemalloc_stats @@ -431,7 +431,7 @@ async def start(hs: "HomeServer") -> None: refresh_certificate(hs) # Start the tracer - synapse.logging.opentracing.init_tracer(hs) # type: ignore[attr-defined] # noqa + init_tracer(hs) # noqa # Instantiate the modules so they can register their web resources to the module API # before we start the listeners. 
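The `synapse/app/_base.py` hunk above removes an `attr-defined` ignore by importing `init_tracer` directly instead of reaching through the parent package. A contrived sketch of the pattern — the `pkg` layout here is hypothetical, not Synapse code:

    # Layout: pkg/__init__.py is empty; pkg/tracing.py defines init_tracer().
    import pkg

    # mypy: error: Module "pkg" has no attribute "tracing"  [attr-defined]
    # (this only worked at runtime because another module happened to
    # import the submodule as a side effect)
    pkg.tracing.init_tracer()  # type: ignore[attr-defined]

    # Importing the name directly is something mypy can check, so the
    # ignore can be dropped:
    from pkg.tracing import init_tracer

    init_tracer()
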
diff --git a/synapse/config/server.py b/synapse/config/server.py index d771045b52..b6cd326416 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -186,7 +186,7 @@ KNOWN_RESOURCES = { class HttpResourceConfig: names: List[str] = attr.ib( factory=list, - validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)), # type: ignore + validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)), ) compress: bool = attr.ib( default=False, @@ -231,9 +231,7 @@ class ManholeConfig: class LimitRemoteRoomsConfig: enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False) complexity: Union[float, int] = attr.ib( - validator=attr.validators.instance_of( - (float, int) # type: ignore[arg-type] # noqa - ), + validator=attr.validators.instance_of((float, int)), # noqa default=1.0, ) complexity_error: str = attr.ib( diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index beab1227b8..884b5d60b4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -268,8 +268,8 @@ class FederationServer(FederationBase): transaction_id=transaction_id, destination=destination, origin=origin, - origin_server_ts=transaction_data.get("origin_server_ts"), # type: ignore - pdus=transaction_data.get("pdus"), # type: ignore + origin_server_ts=transaction_data.get("origin_server_ts"), # type: ignore[arg-type] + pdus=transaction_data.get("pdus"), edus=transaction_data.get("edus"), ) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1421050b9a..9ce06dfa28 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -229,21 +229,21 @@ class TransportLayerClient: """ logger.debug( "send_data dest=%s, txid=%s", - transaction.destination, # type: ignore - transaction.transaction_id, # type: ignore + transaction.destination, + transaction.transaction_id, ) - if transaction.destination == self.server_name: # type: ignore + if transaction.destination == self.server_name: raise RuntimeError("Transport layer cannot send to itself!") # FIXME: This is only used by the tests. The actual json sent is # generated by the json_data_callback. json_data = transaction.get_dict() - path = _create_v1_path("/send/%s", transaction.transaction_id) # type: ignore + path = _create_v1_path("/send/%s", transaction.transaction_id) return await self.client.put_json( - transaction.destination, # type: ignore + transaction.destination, path=path, data=json_data, json_data_callback=json_data_callback, diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 86991d26ce..22678d486d 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -481,7 +481,7 @@ class AuthHandler: sid = authdict["session"] # Convert the URI and method to strings. - uri = request.uri.decode("utf-8") # type: ignore + uri = request.uri.decode("utf-8") method = request.method.decode("utf-8") # If there's no session ID, create a new session. 
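Several of the datastore hunks earlier in this series make a related trade: `txn.fetchone()[0]  # type: ignore` becomes `cast(Tuple[int], txn.fetchone())[0]`. A minimal sketch of the reasoning, with a hypothetical value standing in for a DB-API cursor result:

    from typing import Optional, Tuple, cast

    def extract_count(fetchone_result: Optional[Tuple[int]]) -> int:
        # fetchone() is Optional because an exhausted cursor yields None,
        # but a "SELECT COUNT(*)" query always returns exactly one row.
        # cast() records that guarantee for mypy rather than muting the
        # "value could be None" error with a blanket ignore.
        return cast(Tuple[int], fetchone_result)[0]
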
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 724b9cfcb4..f6ffb7d18d 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -966,7 +966,7 @@ class OidcProvider: "Mapping provider does not support de-duplicating Matrix IDs" ) - attributes = await self._user_mapping_provider.map_user_attributes( # type: ignore + attributes = await self._user_mapping_provider.map_user_attributes( userinfo, token ) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 102dd4b57d..5619f8f50e 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -357,7 +357,7 @@ class SearchHandler: itertools.chain( # The events_before and events_after for each context. itertools.chain.from_iterable( - itertools.chain(context["events_before"], context["events_after"]) # type: ignore[arg-type] + itertools.chain(context["events_before"], context["events_after"]) for context in contexts.values() ), # The returned events. @@ -373,10 +373,10 @@ class SearchHandler: for context in contexts.values(): context["events_before"] = self._event_serializer.serialize_events( - context["events_before"], time_now, bundle_aggregations=aggregations # type: ignore[arg-type] + context["events_before"], time_now, bundle_aggregations=aggregations ) context["events_after"] = self._event_serializer.serialize_events( - context["events_after"], time_now, bundle_aggregations=aggregations # type: ignore[arg-type] + context["events_after"], time_now, bundle_aggregations=aggregations ) results = [ diff --git a/synapse/http/server.py b/synapse/http/server.py index 31ca841889..b8a7a0f5df 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -295,7 +295,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): if isawaitable(raw_callback_return): callback_return = await raw_callback_return else: - callback_return = raw_callback_return # type: ignore + callback_return = raw_callback_return return callback_return @@ -469,7 +469,7 @@ class JsonResource(DirectServeJsonResource): if isinstance(raw_callback_return, (defer.Deferred, types.CoroutineType)): callback_return = await raw_callback_return else: - callback_return = raw_callback_return # type: ignore + callback_return = raw_callback_return return callback_return diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 8f9e629274..7e6e23db73 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -109,6 +109,7 @@ from synapse.storage.state import StateFilter from synapse.types import ( DomainSpecificString, JsonDict, + JsonMapping, Requester, StateMap, UserID, @@ -151,6 +152,7 @@ __all__ = [ "PRESENCE_ALL_USERS", "LoginResponse", "JsonDict", + "JsonMapping", "EventBase", "StateMap", "ProfileInfo", @@ -1419,7 +1421,7 @@ class AccountDataManager: f"{user_id} is not local to this homeserver; can't access account data for remote users." ) - async def get_global(self, user_id: str, data_type: str) -> Optional[JsonDict]: + async def get_global(self, user_id: str, data_type: str) -> Optional[JsonMapping]: """ Gets some global account data, of a specified type, for the specified user. diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 4f1c22c71b..5beb8f1d4b 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -232,10 +232,10 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): # is racy. 
# Have resolved to invalidate the whole cache for now and do # something about it if and when the perf becomes significant - self._invalidate_all_cache_and_stream( # type: ignore[attr-defined] + self._invalidate_all_cache_and_stream( txn, self.user_last_seen_monthly_active ) - self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) # type: ignore[attr-defined] + self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) reserved_users = await self.get_registered_reserved_users() await self.db_pool.runInteraction( @@ -363,7 +363,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): if self._limit_usage_by_mau or self._mau_stats_only: # Trial users and guests should not be included as part of MAU group - is_guest = await self.is_guest(user_id) # type: ignore[attr-defined] + is_guest = await self.is_guest(user_id) if is_guest: return is_trial = await self.is_trial_user(user_id) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index e3153d1a4a..546d6bae6e 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -501,11 +501,11 @@ def _upgrade_existing_database( if hasattr(module, "run_create"): logger.info("Running %s:run_create", relative_path) - module.run_create(cur, database_engine) # type: ignore + module.run_create(cur, database_engine) if not is_empty and hasattr(module, "run_upgrade"): logger.info("Running %s:run_upgrade", relative_path) - module.run_upgrade(cur, database_engine, config=config) # type: ignore + module.run_upgrade(cur, database_engine, config=config) elif ext == ".pyc" or file_name == "__pycache__": # Sometimes .pyc files turn up anyway even though we've # disabled their generation; e.g. from distribution package diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index 0b9ac26b69..f6b3ee31e4 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -107,7 +107,7 @@ class TTLCache(Generic[KT, VT]): self._metrics.inc_hits() return e.value, e.expiry_time, e.ttl - def pop(self, key: KT, default: T = SENTINEL) -> Union[VT, T]: # type: ignore + def pop(self, key: KT, default: T = SENTINEL) -> Union[VT, T]: """Remove a value from the cache If key is in the cache, remove it and return its value, else return default. diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 45fd30cf43..b6ba19c739 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -193,8 +193,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): @override_config({"limit_usage_by_mau": True}) def test_get_or_create_user_mau_not_blocked(self): - # Type ignore: mypy doesn't like us assigning to methods. - self.store.count_monthly_users = Mock( # type: ignore[assignment] + self.store.count_monthly_users = Mock( return_value=make_awaitable(self.hs.config.server.max_mau_value - 1) ) # Ensure does not throw exception @@ -202,8 +201,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): @override_config({"limit_usage_by_mau": True}) def test_get_or_create_user_mau_blocked(self): - # Type ignore: mypy doesn't like us assigning to methods. 
- self.store.get_monthly_active_count = Mock( # type: ignore[assignment] + self.store.get_monthly_active_count = Mock( return_value=make_awaitable(self.lots_of_users) ) self.get_failure( @@ -211,8 +209,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): ResourceLimitError, ) - # Type ignore: mypy doesn't like us assigning to methods. - self.store.get_monthly_active_count = Mock( # type: ignore[assignment] + self.store.get_monthly_active_count = Mock( return_value=make_awaitable(self.hs.config.server.max_mau_value) ) self.get_failure( diff --git a/tests/module_api/test_account_data_manager.py b/tests/module_api/test_account_data_manager.py index bec018d9e7..89009bea8c 100644 --- a/tests/module_api/test_account_data_manager.py +++ b/tests/module_api/test_account_data_manager.py @@ -11,8 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.errors import SynapseError from synapse.rest import admin +from synapse.server import HomeServer +from synapse.util import Clock from tests.unittest import HomeserverTestCase @@ -22,7 +26,9 @@ class ModuleApiTestCase(HomeserverTestCase): admin.register_servlets, ] - def prepare(self, reactor, clock, homeserver) -> None: + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: self._store = homeserver.get_datastores().main self._module_api = homeserver.get_module_api() self._account_data_mgr = self._module_api.account_data_manager @@ -91,7 +97,7 @@ class ModuleApiTestCase(HomeserverTestCase): ) with self.assertRaises(TypeError): # This throws an exception because it's a frozen dict. - the_data["wombat"] = False + the_data["wombat"] = False # type: ignore[index] def test_put_global(self) -> None: """ @@ -143,15 +149,14 @@ class ModuleApiTestCase(HomeserverTestCase): with self.assertRaises(TypeError): # The account data type must be a string. self.get_success_or_raise( - self._module_api.account_data_manager.put_global( - self.user_id, 42, {} # type: ignore - ) + self._module_api.account_data_manager.put_global(self.user_id, 42, {}) # type: ignore[arg-type] ) with self.assertRaises(TypeError): # The account data dict must be a dict. 
+ # noinspection PyTypeChecker self.get_success_or_raise( self._module_api.account_data_manager.put_global( - self.user_id, "test.data", 42 # type: ignore + self.user_id, "test.data", 42 # type: ignore[arg-type] ) ) diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index ba1a63c0d6..6104a55aa1 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -102,8 +102,8 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): for i in range(20): server_name = "other_server_%d" % (i,) room = self.create_room_with_remote_server(user, token, server_name) - mock_client1.reset_mock() # type: ignore[attr-defined] - mock_client2.reset_mock() # type: ignore[attr-defined] + mock_client1.reset_mock() + mock_client2.reset_mock() self.create_and_send_event(room, UserID.from_string(user)) self.replicate() @@ -167,8 +167,8 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): for i in range(20): server_name = "other_server_%d" % (i,) room = self.create_room_with_remote_server(user, token, server_name) - mock_client1.reset_mock() # type: ignore[attr-defined] - mock_client2.reset_mock() # type: ignore[attr-defined] + mock_client1.reset_mock() + mock_client2.reset_mock() self.get_success( typing_handler.started_typing( diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index f05a373aa0..0d0d6faf0d 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -52,7 +52,7 @@ def make_awaitable(result: TV) -> Awaitable[TV]: This uses Futures as they can be awaited multiple times so can be returned to multiple callers. """ - future = Future() # type: ignore + future: Future[TV] = Future() future.set_result(result) return future @@ -69,7 +69,7 @@ def setup_awaitable_errors() -> Callable[[], None]: # State shared between unraisablehook and check_for_unraisable_exceptions. unraisable_exceptions = [] - orig_unraisablehook = sys.unraisablehook # type: ignore + orig_unraisablehook = sys.unraisablehook def unraisablehook(unraisable): unraisable_exceptions.append(unraisable.exc_value) @@ -78,11 +78,11 @@ def setup_awaitable_errors() -> Callable[[], None]: """ A method to be used as a clean-up that fails a test-case if there are any new unraisable exceptions. """ - sys.unraisablehook = orig_unraisablehook # type: ignore + sys.unraisablehook = orig_unraisablehook if unraisable_exceptions: raise unraisable_exceptions.pop() - sys.unraisablehook = unraisablehook # type: ignore + sys.unraisablehook = unraisablehook return cleanup diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index 51a197a8c6..9228454c9e 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -27,7 +27,7 @@ class ToTwistedHandler(logging.Handler): def emit(self, record): log_entry = self.format(record) log_level = record.levelname.lower().replace("warning", "warn") - self.tx_log.emit( # type: ignore + self.tx_log.emit( twisted.logger.LogLevel.levelWithName(log_level), "{entry}", entry=log_entry ) From 30c8e7e408322967e5beb2a64ef5f796cb8df226 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 27 Apr 2022 14:10:31 +0100 Subject: [PATCH 07/87] Make `scripts-dev` pass `mypy --disallow-untyped-defs` (#12356) Not enforced in config yet. One day. 
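To illustrate the kind of change `--disallow-untyped-defs` forces (a hypothetical
before/after sketch, not an excerpt from the diff below): every `def` must carry
explicit parameter and return annotations, including `-> None` for functions that
return nothing.

    from typing import Optional, Sequence

    # Rejected under --disallow-untyped-defs: the definition itself is
    # unannotated, so mypy reports "Function is missing a type annotation".
    # def build(dist, docker_build_args=None):
    #     ...

    # Accepted: every parameter and the return type are spelled out,
    # including "-> None" for functions that return nothing.
    def build(dist: str, docker_build_args: Optional[Sequence[str]] = None) -> None:
        print(dist, tuple(docker_build_args or ()))

    build("debian:bullseye")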
--- changelog.d/12356.misc | 1 + mypy.ini | 10 +++-- poetry.lock | 25 ++++++++++--- pyproject.toml | 4 +- scripts-dev/build_debian_packages.py | 21 +++++++---- scripts-dev/federation_client.py | 27 +++++++++----- scripts-dev/mypy_synapse_plugin.py | 4 +- scripts-dev/release.py | 55 +++++++++++++++++----------- scripts-dev/sign_json.py | 2 +- 9 files changed, 96 insertions(+), 53 deletions(-) create mode 100644 changelog.d/12356.misc diff --git a/changelog.d/12356.misc b/changelog.d/12356.misc new file mode 100644 index 0000000000..43e1929106 --- /dev/null +++ b/changelog.d/12356.misc @@ -0,0 +1 @@ +Fix scripts-dev to pass typechecking. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 280f1e898e..ef28216418 100644 --- a/mypy.ini +++ b/mypy.ini @@ -24,10 +24,6 @@ files = # https://docs.python.org/3/library/re.html#re.X exclude = (?x) ^( - |scripts-dev/build_debian_packages.py - |scripts-dev/federation_client.py - |scripts-dev/release.py - |synapse/storage/databases/__init__.py |synapse/storage/databases/main/cache.py |synapse/storage/databases/main/devices.py @@ -308,6 +304,9 @@ ignore_missing_imports = True [mypy-pympler.*] ignore_missing_imports = True +[mypy-redbaron.*] +ignore_missing_imports = True + [mypy-rust_python_jaeger_reporter.*] ignore_missing_imports = True @@ -323,6 +322,9 @@ ignore_missing_imports = True [mypy-signedjson.*] ignore_missing_imports = True +[mypy-srvlookup.*] +ignore_missing_imports = True + [mypy-treq.*] ignore_missing_imports = True diff --git a/poetry.lock b/poetry.lock index 8c7af1fa1e..e27a44989c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -309,14 +309,15 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.14" -description = "Python Git Library" +version = "3.1.27" +description = "GitPython is a python library used to interact with Git repositories" category = "dev" optional = false -python-versions = ">=3.4" +python-versions = ">=3.7" [package.dependencies] gitdb = ">=4.0.1,<5" +typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} [[package]] name = "hiredis" @@ -1315,6 +1316,14 @@ category = "dev" optional = false python-versions = "*" +[[package]] +name = "types-commonmark" +version = "0.9.2" +description = "Typing stubs for commonmark" +category = "dev" +optional = false +python-versions = "*" + [[package]] name = "types-cryptography" version = "3.3.15" @@ -1553,7 +1562,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "f482a4f594a165dfe01ce253a22510d5faf38647ab0dcebc35789350cafd9bf0" +content-hash = "3825cef058b8c9f520ef4b7acb92519be95db9a663a61c2e89a5fe431ed55655" [metadata.files] attrs = [ @@ -1766,8 +1775,8 @@ gitdb = [ {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"}, ] gitpython = [ - {file = "GitPython-3.1.14-py3-none-any.whl", hash = "sha256:3283ae2fba31c913d857e12e5ba5f9a7772bbc064ae2bb09efafa71b0dd4939b"}, - {file = "GitPython-3.1.14.tar.gz", hash = "sha256:be27633e7509e58391f10207cd32b2a6cf5b908f92d9cd30da2e514e1137af61"}, + {file = "GitPython-3.1.27-py3-none-any.whl", hash = "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"}, + {file = "GitPython-3.1.27.tar.gz", hash = "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"}, ] hiredis = [ {file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"}, @@ -2588,6 +2597,10 @@ types-bleach = [ 
{file = "types-bleach-4.1.4.tar.gz", hash = "sha256:2d30c2c4fb6854088ac636471352c9a51bf6c089289800d2a8060820a01cd43a"}, {file = "types_bleach-4.1.4-py3-none-any.whl", hash = "sha256:edffe173ed6d7b6f3543036a96204a9319c3bf6c3645917b14274e43f000cc9b"}, ] +types-commonmark = [ + {file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"}, + {file = "types_commonmark-0.9.2-py3-none-any.whl", hash = "sha256:56f20199a1f9a2924443211a0ef97f8b15a8a956a7f4e9186be6950bf38d6d02"}, +] types-cryptography = [ {file = "types-cryptography-3.3.15.tar.gz", hash = "sha256:a7983a75a7b88a18f88832008f0ef140b8d1097888ec1a0824ec8fb7e105273b"}, {file = "types_cryptography-3.3.15-py3-none-any.whl", hash = "sha256:d9b0dd5465d7898d400850e7f35e5518aa93a7e23d3e11757cd81b4777089046"}, diff --git a/pyproject.toml b/pyproject.toml index bdded78434..e6f2dc16cd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -251,6 +251,7 @@ flake8 = "*" mypy = "==0.931" mypy-zope = "==0.3.5" types-bleach = ">=4.1.0" +types-commonmark = ">=0.9.2" types-jsonschema = ">=3.2.0" types-opentracing = ">=2.4.2" types-Pillow = ">=8.3.4" @@ -270,7 +271,8 @@ idna = ">=2.5" # The following are used by the release script click = "==8.1.0" -GitPython = "==3.1.14" +# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints. +GitPython = ">=3.1.20" commonmark = "==0.9.1" pygithub = "==1.55" # The following are executed as commands by the release script. diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index e3e6878686..38564893e9 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -17,7 +17,8 @@ import subprocess import sys import threading from concurrent.futures import ThreadPoolExecutor -from typing import Optional, Sequence +from types import FrameType +from typing import Collection, Optional, Sequence, Set DISTS = ( "debian:buster", # oldstable: EOL 2022-08 @@ -41,15 +42,17 @@ projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) class Builder(object): def __init__( - self, redirect_stdout=False, docker_build_args: Optional[Sequence[str]] = None + self, + redirect_stdout: bool = False, + docker_build_args: Optional[Sequence[str]] = None, ): self.redirect_stdout = redirect_stdout self._docker_build_args = tuple(docker_build_args or ()) - self.active_containers = set() + self.active_containers: Set[str] = set() self._lock = threading.Lock() self._failed = False - def run_build(self, dist, skip_tests=False): + def run_build(self, dist: str, skip_tests: bool = False) -> None: """Build deb for a single distribution""" if self._failed: @@ -63,7 +66,7 @@ class Builder(object): self._failed = True raise - def _inner_build(self, dist, skip_tests=False): + def _inner_build(self, dist: str, skip_tests: bool = False) -> None: tag = dist.split(":", 1)[1] # Make the dir where the debs will live. 
@@ -138,7 +141,7 @@ class Builder(object): stdout.close() print("Completed build of %s" % (dist,)) - def kill_containers(self): + def kill_containers(self) -> None: with self._lock: active = list(self.active_containers) @@ -156,8 +159,10 @@ class Builder(object): self.active_containers.remove(c) -def run_builds(builder, dists, jobs=1, skip_tests=False): - def sig(signum, _frame): +def run_builds( + builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False +) -> None: + def sig(signum: int, _frame: Optional[FrameType]) -> None: print("Caught SIGINT") builder.kill_containers() diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index 079d2f5ed0..763dd02c47 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -38,7 +38,7 @@ import argparse import base64 import json import sys -from typing import Any, Optional +from typing import Any, Dict, Optional, Tuple from urllib import parse as urlparse import requests @@ -47,13 +47,14 @@ import signedjson.types import srvlookup import yaml from requests.adapters import HTTPAdapter +from urllib3 import HTTPConnectionPool # uncomment the following to enable debug logging of http requests # from httplib import HTTPConnection # HTTPConnection.debuglevel = 1 -def encode_base64(input_bytes): +def encode_base64(input_bytes: bytes) -> str: """Encode bytes as a base64 string without any padding.""" input_len = len(input_bytes) @@ -63,7 +64,7 @@ def encode_base64(input_bytes): return output_string -def encode_canonical_json(value): +def encode_canonical_json(value: object) -> bytes: return json.dumps( value, # Encode code-points outside of ASCII as UTF-8 rather than \u escapes @@ -130,7 +131,7 @@ def request( sig, destination, ) - authorization_headers.append(header.encode("ascii")) + authorization_headers.append(header) print("Authorization: %s" % header, file=sys.stderr) dest = "matrix://%s%s" % (destination, path) @@ -139,7 +140,10 @@ def request( s = requests.Session() s.mount("matrix://", MatrixConnectionAdapter()) - headers = {"Host": destination, "Authorization": authorization_headers[0]} + headers: Dict[str, str] = { + "Host": destination, + "Authorization": authorization_headers[0], + } if method == "POST": headers["Content-Type"] = "application/json" @@ -154,7 +158,7 @@ def request( ) -def main(): +def main() -> None: parser = argparse.ArgumentParser( description="Signs and sends a federation request to a matrix homeserver" ) @@ -212,6 +216,7 @@ def main(): if not args.server_name or not args.signing_key: read_args_from_config(args) + assert isinstance(args.signing_key, str) algorithm, version, key_base64 = args.signing_key.split() key = signedjson.key.decode_signing_key_base64(algorithm, version, key_base64) @@ -233,7 +238,7 @@ def main(): print("") -def read_args_from_config(args): +def read_args_from_config(args: argparse.Namespace) -> None: with open(args.config, "r") as fh: config = yaml.safe_load(fh) @@ -250,7 +255,7 @@ def read_args_from_config(args): class MatrixConnectionAdapter(HTTPAdapter): @staticmethod - def lookup(s, skip_well_known=False): + def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]: if s[-1] == "]": # ipv6 literal (with no port) return s, 8448 @@ -276,7 +281,7 @@ class MatrixConnectionAdapter(HTTPAdapter): return s, 8448 @staticmethod - def get_well_known(server_name): + def get_well_known(server_name: str) -> Optional[str]: uri = "https://%s/.well-known/matrix/server" % (server_name,) print("fetching %s" % (uri,), 
file=sys.stderr) @@ -299,7 +304,9 @@ class MatrixConnectionAdapter(HTTPAdapter): print("Invalid response from %s: %s" % (uri, e), file=sys.stderr) return None - def get_connection(self, url, proxies=None): + def get_connection( + self, url: str, proxies: Optional[Dict[str, str]] = None + ) -> HTTPConnectionPool: parsed = urlparse.urlparse(url) (host, port) = self.lookup(parsed.netloc) diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 1217e14874..c775865212 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -16,7 +16,7 @@ can crop up, e.g the cache descriptors. """ -from typing import Callable, Optional +from typing import Callable, Optional, Type from mypy.nodes import ARG_NAMED_OPT from mypy.plugin import MethodSigContext, Plugin @@ -94,7 +94,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: return signature -def plugin(version: str): +def plugin(version: str) -> Type[SynapsePlugin]: # This is the entry point of the plugin, and let's us deal with the fact # that the mypy plugin interface is *not* stable by looking at the version # string. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 9d7c7c445f..14f3f3a45d 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -25,7 +25,7 @@ import sys import urllib.request from os import path from tempfile import TemporaryDirectory -from typing import List, Optional +from typing import Any, List, Optional, cast import attr import click @@ -36,7 +36,9 @@ from github import Github from packaging import version -def run_until_successful(command, *args, **kwargs): +def run_until_successful( + command: str, *args: Any, **kwargs: Any +) -> subprocess.CompletedProcess: while True: completed_process = subprocess.run(command, *args, **kwargs) exit_code = completed_process.returncode @@ -50,7 +52,7 @@ def run_until_successful(command, *args, **kwargs): @click.group() -def cli(): +def cli() -> None: """An interactive script to walk through the parts of creating a release. Requires the dev dependencies be installed, which can be done via: @@ -81,7 +83,7 @@ def cli(): @cli.command() -def prepare(): +def prepare() -> None: """Do the initial stages of creating a release, including creating release branch, updating changelog and pushing to GitHub. """ @@ -161,7 +163,9 @@ def prepare(): click.get_current_context().abort() # Switch to the release branch. - parsed_new_version: version.Version = version.parse(new_version) + # Cast safety: parse() won't return a version.LegacyVersion from our + # version string format. + parsed_new_version = cast(version.Version, version.parse(new_version)) # We assume for debian changelogs that we only do RCs or full releases. assert not parsed_new_version.is_devrelease @@ -176,7 +180,6 @@ def prepare(): # If the release branch only exists on the remote we check it out # locally. repo.git.checkout(release_branch_name) - release_branch = repo.active_branch else: # If a branch doesn't exist we create one. 
We ask which one branch it # should be based off, defaulting to sensible values depending on the @@ -198,13 +201,15 @@ def prepare(): click.get_current_context().abort() # Check out the base branch and ensure it's up to date - repo.head.reference = base_branch + repo.head.set_reference(base_branch, "check out the base branch") repo.head.reset(index=True, working_tree=True) if not base_branch.is_remote(): update_branch(repo) # Create the new release branch - release_branch = repo.create_head(release_branch_name, commit=base_branch) + # Type ignore will no longer be needed after GitPython 3.1.28. + # See https://github.com/gitpython-developers/GitPython/pull/1419 + repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type] # Switch to the release branch and ensure it's up to date. repo.git.checkout(release_branch_name) @@ -265,7 +270,7 @@ def prepare(): @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"]) -def tag(gh_token: Optional[str]): +def tag(gh_token: Optional[str]) -> None: """Tags the release and generates a draft GitHub release""" # Make sure we're in a git repo. @@ -293,7 +298,12 @@ def tag(gh_token: Optional[str]): click.echo_via_pager(changes) if click.confirm("Edit text?", default=False): - changes = click.edit(changes, require_save=False) + edited_changes = click.edit(changes, require_save=False) + # This assert is for mypy's benefit. click's docs are a little unclear, but + # when `require_save=False`, not saving the temp file in the editor returns + # the original string. + assert edited_changes is not None + changes = edited_changes repo.create_tag(tag_name, message=changes, sign=True) @@ -347,7 +357,7 @@ def tag(gh_token: Optional[str]): @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True) -def publish(gh_token: str): +def publish(gh_token: str) -> None: """Publish release.""" # Make sure we're in a git repo. @@ -390,7 +400,7 @@ def publish(gh_token: str): @cli.command() -def upload(): +def upload() -> None: """Upload release to pypi.""" current_version = get_package_version() @@ -418,7 +428,7 @@ def upload(): @cli.command() -def announce(): +def announce() -> None: """Generate markdown to announce the release.""" current_version = get_package_version() @@ -461,18 +471,19 @@ def get_package_version() -> version.Version: def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]: """Find the branch/ref, looking first locally then in the remote.""" - if ref_name in repo.refs: - return repo.refs[ref_name] + if ref_name in repo.references: + return repo.references[ref_name] elif ref_name in repo.remote().refs: return repo.remote().refs[ref_name] else: return None -def update_branch(repo: git.Repo): +def update_branch(repo: git.Repo) -> None: """Ensure branch is up to date if it has a remote""" - if repo.active_branch.tracking_branch(): - repo.git.merge(repo.active_branch.tracking_branch().name) + tracking_branch = repo.active_branch.tracking_branch() + if tracking_branch: + repo.git.merge(tracking_branch.name) def get_changes_for_version(wanted_version: version.Version) -> str: @@ -536,7 +547,9 @@ def get_changes_for_version(wanted_version: version.Version) -> str: return "\n".join(version_changelog) -def generate_and_write_changelog(current_version: version.Version, new_version: str): +def generate_and_write_changelog( + current_version: version.Version, new_version: str +) -> None: # We do this by getting a draft so that we can edit it before writing to the # changelog. 
result = run_until_successful( @@ -558,8 +571,8 @@ def generate_and_write_changelog(current_version: version.Version, new_version: f.write(existing_content) # Remove all the news fragments - for f in glob.iglob("changelog.d/*.*"): - os.remove(f) + for filename in glob.iglob("changelog.d/*.*"): + os.remove(filename) if __name__ == "__main__": diff --git a/scripts-dev/sign_json.py b/scripts-dev/sign_json.py index 9459543106..bb217799fb 100755 --- a/scripts-dev/sign_json.py +++ b/scripts-dev/sign_json.py @@ -27,7 +27,7 @@ from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.util import json_encoder -def main(): +def main() -> None: parser = argparse.ArgumentParser( description="""Adds a signature to a JSON object. From d743b25c8f0c96516d9c374ee946e93e937e2c5b Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 27 Apr 2022 14:39:41 +0100 Subject: [PATCH 08/87] Use supervisord to supervise Postgres and Caddy in the Complement image. (#12480) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/12480.misc | 1 + docker/Dockerfile-workers | 3 +++ docker/complement/SynapseWorkers.Dockerfile | 5 ++++- .../conf-workers/caddy.supervisord.conf | 7 +++++++ .../conf-workers/postgres.supervisord.conf | 16 ++++++++++++++++ .../start-complement-synapse-workers.sh | 6 ------ docker/conf/log.config | 4 ---- docker/configure_workers_and_start.py | 2 +- docker/prefix-log | 12 ++++++++++++ 9 files changed, 44 insertions(+), 12 deletions(-) create mode 100644 changelog.d/12480.misc create mode 100644 docker/complement/conf-workers/caddy.supervisord.conf create mode 100644 docker/complement/conf-workers/postgres.supervisord.conf create mode 100755 docker/prefix-log diff --git a/changelog.d/12480.misc b/changelog.d/12480.misc new file mode 100644 index 0000000000..18a85e7b15 --- /dev/null +++ b/changelog.d/12480.misc @@ -0,0 +1 @@ +Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. 
\ No newline at end of file diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index 9ccb2b22a7..24b03585f9 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -20,6 +20,9 @@ RUN rm /etc/nginx/sites-enabled/default # Copy Synapse worker, nginx and supervisord configuration template files COPY ./docker/conf-workers/* /conf/ +# Copy a script to prefix log lines with the supervisor program name +COPY ./docker/prefix-log /usr/local/bin/ + # Expose nginx listener port EXPOSE 8080/tcp diff --git a/docker/complement/SynapseWorkers.Dockerfile b/docker/complement/SynapseWorkers.Dockerfile index 65df2d114d..9a4438e730 100644 --- a/docker/complement/SynapseWorkers.Dockerfile +++ b/docker/complement/SynapseWorkers.Dockerfile @@ -34,13 +34,16 @@ WORKDIR /data # Copy the caddy config COPY conf-workers/caddy.complement.json /root/caddy.json +COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf +COPY conf-workers/caddy.supervisord.conf /etc/supervisor/conf.d/caddy.conf + # Copy the entrypoint COPY conf-workers/start-complement-synapse-workers.sh / # Expose caddy's listener ports EXPOSE 8008 8448 -ENTRYPOINT /start-complement-synapse-workers.sh +ENTRYPOINT ["/start-complement-synapse-workers.sh"] # Update the healthcheck to have a shorter check interval HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ diff --git a/docker/complement/conf-workers/caddy.supervisord.conf b/docker/complement/conf-workers/caddy.supervisord.conf new file mode 100644 index 0000000000..d9ddb51dac --- /dev/null +++ b/docker/complement/conf-workers/caddy.supervisord.conf @@ -0,0 +1,7 @@ +[program:caddy] +command=/usr/local/bin/prefix-log /root/caddy run --config /root/caddy.json +autorestart=unexpected +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 diff --git a/docker/complement/conf-workers/postgres.supervisord.conf b/docker/complement/conf-workers/postgres.supervisord.conf new file mode 100644 index 0000000000..5608342d1a --- /dev/null +++ b/docker/complement/conf-workers/postgres.supervisord.conf @@ -0,0 +1,16 @@ +[program:postgres] +command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground + +# Lower priority number = starts first +priority=1 + +autorestart=unexpected +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 + +# Use 'Fast Shutdown' mode which aborts current transactions and closes connections quickly. +# (Default (TERM) is 'Smart Shutdown' which stops accepting new connections but +# lets existing connections close gracefully.) 
+stopsignal=INT diff --git a/docker/complement/conf-workers/start-complement-synapse-workers.sh b/docker/complement/conf-workers/start-complement-synapse-workers.sh index 2c1e05bd62..b9a6b55bbe 100755 --- a/docker/complement/conf-workers/start-complement-synapse-workers.sh +++ b/docker/complement/conf-workers/start-complement-synapse-workers.sh @@ -12,12 +12,6 @@ function log { # Replace the server name in the caddy config sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json -log "starting postgres" -pg_ctlcluster 13 main start - -log "starting caddy" -/root/caddy start --config /root/caddy.json - # Set the server name of the homeserver export SYNAPSE_SERVER_NAME=${SERVER_NAME} diff --git a/docker/conf/log.config b/docker/conf/log.config index 7a216a36a0..dc8c70befd 100644 --- a/docker/conf/log.config +++ b/docker/conf/log.config @@ -2,11 +2,7 @@ version: 1 formatters: precise: -{% if worker_name %} - format: '%(asctime)s - worker:{{ worker_name }} - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' -{% else %} format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' -{% endif %} handlers: {% if LOG_FILE_PATH %} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 3bda6c300b..33fc20d218 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -171,7 +171,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { # Templates for sections that may be inserted multiple times in config files SUPERVISORD_PROCESS_CONFIG_BLOCK = """ [program:synapse_{name}] -command=/usr/local/bin/python -m {app} \ +command=/usr/local/bin/prefix-log /usr/local/bin/python -m {app} \ --config-path="{config_path}" \ --config-path=/conf/workers/shared.yaml \ --config-path=/conf/workers/{name}.yaml diff --git a/docker/prefix-log b/docker/prefix-log new file mode 100755 index 0000000000..0e26a4f19d --- /dev/null +++ b/docker/prefix-log @@ -0,0 +1,12 @@ +#!/bin/bash +# +# Prefixes all lines on stdout and stderr with the process name (as determined by +# the SUPERVISOR_PROCESS_NAME env var, which is automatically set by Supervisor). +# +# Usage: +# prefix-log command [args...] +# + +exec 1> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&1) +exec 2> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&2) +exec "$@" From 5ef673de4f0bf991402ee29235741a91a7cc9b02 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 27 Apr 2022 15:55:33 +0200 Subject: [PATCH 09/87] Add a module API to allow modules to edit push rule actions (#12406) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/12406.feature | 1 + synapse/handlers/push_rules.py | 138 ++++++++++++++++++++ synapse/module_api/__init__.py | 64 +++++++++ synapse/module_api/errors.py | 4 + synapse/rest/client/push_rule.py | 112 +++------------- synapse/server.py | 5 + synapse/storage/databases/main/push_rule.py | 15 +-- tests/module_api/test_api.py | 84 +++++++++++- 8 files changed, 319 insertions(+), 104 deletions(-) create mode 100644 changelog.d/12406.feature create mode 100644 synapse/handlers/push_rules.py diff --git a/changelog.d/12406.feature b/changelog.d/12406.feature new file mode 100644 index 0000000000..e345afdee7 --- /dev/null +++ b/changelog.d/12406.feature @@ -0,0 +1 @@ +Add a module API to allow modules to change actions for existing push rules of local users. 
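(For context: a module built against this API would call the new methods roughly as
in the sketch below. `SilencingModule` and its wiring are hypothetical; the two
`ModuleApi` methods, their signatures, and the `InvalidRuleException` export are the
ones added in this patch, and the rule/actions values mirror the test added to
tests/module_api/test_api.py.)

    from synapse.module_api import ModuleApi

    class SilencingModule:
        def __init__(self, config: dict, api: ModuleApi):
            self._api = api

        async def silence_messages_for(self, user_id: str) -> None:
            actions = ["dont_notify"]
            # Raises synapse.module_api.errors.InvalidRuleException if the
            # actions are not spec-compliant.
            self._api.check_push_rule_actions(actions)
            # Main-process only; scope must currently be "global".
            await self._api.set_push_rule_action(
                user_id=user_id,
                scope="global",
                kind="underride",
                rule_id=".m.rule.message",
                actions=actions,
            )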
diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py new file mode 100644 index 0000000000..2599160bcc --- /dev/null +++ b/synapse/handlers/push_rules.py @@ -0,0 +1,138 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING, List, Optional, Union + +import attr + +from synapse.api.errors import SynapseError, UnrecognizedRequestError +from synapse.push.baserules import BASE_RULE_IDS +from synapse.storage.push_rule import RuleNotFoundException +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RuleSpec: + scope: str + template: str + rule_id: str + attr: Optional[str] + + +class PushRulesHandler: + """A class to handle changes in push rules for users.""" + + def __init__(self, hs: "HomeServer"): + self._notifier = hs.get_notifier() + self._main_store = hs.get_datastores().main + + async def set_rule_attr( + self, user_id: str, spec: RuleSpec, val: Union[bool, JsonDict] + ) -> None: + """Set an attribute (enabled or actions) on an existing push rule. + + Notifies listeners (e.g. sync handler) of the change. + + Args: + user_id: the user for which to modify the push rule. + spec: the spec of the push rule to modify. + val: the value to change the attribute to. + + Raises: + RuleNotFoundException if the rule being modified doesn't exist. + SynapseError(400) if the value is malformed. + UnrecognizedRequestError if the attribute to change is unknown. + InvalidRuleException if we're trying to change the actions on a rule but + the provided actions aren't compliant with the spec. + """ + if spec.attr not in ("enabled", "actions"): + # for the sake of potential future expansion, shouldn't report + # 404 in the case of an unknown request so check it corresponds to + # a known attribute first. + raise UnrecognizedRequestError() + + namespaced_rule_id = f"global/{spec.template}/{spec.rule_id}" + rule_id = spec.rule_id + is_default_rule = rule_id.startswith(".") + if is_default_rule: + if namespaced_rule_id not in BASE_RULE_IDS: + raise RuleNotFoundException("Unknown rule %r" % (namespaced_rule_id,)) + if spec.attr == "enabled": + if isinstance(val, dict) and "enabled" in val: + val = val["enabled"] + if not isinstance(val, bool): + # Legacy fallback + # This should *actually* take a dict, but many clients pass + # bools directly, so let's not break them. 
+ raise SynapseError(400, "Value for 'enabled' must be boolean") + await self._main_store.set_push_rule_enabled( + user_id, namespaced_rule_id, val, is_default_rule + ) + elif spec.attr == "actions": + if not isinstance(val, dict): + raise SynapseError(400, "Value must be a dict") + actions = val.get("actions") + if not isinstance(actions, list): + raise SynapseError(400, "Value for 'actions' must be dict") + check_actions(actions) + rule_id = spec.rule_id + is_default_rule = rule_id.startswith(".") + if is_default_rule: + if namespaced_rule_id not in BASE_RULE_IDS: + raise RuleNotFoundException( + "Unknown rule %r" % (namespaced_rule_id,) + ) + await self._main_store.set_push_rule_actions( + user_id, namespaced_rule_id, actions, is_default_rule + ) + else: + raise UnrecognizedRequestError() + + self.notify_user(user_id) + + def notify_user(self, user_id: str) -> None: + """Notify listeners about a push rule change. + + Args: + user_id: the user ID the change is for. + """ + stream_id = self._main_store.get_max_push_rules_stream_id() + self._notifier.on_new_event("push_rules_key", stream_id, users=[user_id]) + + +def check_actions(actions: List[Union[str, JsonDict]]) -> None: + """Check if the given actions are spec compliant. + + Args: + actions: the actions to check. + + Raises: + InvalidRuleException if the rules aren't compliant with the spec. + """ + if not isinstance(actions, list): + raise InvalidRuleException("No actions found") + + for a in actions: + if a in ["notify", "dont_notify", "coalesce"]: + pass + elif isinstance(a, dict) and "set_tweak" in a: + pass + else: + raise InvalidRuleException("Unrecognised action %s" % a) + + +class InvalidRuleException(Exception): + pass diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 7e6e23db73..834fe1b62c 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -82,6 +82,7 @@ from synapse.handlers.auth import ( ON_LOGGED_OUT_CALLBACK, AuthHandler, ) +from synapse.handlers.push_rules import RuleSpec, check_actions from synapse.http.client import SimpleHttpClient from synapse.http.server import ( DirectServeHtmlResource, @@ -195,6 +196,7 @@ class ModuleApi: self._clock: Clock = hs.get_clock() self._registration_handler = hs.get_registration_handler() self._send_email_handler = hs.get_send_email_handler() + self._push_rules_handler = hs.get_push_rules_handler() self.custom_template_dir = hs.config.server.custom_template_directory try: @@ -1352,6 +1354,68 @@ class ModuleApi: """ await self._store.add_user_bound_threepid(user_id, medium, address, id_server) + def check_push_rule_actions( + self, actions: List[Union[str, Dict[str, str]]] + ) -> None: + """Checks if the given push rule actions are valid according to the Matrix + specification. + + See https://spec.matrix.org/v1.2/client-server-api/#actions for the list of valid + actions. + + Added in Synapse v1.58.0. + + Args: + actions: the actions to check. + + Raises: + synapse.module_api.errors.InvalidRuleException if the actions are invalid. + """ + check_actions(actions) + + async def set_push_rule_action( + self, + user_id: str, + scope: str, + kind: str, + rule_id: str, + actions: List[Union[str, Dict[str, str]]], + ) -> None: + """Changes the actions of an existing push rule for the given user. + + See https://spec.matrix.org/v1.2/client-server-api/#push-rules for more + information about push rules and their syntax. + + Can only be called on the main process. + + Added in Synapse v1.58.0. 
+ + Args: + user_id: the user for which to change the push rule's actions. + scope: the push rule's scope, currently only "global" is allowed. + kind: the push rule's kind. + rule_id: the push rule's identifier. + actions: the actions to run when the rule's conditions match. + + Raises: + RuntimeError if this method is called on a worker or `scope` is invalid. + synapse.module_api.errors.RuleNotFoundException if the rule being modified + can't be found. + synapse.module_api.errors.InvalidRuleException if the actions are invalid. + """ + if self.worker_app is not None: + raise RuntimeError("module tried to change push rule actions on a worker") + + if scope != "global": + raise RuntimeError( + "invalid scope %s, only 'global' is currently allowed" % scope + ) + + spec = RuleSpec(scope, kind, rule_id, "actions") + await self._push_rules_handler.set_rule_attr( + user_id, spec, {"actions": actions} + ) + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/synapse/module_api/errors.py b/synapse/module_api/errors.py index 1db900e41f..e58e0e60fe 100644 --- a/synapse/module_api/errors.py +++ b/synapse/module_api/errors.py @@ -20,10 +20,14 @@ from synapse.api.errors import ( SynapseError, ) from synapse.config._base import ConfigError +from synapse.handlers.push_rules import InvalidRuleException +from synapse.storage.push_rule import RuleNotFoundException __all__ = [ "InvalidClientCredentialsError", "RedirectException", "SynapseError", "ConfigError", + "InvalidRuleException", + "RuleNotFoundException", ] diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index a93f6fd5e0..b98640b14a 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -12,9 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, Union - -import attr +from typing import TYPE_CHECKING, List, Sequence, Tuple, Union from synapse.api.errors import ( NotFoundError, @@ -22,6 +20,7 @@ from synapse.api.errors import ( SynapseError, UnrecognizedRequestError, ) +from synapse.handlers.push_rules import InvalidRuleException, RuleSpec, check_actions from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -29,7 +28,6 @@ from synapse.http.servlet import ( parse_string, ) from synapse.http.site import SynapseRequest -from synapse.push.baserules import BASE_RULE_IDS from synapse.push.clientformat import format_push_rules_for_user from synapse.push.rulekinds import PRIORITY_CLASS_MAP from synapse.rest.client._base import client_patterns @@ -40,14 +38,6 @@ if TYPE_CHECKING: from synapse.server import HomeServer -@attr.s(slots=True, frozen=True, auto_attribs=True) -class RuleSpec: - scope: str - template: str - rule_id: str - attr: Optional[str] - - class PushRuleRestServlet(RestServlet): PATTERNS = client_patterns("/(?Ppushrules/.*)$", v1=True) SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = ( @@ -60,6 +50,7 @@ class PushRuleRestServlet(RestServlet): self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self._is_worker = hs.config.worker.worker_app is not None + self._push_rules_handler = hs.get_push_rules_handler() async def on_PUT(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]: if self._is_worker: @@ -81,8 +72,13 @@ class PushRuleRestServlet(RestServlet): user_id = requester.user.to_string() if spec.attr: - await self.set_rule_attr(user_id, spec, content) - self.notify_user(user_id) + try: + await self._push_rules_handler.set_rule_attr(user_id, spec, content) + except InvalidRuleException as e: + raise SynapseError(400, "Invalid actions: %s" % e) + except RuleNotFoundException: + raise NotFoundError("Unknown rule") + return 200, {} if spec.rule_id.startswith("."): @@ -98,23 +94,23 @@ class PushRuleRestServlet(RestServlet): before = parse_string(request, "before") if before: - before = _namespaced_rule_id(spec, before) + before = f"global/{spec.template}/{before}" after = parse_string(request, "after") if after: - after = _namespaced_rule_id(spec, after) + after = f"global/{spec.template}/{after}" try: await self.store.add_push_rule( user_id=user_id, - rule_id=_namespaced_rule_id_from_spec(spec), + rule_id=f"global/{spec.template}/{spec.rule_id}", priority_class=priority_class, conditions=conditions, actions=actions, before=before, after=after, ) - self.notify_user(user_id) + self._push_rules_handler.notify_user(user_id) except InconsistentRuleException as e: raise SynapseError(400, str(e)) except RuleNotFoundException as e: @@ -133,11 +129,11 @@ class PushRuleRestServlet(RestServlet): requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() - namespaced_rule_id = _namespaced_rule_id_from_spec(spec) + namespaced_rule_id = f"global/{spec.template}/{spec.rule_id}" try: await self.store.delete_push_rule(user_id, namespaced_rule_id) - self.notify_user(user_id) + self._push_rules_handler.notify_user(user_id) return 200, {} except StoreError as e: if e.code == 404: @@ -172,55 +168,6 @@ class PushRuleRestServlet(RestServlet): else: raise UnrecognizedRequestError() - def notify_user(self, user_id: str) -> None: - stream_id = self.store.get_max_push_rules_stream_id() - self.notifier.on_new_event("push_rules_key", stream_id, users=[user_id]) - - async def set_rule_attr( - 
self, user_id: str, spec: RuleSpec, val: Union[bool, JsonDict] - ) -> None: - if spec.attr not in ("enabled", "actions"): - # for the sake of potential future expansion, shouldn't report - # 404 in the case of an unknown request so check it corresponds to - # a known attribute first. - raise UnrecognizedRequestError() - - namespaced_rule_id = _namespaced_rule_id_from_spec(spec) - rule_id = spec.rule_id - is_default_rule = rule_id.startswith(".") - if is_default_rule: - if namespaced_rule_id not in BASE_RULE_IDS: - raise NotFoundError("Unknown rule %s" % (namespaced_rule_id,)) - if spec.attr == "enabled": - if isinstance(val, dict) and "enabled" in val: - val = val["enabled"] - if not isinstance(val, bool): - # Legacy fallback - # This should *actually* take a dict, but many clients pass - # bools directly, so let's not break them. - raise SynapseError(400, "Value for 'enabled' must be boolean") - await self.store.set_push_rule_enabled( - user_id, namespaced_rule_id, val, is_default_rule - ) - elif spec.attr == "actions": - if not isinstance(val, dict): - raise SynapseError(400, "Value must be a dict") - actions = val.get("actions") - if not isinstance(actions, list): - raise SynapseError(400, "Value for 'actions' must be dict") - _check_actions(actions) - namespaced_rule_id = _namespaced_rule_id_from_spec(spec) - rule_id = spec.rule_id - is_default_rule = rule_id.startswith(".") - if is_default_rule: - if namespaced_rule_id not in BASE_RULE_IDS: - raise SynapseError(404, "Unknown rule %r" % (namespaced_rule_id,)) - await self.store.set_push_rule_actions( - user_id, namespaced_rule_id, actions, is_default_rule - ) - else: - raise UnrecognizedRequestError() - def _rule_spec_from_path(path: Sequence[str]) -> RuleSpec: """Turn a sequence of path components into a rule spec @@ -291,24 +238,11 @@ def _rule_tuple_from_request_object( raise InvalidRuleException("No actions found") actions = req_obj["actions"] - _check_actions(actions) + check_actions(actions) return conditions, actions -def _check_actions(actions: List[Union[str, JsonDict]]) -> None: - if not isinstance(actions, list): - raise InvalidRuleException("No actions found") - - for a in actions: - if a in ["notify", "dont_notify", "coalesce"]: - pass - elif isinstance(a, dict) and "set_tweak" in a: - pass - else: - raise InvalidRuleException("Unrecognised action") - - def _filter_ruleset_with_path(ruleset: JsonDict, path: List[str]) -> JsonDict: if path == []: raise UnrecognizedRequestError( @@ -357,17 +291,5 @@ def _priority_class_from_spec(spec: RuleSpec) -> int: return pc -def _namespaced_rule_id_from_spec(spec: RuleSpec) -> str: - return _namespaced_rule_id(spec, spec.rule_id) - - -def _namespaced_rule_id(spec: RuleSpec, rule_id: str) -> str: - return "global/%s/%s" % (spec.template, rule_id) - - -class InvalidRuleException(Exception): - pass - - def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: PushRuleRestServlet(hs).register(http_server) diff --git a/synapse/server.py b/synapse/server.py index 37c72bd83a..d49c76518a 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -91,6 +91,7 @@ from synapse.handlers.presence import ( WorkerPresenceHandler, ) from synapse.handlers.profile import ProfileHandler +from synapse.handlers.push_rules import PushRulesHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.register import RegistrationHandler @@ -810,6 +811,10 @@ class HomeServer(metaclass=abc.ABCMeta): def 
get_account_handler(self) -> AccountHandler: return AccountHandler(self) + @cache_in_self + def get_push_rules_handler(self) -> PushRulesHandler: + return PushRulesHandler(self) + @cache_in_self def get_outbound_redis_connection(self) -> "ConnectionHandler": """ diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 92539f5d41..eb85bbd392 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -16,7 +16,7 @@ import abc import logging from typing import TYPE_CHECKING, Dict, List, Tuple, Union -from synapse.api.errors import NotFoundError, StoreError +from synapse.api.errors import StoreError from synapse.push.baserules import list_with_base_rules from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.storage._base import SQLBaseStore, db_to_json @@ -618,7 +618,7 @@ class PushRuleStore(PushRulesWorkerStore): are always stored in the database `push_rules` table). Raises: - NotFoundError if the rule does not exist. + RuleNotFoundException if the rule does not exist. """ async with self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() @@ -668,8 +668,7 @@ class PushRuleStore(PushRulesWorkerStore): ) txn.execute(sql, (user_id, rule_id)) if txn.fetchone() is None: - # needed to set NOT_FOUND code. - raise NotFoundError("Push rule does not exist.") + raise RuleNotFoundException("Push rule does not exist.") self.db_pool.simple_upsert_txn( txn, @@ -698,9 +697,6 @@ class PushRuleStore(PushRulesWorkerStore): """ Sets the `actions` state of a push rule. - Will throw NotFoundError if the rule does not exist; the Code for this - is NOT_FOUND. - Args: user_id: the user ID of the user who wishes to enable/disable the rule e.g. '@tina:example.org' @@ -712,6 +708,9 @@ class PushRuleStore(PushRulesWorkerStore): is_default_rule: True if and only if this is a server-default rule. This skips the check for existence (as only user-created rules are always stored in the database `push_rules` table). + + Raises: + RuleNotFoundException if the rule does not exist. 
""" actions_json = json_encoder.encode(actions) @@ -744,7 +743,7 @@ class PushRuleStore(PushRulesWorkerStore): except StoreError as serr: if serr.code == 404: # this sets the NOT_FOUND error Code - raise NotFoundError("Push rule does not exist") + raise RuleNotFoundException("Push rule does not exist") else: raise diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 9fd5d59c55..8bc84aaaca 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -19,8 +19,9 @@ from synapse.api.constants import EduTypes, EventTypes from synapse.events import EventBase from synapse.federation.units import Transaction from synapse.handlers.presence import UserPresenceState +from synapse.handlers.push_rules import InvalidRuleException from synapse.rest import admin -from synapse.rest.client import login, presence, profile, room +from synapse.rest.client import login, notifications, presence, profile, room from synapse.types import create_requester from tests.events.test_presence_router import send_presence_update, sync_presence @@ -38,6 +39,7 @@ class ModuleApiTestCase(HomeserverTestCase): room.register_servlets, presence.register_servlets, profile.register_servlets, + notifications.register_servlets, ] def prepare(self, reactor, clock, homeserver): @@ -553,6 +555,86 @@ class ModuleApiTestCase(HomeserverTestCase): self.assertEqual(state[("org.matrix.test", "")].state_key, "") self.assertEqual(state[("org.matrix.test", "")].content, {}) + def test_set_push_rules_action(self) -> None: + """Test that a module can change the actions of an existing push rule for a user.""" + + # Create a room with 2 users in it. Push rules must not match if the user is the + # event's sender, so we need one user to send messages and one user to receive + # notifications. + user_id = self.register_user("user", "password") + tok = self.login("user", "password") + + room_id = self.helper.create_room_as(user_id, is_public=True, tok=tok) + + user_id2 = self.register_user("user2", "password") + tok2 = self.login("user2", "password") + self.helper.join(room_id, user_id2, tok=tok2) + + # Register a 3rd user and join them to the room, so that we don't accidentally + # trigger 1:1 push rules. + user_id3 = self.register_user("user3", "password") + tok3 = self.login("user3", "password") + self.helper.join(room_id, user_id3, tok=tok3) + + # Send a message as the second user and check that it notifies. + res = self.helper.send(room_id=room_id, body="here's a message", tok=tok2) + event_id = res["event_id"] + + channel = self.make_request( + "GET", + "/notifications", + access_token=tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + self.assertEqual(len(channel.json_body["notifications"]), 1, channel.json_body) + self.assertEqual( + channel.json_body["notifications"][0]["event"]["event_id"], + event_id, + channel.json_body, + ) + + # Change the .m.rule.message actions to not notify on new messages. + self.get_success( + defer.ensureDeferred( + self.module_api.set_push_rule_action( + user_id=user_id, + scope="global", + kind="underride", + rule_id=".m.rule.message", + actions=["dont_notify"], + ) + ) + ) + + # Send another message as the second user and check that the number of + # notifications didn't change. 
+ self.helper.send(room_id=room_id, body="here's another message", tok=tok2) + + channel = self.make_request( + "GET", + "/notifications?from=", + access_token=tok, + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(len(channel.json_body["notifications"]), 1, channel.json_body) + + def test_check_push_rules_actions(self) -> None: + """Test that modules can check whether a list of push rules actions are spec + compliant. + """ + with self.assertRaises(InvalidRuleException): + self.module_api.check_push_rule_actions(["foo"]) + + with self.assertRaises(InvalidRuleException): + self.module_api.check_push_rule_actions({"foo": "bar"}) + + self.module_api.check_push_rule_actions(["notify"]) + + self.module_api.check_push_rule_actions( + [{"set_tweak": "sound", "value": "default"}] + ) + class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): """For testing ModuleApi functionality in a multi-worker setup""" From 78b99de7c206b106340e12cdee0af9aa246bd5ad Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 27 Apr 2022 14:58:26 +0100 Subject: [PATCH 10/87] Prefer `make_awaitable` over `defer.succeed` in tests (#12505) When configuring the return values of mocks, prefer awaitables from `make_awaitable` over `defer.succeed`. `Deferred`s are only awaitable once, so it is inappropriate for a mock to return the same `Deferred` multiple times. Also update `run_in_background` to support functions that return arbitrary awaitables. Signed-off-by: Sean Quah --- changelog.d/12505.misc | 1 + synapse/logging/context.py | 26 +++++++++----- tests/federation/test_federation_client.py | 2 +- tests/federation/test_federation_sender.py | 2 +- tests/handlers/test_e2e_keys.py | 7 ++-- tests/handlers/test_password_providers.py | 34 +++++++++---------- tests/handlers/test_typing.py | 6 ++-- tests/handlers/test_user_directory.py | 6 ++-- tests/rest/client/test_presence.py | 4 +-- tests/rest/client/test_rooms.py | 7 ++-- tests/rest/client/test_transactions.py | 7 ++-- .../test_resource_limits_server_notices.py | 28 +++++++-------- tests/storage/test_monthly_active_users.py | 9 +++-- tests/test_federation.py | 2 +- 14 files changed, 72 insertions(+), 69 deletions(-) create mode 100644 changelog.d/12505.misc diff --git a/changelog.d/12505.misc b/changelog.d/12505.misc new file mode 100644 index 0000000000..a691d7962f --- /dev/null +++ b/changelog.d/12505.misc @@ -0,0 +1 @@ +Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 88cd8a9e1c..fd9cb97920 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -722,6 +722,11 @@ P = ParamSpec("P") R = TypeVar("R") +async def _unwrap_awaitable(awaitable: Awaitable[R]) -> R: + """Unwraps an arbitrary awaitable by awaiting it.""" + return await awaitable + + @overload def preserve_fn( # type: ignore[misc] f: Callable[P, Awaitable[R]], @@ -802,17 +807,20 @@ def run_in_background( # type: ignore[misc] # by synchronous exceptions, so let's turn them into Failures. return defer.fail() + # `res` may be a coroutine, `Deferred`, some other kind of awaitable, or a plain + # value. Convert it to a `Deferred`. if isinstance(res, typing.Coroutine): + # Wrap the coroutine in a `Deferred`. res = defer.ensureDeferred(res) - - # At this point we should have a Deferred, if not then f was a synchronous - # function, wrap it in a Deferred for consistency. 
- if not isinstance(res, defer.Deferred): - # `res` is not a `Deferred` and not a `Coroutine`. - # There are no other types of `Awaitable`s we expect to encounter in Synapse. - assert not isinstance(res, Awaitable) - - return defer.succeed(res) + elif isinstance(res, defer.Deferred): + pass + elif isinstance(res, Awaitable): + # `res` is probably some kind of completed awaitable, such as a `DoneAwaitable` + # or `Future` from `make_awaitable`. + res = defer.ensureDeferred(_unwrap_awaitable(res)) + else: + # `res` is a plain value. Wrap it in a `Deferred`. + res = defer.succeed(res) if res.called and not res.paused: # The function should have maintained the logcontext, so we can diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py index ec8864dafe..268a48d7ba 100644 --- a/tests/federation/test_federation_client.py +++ b/tests/federation/test_federation_client.py @@ -83,7 +83,7 @@ class FederationClientTest(FederatingHomeserverTestCase): ) # mock up the response, and have the agent return it - self._mock_agent.request.return_value = defer.succeed( + self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed( _mock_response( { "pdus": [ diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 91f982518e..6b26353d5e 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -226,7 +226,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # Send the server a device list EDU for the other user, this will cause # it to try and resync the device lists. self.hs.get_federation_transport_client().query_user_devices.return_value = ( - defer.succeed( + make_awaitable( { "stream_id": "1", "user_id": "@user2:host2", diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 8c74ed1fcf..1e6ad4b663 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -19,7 +19,6 @@ from unittest import mock from parameterized import parameterized from signedjson import key as key, sign as sign -from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import RoomEncryptionAlgorithms @@ -704,7 +703,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" self.hs.get_federation_client().query_client_keys = mock.Mock( - return_value=defer.succeed( + return_value=make_awaitable( { "device_keys": {remote_user_id: {}}, "master_keys": { @@ -777,14 +776,14 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): # Pretend we're sharing a room with the user we're querying. If not, # `_query_devices_for_destination` will return early. 
self.store.get_rooms_for_user = mock.Mock( - return_value=defer.succeed({"some_room_id"}) + return_value=make_awaitable({"some_room_id"}) ) remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" self.hs.get_federation_client().query_user_devices = mock.Mock( - return_value=defer.succeed( + return_value=make_awaitable( { "user_id": remote_user_id, "stream_id": 1, diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index d401fda938..addf14fa2b 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -17,8 +17,6 @@ from typing import Any, Type, Union from unittest.mock import Mock -from twisted.internet import defer - import synapse from synapse.api.constants import LoginType from synapse.api.errors import Codes @@ -190,7 +188,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.assertEqual(flows, [{"type": "m.login.password"}] + ADDITIONAL_LOGIN_FLOWS) # check_password must return an awaitable - mock_password_provider.check_password.return_value = defer.succeed(True) + mock_password_provider.check_password.return_value = make_awaitable(True) channel = self._send_password_login("u", "p") self.assertEqual(channel.code, 200, channel.result) self.assertEqual("@u:test", channel.json_body["user_id"]) @@ -226,13 +224,13 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.get_success(module_api.register_user("u")) # log in twice, to get two devices - mock_password_provider.check_password.return_value = defer.succeed(True) + mock_password_provider.check_password.return_value = make_awaitable(True) tok1 = self.login("u", "p") self.login("u", "p", device_id="dev2") mock_password_provider.reset_mock() # have the auth provider deny the request to start with - mock_password_provider.check_password.return_value = defer.succeed(False) + mock_password_provider.check_password.return_value = make_awaitable(False) # make the initial request which returns a 401 session = self._start_delete_device_session(tok1, "dev2") @@ -246,7 +244,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() # Finally, check the request goes through when we allow it - mock_password_provider.check_password.return_value = defer.succeed(True) + mock_password_provider.check_password.return_value = make_awaitable(True) channel = self._authed_delete_device(tok1, "dev2", session, "u", "p") self.assertEqual(channel.code, 200) mock_password_provider.check_password.assert_called_once_with("@u:test", "p") @@ -260,7 +258,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.register_user("localuser", "localpass") # check_password must return an awaitable - mock_password_provider.check_password.return_value = defer.succeed(False) + mock_password_provider.check_password.return_value = make_awaitable(False) channel = self._send_password_login("u", "p") self.assertEqual(channel.code, 403, channel.result) @@ -277,7 +275,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.register_user("localuser", "localpass") # have the auth provider deny the request - mock_password_provider.check_password.return_value = defer.succeed(False) + mock_password_provider.check_password.return_value = make_awaitable(False) # log in twice, to get two devices tok1 = self.login("localuser", "localpass") @@ -320,7 +318,7 @@ class 
PasswordAuthProviderTests(unittest.HomeserverTestCase): self.register_user("localuser", "localpass") # check_password must return an awaitable - mock_password_provider.check_password.return_value = defer.succeed(False) + mock_password_provider.check_password.return_value = make_awaitable(False) channel = self._send_password_login("localuser", "localpass") self.assertEqual(channel.code, 403) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -342,7 +340,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.register_user("localuser", "localpass") # allow login via the auth provider - mock_password_provider.check_password.return_value = defer.succeed(True) + mock_password_provider.check_password.return_value = make_awaitable(True) # log in twice, to get two devices tok1 = self.login("localuser", "p") @@ -359,7 +357,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.check_password.assert_not_called() # now try deleting with the local password - mock_password_provider.check_password.return_value = defer.succeed(False) + mock_password_provider.check_password.return_value = make_awaitable(False) channel = self._authed_delete_device( tok1, "dev2", session, "localuser", "localpass" ) @@ -413,7 +411,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 400, channel.result) mock_password_provider.check_auth.assert_not_called() - mock_password_provider.check_auth.return_value = defer.succeed( + mock_password_provider.check_auth.return_value = make_awaitable( ("@user:bz", None) ) channel = self._send_login("test.login_type", "u", test_field="y") @@ -427,7 +425,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # try a weird username. Again, it's unclear what we *expect* to happen # in these cases, but at least we can guard against the API changing # unexpectedly - mock_password_provider.check_auth.return_value = defer.succeed( + mock_password_provider.check_auth.return_value = make_awaitable( ("@ MALFORMED! 
:bz", None) ) channel = self._send_login("test.login_type", " USER🙂NAME ", test_field=" abc ") @@ -477,7 +475,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() # right params, but authing as the wrong user - mock_password_provider.check_auth.return_value = defer.succeed( + mock_password_provider.check_auth.return_value = make_awaitable( ("@user:bz", None) ) body["auth"]["test_field"] = "foo" @@ -490,7 +488,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() # and finally, succeed - mock_password_provider.check_auth.return_value = defer.succeed( + mock_password_provider.check_auth.return_value = make_awaitable( ("@localuser:test", None) ) channel = self._delete_device(tok1, "dev2", body) @@ -508,9 +506,9 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.custom_auth_provider_callback_test_body() def custom_auth_provider_callback_test_body(self): - callback = Mock(return_value=defer.succeed(None)) + callback = Mock(return_value=make_awaitable(None)) - mock_password_provider.check_auth.return_value = defer.succeed( + mock_password_provider.check_auth.return_value = make_awaitable( ("@user:bz", callback) ) channel = self._send_login("test.login_type", "u", test_field="y") @@ -646,7 +644,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): login is disabled""" # register the user and log in twice via the test login type to get two devices, self.register_user("localuser", "localpass") - mock_password_provider.check_auth.return_value = defer.succeed( + mock_password_provider.check_auth.return_value = make_awaitable( ("@localuser:test", None) ) channel = self._send_login("test.login_type", "localuser", test_field="") diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index ffd5c4cb93..5f2e26a5fc 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -65,11 +65,11 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): # we mock out the keyring so as to skip the authentication check on the # federation API call. 
mock_keyring = Mock(spec=["verify_json_for_server"]) - mock_keyring.verify_json_for_server.return_value = defer.succeed(True) + mock_keyring.verify_json_for_server.return_value = make_awaitable(True) # we mock out the federation client too mock_federation_client = Mock(spec=["put_json"]) - mock_federation_client.put_json.return_value = defer.succeed((200, "OK")) + mock_federation_client.put_json.return_value = make_awaitable((200, "OK")) # the tests assume that we are starting at unix time 1000 reactor.pump((1000,)) @@ -98,7 +98,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.datastore = hs.get_datastores().main self.datastore.get_destination_retry_timings = Mock( - return_value=defer.succeed(None) + return_value=make_awaitable(None) ) self.datastore.get_device_updates_by_remote = Mock( diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index c6e501c7be..96e2e3039b 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -15,7 +15,6 @@ from typing import Tuple from unittest.mock import Mock, patch from urllib.parse import quote -from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -30,6 +29,7 @@ from synapse.util import Clock from tests import unittest from tests.storage.test_user_directory import GetUserDirectoryTables +from tests.test_utils import make_awaitable from tests.test_utils.event_injection import inject_member_event from tests.unittest import override_config @@ -439,7 +439,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): ) ) - mock_remove_from_user_dir = Mock(return_value=defer.succeed(None)) + mock_remove_from_user_dir = Mock(return_value=make_awaitable(None)) with patch.object( self.store, "remove_from_user_dir", mock_remove_from_user_dir ): @@ -454,7 +454,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): self.store.register_user(user_id=r_user_id, password_hash=None) ) - mock_remove_from_user_dir = Mock(return_value=defer.succeed(None)) + mock_remove_from_user_dir = Mock(return_value=make_awaitable(None)) with patch.object( self.store, "remove_from_user_dir", mock_remove_from_user_dir ): diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index 0abe378fe4..b3738a0304 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -14,7 +14,6 @@ from http import HTTPStatus from unittest.mock import Mock -from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor from synapse.handlers.presence import PresenceHandler @@ -24,6 +23,7 @@ from synapse.types import UserID from synapse.util import Clock from tests import unittest +from tests.test_utils import make_awaitable class PresenceTestCase(unittest.HomeserverTestCase): @@ -37,7 +37,7 @@ class PresenceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: presence_handler = Mock(spec=PresenceHandler) - presence_handler.set_state.return_value = defer.succeed(None) + presence_handler.set_state.return_value = make_awaitable(None) hs = self.setup_test_homeserver( "red", diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 6ff79b9e2e..9443daa056 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -22,7 +22,6 @@ from typing import Any, Dict, Iterable, List, Optional from unittest.mock import Mock, call from urllib import 
parse as urlparse -from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -1426,9 +1425,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): def test_simple(self) -> None: "Simple test for searching rooms over federation" - self.federation_client.get_public_rooms.side_effect = lambda *a, **k: defer.succeed( # type: ignore[attr-defined] - {} - ) + self.federation_client.get_public_rooms.return_value = make_awaitable({}) # type: ignore[attr-defined] search_filter = {"generic_search_term": "foobar"} @@ -1456,7 +1453,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): # with a 404, when using search filters. self.federation_client.get_public_rooms.side_effect = ( # type: ignore[attr-defined] HttpResponseException(404, "Not Found", b""), - defer.succeed({}), + make_awaitable({}), ) search_filter = {"generic_search_term": "foobar"} diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 8d8251b2ac..21a1ca2a68 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -22,6 +22,7 @@ from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionC from synapse.util import Clock from tests import unittest +from tests.test_utils import make_awaitable from tests.utils import MockClock @@ -38,7 +39,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): @defer.inlineCallbacks def test_executes_given_function(self): - cb = Mock(return_value=defer.succeed(self.mock_http_response)) + cb = Mock(return_value=make_awaitable(self.mock_http_response)) res = yield self.cache.fetch_or_execute( self.mock_key, cb, "some_arg", keyword="arg" ) @@ -47,7 +48,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): @defer.inlineCallbacks def test_deduplicates_based_on_key(self): - cb = Mock(return_value=defer.succeed(self.mock_http_response)) + cb = Mock(return_value=make_awaitable(self.mock_http_response)) for i in range(3): # invoke multiple times res = yield self.cache.fetch_or_execute( self.mock_key, cb, "some_arg", keyword="arg", changing_args=i @@ -130,7 +131,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cleans_up(self): - cb = Mock(return_value=defer.succeed(self.mock_http_response)) + cb = Mock(return_value=make_awaitable(self.mock_http_response)) yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg") # should NOT have cleaned up yet self.clock.advance_time_msec(CLEANUP_PERIOD_MS / 2) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 02b96c9e6e..9ee9509d3a 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -14,8 +14,6 @@ from unittest.mock import Mock -from twisted.internet import defer - from synapse.api.constants import EventTypes, LimitBlockingTypes, ServerNoticeMsgType from synapse.api.errors import ResourceLimitError from synapse.rest import admin @@ -68,16 +66,16 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): return_value=make_awaitable(1000) ) self._rlsn._server_notices_manager.send_notice = Mock( - return_value=defer.succeed(Mock()) + return_value=make_awaitable(Mock()) ) self._send_notice = self._rlsn._server_notices_manager.send_notice self.user_id = "@user_id:test" 
self._rlsn._server_notices_manager.get_or_create_notice_room_for_user = Mock( - return_value=defer.succeed("!something:localhost") + return_value=make_awaitable("!something:localhost") ) - self._rlsn._store.add_tag_to_room = Mock(return_value=defer.succeed(None)) + self._rlsn._store.add_tag_to_room = Mock(return_value=make_awaitable(None)) self._rlsn._store.get_tags_for_room = Mock(return_value=make_awaitable({})) @override_config({"hs_disabled": True}) @@ -95,7 +93,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): """Test when user has blocked notice, but should have it removed""" - self._rlsn._auth.check_auth_blocking = Mock(return_value=defer.succeed(None)) + self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) @@ -111,7 +109,8 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test when user has blocked notice, but notice ought to be there (NOOP) """ self._rlsn._auth.check_auth_blocking = Mock( - return_value=defer.succeed(None), side_effect=ResourceLimitError(403, "foo") + return_value=make_awaitable(None), + side_effect=ResourceLimitError(403, "foo"), ) mock_event = Mock( @@ -130,7 +129,8 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test when user does not have blocked notice, but should have one """ self._rlsn._auth.check_auth_blocking = Mock( - return_value=defer.succeed(None), side_effect=ResourceLimitError(403, "foo") + return_value=make_awaitable(None), + side_effect=ResourceLimitError(403, "foo"), ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -141,7 +141,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, nor should they (NOOP) """ - self._rlsn._auth.check_auth_blocking = Mock(return_value=defer.succeed(None)) + self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -152,7 +152,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test when user is not part of the MAU cohort - this should not ever happen - but ... """ - self._rlsn._auth.check_auth_blocking = Mock(return_value=defer.succeed(None)) + self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) self._rlsn._store.user_last_seen_monthly_active = Mock( return_value=make_awaitable(None) ) @@ -167,7 +167,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): an alert message is not sent into the room """ self._rlsn._auth.check_auth_blocking = Mock( - return_value=defer.succeed(None), + return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER ), @@ -182,7 +182,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test that when a server is disabled, that MAU limit alerting is ignored. """ self._rlsn._auth.check_auth_blocking = Mock( - return_value=defer.succeed(None), + return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED ), @@ -199,14 +199,14 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): is suppressed that the room is returned to an unblocked state. 
""" self._rlsn._auth.check_auth_blocking = Mock( - return_value=defer.succeed(None), + return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER ), ) self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock( - return_value=defer.succeed((True, [])) + return_value=make_awaitable((True, [])) ) mock_event = Mock( diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 60c8d37594..0fbf465670 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -14,7 +14,6 @@ from typing import Any, Dict, List from unittest.mock import Mock -from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import UserTypes @@ -259,10 +258,10 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): def test_populate_monthly_users_should_update(self): self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] - self.store.is_trial_user = Mock(return_value=defer.succeed(False)) # type: ignore[assignment] + self.store.is_trial_user = Mock(return_value=make_awaitable(False)) # type: ignore[assignment] self.store.user_last_seen_monthly_active = Mock( - return_value=defer.succeed(None) + return_value=make_awaitable(None) ) d = self.store.populate_monthly_active_users("user_id") self.get_success(d) @@ -272,9 +271,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): def test_populate_monthly_users_should_not_update(self): self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] - self.store.is_trial_user = Mock(return_value=defer.succeed(False)) # type: ignore[assignment] + self.store.is_trial_user = Mock(return_value=make_awaitable(False)) # type: ignore[assignment] self.store.user_last_seen_monthly_active = Mock( - return_value=defer.succeed(self.hs.get_clock().time_msec()) + return_value=make_awaitable(self.hs.get_clock().time_msec()) ) d = self.store.populate_monthly_active_users("user_id") diff --git a/tests/test_federation.py b/tests/test_federation.py index c39816de85..0cbef70bfa 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -233,7 +233,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase): # Register mock device list retrieval on the federation client. federation_client = self.homeserver.get_federation_client() federation_client.query_user_devices = Mock( - return_value=succeed( + return_value=make_awaitable( { "user_id": remote_user_id, "stream_id": 1, From ce6ecdd4b4939fd99418bc949b40c01d39480489 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 28 Apr 2022 11:27:42 +0100 Subject: [PATCH 11/87] Allow unused ignores in "bleeding edge" CI Where "bleeding edge" means the Twisted Trunk and Latest Deps jobs. Follow up from #12531. Resolves #12574. --- .github/workflows/latest_deps.yml | 2 ++ .github/workflows/twisted_trunk.yml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 1a61d179d9..41da1c7408 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -38,6 +38,8 @@ jobs: # `pip install matrix-synapse[all]` as closely as possible. 
- run: poetry update --no-dev - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true) + - name: Remove warn_unused_ignores from mypy config + run: sed '/warn_unused_ignores = True/d' -i mypy.ini - run: poetry run mypy trial: runs-on: ubuntu-latest diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 8fc1affb77..5f0671f350 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -24,6 +24,8 @@ jobs: poetry remove twisted poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk poetry install --no-interaction --extras "all test" + - name: Remove warn_unused_ignores from mypy config + run: sed '/warn_unused_ignores = True/d' -i mypy.ini - run: poetry run mypy trial: From f282d5fc1185dde3f9ec31c49b630cff962545d7 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 28 Apr 2022 11:29:13 +0100 Subject: [PATCH 12/87] Use `--extras all` in latest deps mypy CI Twisted trunk job already does this. Missed in #12531. --- .github/workflows/latest_deps.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 41da1c7408..c537a5a60f 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -32,6 +32,7 @@ jobs: with: python-version: "3.x" poetry-version: "1.2.0b1" + extras: "all" # Dump installed versions for debugging. - run: poetry run pip list > before.txt # Upgrade all runtime dependencies only. This is intended to mimic a fresh From 5a320baa45b8e826e52bdd6cadadfad727ab0357 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 28 Apr 2022 11:31:26 +0100 Subject: [PATCH 13/87] changelog --- changelog.d/12576.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/12576.misc diff --git a/changelog.d/12576.misc b/changelog.d/12576.misc new file mode 100644 index 0000000000..71022c8633 --- /dev/null +++ b/changelog.d/12576.misc @@ -0,0 +1 @@ +Allow unused `#type: ignore` comments in bleeding edge CI jobs. From 5d3509dfda607323892f1cc96d7421612816e803 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 28 Apr 2022 11:32:50 +0100 Subject: [PATCH 14/87] Revert accidental direct-to-develop commits. This reverts commit 5a320baa45b8e826e52bdd6cadadfad727ab0357. This reverts commit f282d5fc1185dde3f9ec31c49b630cff962545d7. This reverts commit ce6ecdd4b4939fd99418bc949b40c01d39480489. --- .github/workflows/latest_deps.yml | 3 --- .github/workflows/twisted_trunk.yml | 2 -- changelog.d/12576.misc | 1 - 3 files changed, 6 deletions(-) delete mode 100644 changelog.d/12576.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index c537a5a60f..1a61d179d9 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -32,15 +32,12 @@ jobs: with: python-version: "3.x" poetry-version: "1.2.0b1" - extras: "all" # Dump installed versions for debugging. - run: poetry run pip list > before.txt # Upgrade all runtime dependencies only. This is intended to mimic a fresh # `pip install matrix-synapse[all]` as closely as possible. 
- run: poetry update --no-dev - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true) - - name: Remove warn_unused_ignores from mypy config - run: sed '/warn_unused_ignores = True/d' -i mypy.ini - run: poetry run mypy trial: runs-on: ubuntu-latest diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 5f0671f350..8fc1affb77 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -24,8 +24,6 @@ jobs: poetry remove twisted poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk poetry install --no-interaction --extras "all test" - - name: Remove warn_unused_ignores from mypy config - run: sed '/warn_unused_ignores = True/d' -i mypy.ini - run: poetry run mypy trial: diff --git a/changelog.d/12576.misc b/changelog.d/12576.misc deleted file mode 100644 index 71022c8633..0000000000 --- a/changelog.d/12576.misc +++ /dev/null @@ -1 +0,0 @@ -Allow unused `#type: ignore` comments in bleeding edge CI jobs. From 629aa517436c89192815bbd05a4f6ee193b9e839 Mon Sep 17 00:00:00 2001 From: "DeepBlueV7.X" Date: Thu, 28 Apr 2022 12:54:46 +0000 Subject: [PATCH 15/87] Add linebreak to pipx install quote in README (#12579) --- README.rst | 4 ++-- changelog.d/12579.doc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12579.doc diff --git a/README.rst b/README.rst index d71d733679..80201d4eb1 100644 --- a/README.rst +++ b/README.rst @@ -296,8 +296,8 @@ directory of your choice:: Synapse has a number of external dependencies. We maintain a fixed development environment using [poetry](https://python-poetry.org/). First, install poetry. We recommend - pip install --user pipx - pipx install poetry + | pip install --user pipx + | pipx install poetry as described `here `_. (See `poetry's installation docs ` diff --git a/changelog.d/12579.doc b/changelog.d/12579.doc new file mode 100644 index 0000000000..bcec5fe1af --- /dev/null +++ b/changelog.d/12579.doc @@ -0,0 +1 @@ +Add missing linebreak to pipx install instructions. From 0b684b59e5ef761575830e9bcdfebcb13c6d2132 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 28 Apr 2022 16:49:50 +0100 Subject: [PATCH 16/87] Fix logging of incorrect status codes for disconnected requests (#12580) The status code of requests must always be set, regardless of client disconnection, otherwise they will always be logged as 200!. Broken for `respond_with_json` in f48792eec43f893f4f893ffdcbf00f8958b6f6b5. Broken for `respond_with_json_bytes` in 3e58ce72b42f2ae473c1e76a967548cd6fa7e2e6. Broken for `respond_with_html_bytes` in ea26e9a98b0541fc886a1cb826a38352b7599dbe. Signed-off-by: Sean Quah --- changelog.d/12580.bugfix | 1 + synapse/http/server.py | 12 +++++++++--- tests/handlers/test_cas.py | 14 +++++++++++++- tests/handlers/test_saml.py | 14 +++++++++++++- 4 files changed, 36 insertions(+), 5 deletions(-) create mode 100644 changelog.d/12580.bugfix diff --git a/changelog.d/12580.bugfix b/changelog.d/12580.bugfix new file mode 100644 index 0000000000..bedce405e2 --- /dev/null +++ b/changelog.d/12580.bugfix @@ -0,0 +1 @@ +Fix a long standing bug where status codes would almost always get logged as 200!, irrespective of the actual status code, when clients disconnect before a request has finished processing. 
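As a rough illustration of the ordering this change enforces (a sketch only — `FakeRequest` and `respond` below are hypothetical stand-ins, not code from this patch): the status code is recorded on the request object before any early return for a disconnected client, so the access log reports the intended code rather than the default.

    # Hypothetical stand-in for twisted.web.server.Request, for illustration.
    class FakeRequest:
        def __init__(self) -> None:
            self.code = 200  # what the access log would report by default
            self._disconnected = False

        def setResponseCode(self, code: int) -> None:
            self.code = code


    def respond(request: FakeRequest, code: int, body: bytes) -> None:
        # Set the code first: even if we return early below because the
        # client has gone away, the access log now sees the real status.
        request.setResponseCode(code)

        if request._disconnected:
            # Before this fix, the early return happened *before* the code
            # was set, so disconnected requests were always logged as 200.
            return

        # ... set headers and write `body` here ...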
diff --git a/synapse/http/server.py b/synapse/http/server.py index b8a7a0f5df..1cf49830e8 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -683,6 +683,9 @@ def respond_with_json( Returns: twisted.web.server.NOT_DONE_YET if the request is still active. """ + # The response code must always be set, for logging purposes. + request.setResponseCode(code) + # could alternatively use request.notifyFinish() and flip a flag when # the Deferred fires, but since the flag is RIGHT THERE it seems like # a waste. @@ -697,7 +700,6 @@ def respond_with_json( else: encoder = _encode_json_bytes - request.setResponseCode(code) request.setHeader(b"Content-Type", b"application/json") request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate") @@ -728,13 +730,15 @@ def respond_with_json_bytes( Returns: twisted.web.server.NOT_DONE_YET if the request is still active. """ + # The response code must always be set, for logging purposes. + request.setResponseCode(code) + if request._disconnected: logger.warning( "Not sending response to request %s, already disconnected.", request ) return None - request.setResponseCode(code) request.setHeader(b"Content-Type", b"application/json") request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),)) request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate") @@ -840,6 +844,9 @@ def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> N code: The HTTP response code. html_bytes: The HTML bytes to use as the response body. """ + # The response code must always be set, for logging purposes. + request.setResponseCode(code) + # could alternatively use request.notifyFinish() and flip a flag when # the Deferred fires, but since the flag is RIGHT THERE it seems like # a waste. @@ -849,7 +856,6 @@ def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> N ) return None - request.setResponseCode(code) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index a54aa29cf1..751025c5da 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -201,4 +201,16 @@ class CasHandlerTestCase(HomeserverTestCase): def _mock_request(): """Returns a mock which will stand in as a SynapseRequest""" - return Mock(spec=["getClientIP", "getHeader", "_disconnected"]) + mock = Mock( + spec=[ + "finish", + "getClientIP", + "getHeader", + "setHeader", + "setResponseCode", + "write", + ] + ) + # `_disconnected` musn't be another `Mock`, otherwise it will be truthy. + mock._disconnected = False + return mock diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 8d4404eda1..e2f0f90ef1 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -349,4 +349,16 @@ class SamlHandlerTestCase(HomeserverTestCase): def _mock_request(): """Returns a mock which will stand in as a SynapseRequest""" - return Mock(spec=["getClientIP", "getHeader", "_disconnected"]) + mock = Mock( + spec=[ + "finish", + "getClientIP", + "getHeader", + "setHeader", + "setResponseCode", + "write", + ] + ) + # `_disconnected` musn't be another `Mock`, otherwise it will be truthy. + mock._disconnected = False + return mock From 0d9eaa19fd4f934d2a249bc8fb0191fbac30dd0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=A0imon=20Brandner?= Date: Thu, 28 Apr 2022 13:34:12 -0400 Subject: [PATCH 17/87] Use constants for receipt types in tests. 
(#12582) --- changelog.d/12582.misc | 1 + tests/handlers/test_receipts.py | 38 +++++++++---------- .../slave/storage/test_receipts.py | 11 ++++-- tests/rest/client/test_sync.py | 3 +- 4 files changed, 30 insertions(+), 23 deletions(-) create mode 100644 changelog.d/12582.misc diff --git a/changelog.d/12582.misc b/changelog.d/12582.misc new file mode 100644 index 0000000000..5fa9c9afe8 --- /dev/null +++ b/changelog.d/12582.misc @@ -0,0 +1 @@ +Use constants for read-receipts in tests. diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py index 5081b97573..65ab7db0c8 100644 --- a/tests/handlers/test_receipts.py +++ b/tests/handlers/test_receipts.py @@ -15,7 +15,7 @@ from typing import List -from synapse.api.constants import ReadReceiptEventFields +from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes from synapse.types import JsonDict from tests import unittest @@ -35,7 +35,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1435641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@rikj:jki.re": { "ts": 1436451550453, "hidden": True, @@ -56,7 +56,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1435641916hfgh4394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@me:server.org": { "ts": 1436451550453, "hidden": True, @@ -72,7 +72,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1435641916hfgh4394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@me:server.org": { "ts": 1436451550453, ReadReceiptEventFields.MSC2285_HIDDEN: True, @@ -92,7 +92,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1dgdgrd5641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@rikj:jki.re": { "ts": 1436451550453, "hidden": True, @@ -111,7 +111,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1dgdgrd5641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@user:jki.re": { "ts": 1436451550453, } @@ -130,7 +130,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$14356419edgd14394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@rikj:jki.re": { "ts": 1436451550453, "hidden": True, @@ -138,7 +138,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): } }, "$1435641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@user:jki.re": { "ts": 1436451550453, } @@ -153,7 +153,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1435641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@user:jki.re": { "ts": 1436451550453, } @@ -171,9 +171,9 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): [ { "content": { - "$14356419ggffg114394fHBLK:matrix.org": {"m.read": {}}, + "$14356419ggffg114394fHBLK:matrix.org": {ReceiptTypes.READ: {}}, "$1435641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@user:jki.re": { "ts": 1436451550453, } @@ -187,9 +187,9 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): [ { "content": { - "$14356419ggffg114394fHBLK:matrix.org": {"m.read": {}}, + "$14356419ggffg114394fHBLK:matrix.org": {ReceiptTypes.READ: {}}, "$1435641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@user:jki.re": { "ts": 1436451550453, } @@ -209,7 +209,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): "content": { "$143564gdfg6114394fHBLK:matrix.org": {}, "$1435641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@user:jki.re": { "ts": 
1436451550453, } @@ -225,7 +225,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): "content": { "$143564gdfg6114394fHBLK:matrix.org": {}, "$1435641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@user:jki.re": { "ts": 1436451550453, } @@ -244,7 +244,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$14356419edgd14394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@rikj:jki.re": { "ts": 1436451550453, "hidden": True, @@ -258,7 +258,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1435641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@user:jki.re": { "ts": 1436451550453, } @@ -273,7 +273,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$1435641916114394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@user:jki.re": { "ts": 1436451550453, } @@ -297,7 +297,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$14356419edgd14394fHBLK:matrix.org": { - "m.read": { + ReceiptTypes.READ: { "@rikj:jki.re": "string", } }, diff --git a/tests/replication/slave/storage/test_receipts.py b/tests/replication/slave/storage/test_receipts.py index f47d94f690..de19e75b9d 100644 --- a/tests/replication/slave/storage/test_receipts.py +++ b/tests/replication/slave/storage/test_receipts.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from synapse.api.constants import ReceiptTypes from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from ._base import BaseSlavedStoreTestCase @@ -26,9 +27,13 @@ class SlavedReceiptTestCase(BaseSlavedStoreTestCase): STORE_TYPE = SlavedReceiptsStore def test_receipt(self): - self.check("get_receipts_for_user", [USER_ID, "m.read"], {}) + self.check("get_receipts_for_user", [USER_ID, ReceiptTypes.READ], {}) self.get_success( - self.master_store.insert_receipt(ROOM_ID, "m.read", USER_ID, [EVENT_ID], {}) + self.master_store.insert_receipt( + ROOM_ID, ReceiptTypes.READ, USER_ID, [EVENT_ID], {} + ) ) self.replicate() - self.check("get_receipts_for_user", [USER_ID, "m.read"], {ROOM_ID: EVENT_ID}) + self.check( + "get_receipts_for_user", [USER_ID, ReceiptTypes.READ], {ROOM_ID: EVENT_ID} + ) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 773c16a54c..cb765455c1 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -24,6 +24,7 @@ from synapse.api.constants import ( EventContentFields, EventTypes, ReadReceiptEventFields, + ReceiptTypes, RelationTypes, ) from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync @@ -560,7 +561,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): self._check_unread_count(1) # Send a read receipt to tell the server we've read the latest event. - body = json.dumps({"m.read": res["event_id"]}).encode("utf8") + body = json.dumps({ReceiptTypes.READ: res["event_id"]}).encode("utf8") channel = self.make_request( "POST", "/rooms/%s/read_markers" % self.room_id, From 3ae56d125c95fe6919cdf7892b9e6a6cdba651a4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 28 Apr 2022 13:58:58 -0400 Subject: [PATCH 18/87] Improve the docstrings for the receipts store. 
(#12581) --- changelog.d/12581.misc | 1 + synapse/storage/databases/main/receipts.py | 56 ++++++++++++++++++++-- 2 files changed, 52 insertions(+), 5 deletions(-) create mode 100644 changelog.d/12581.misc diff --git a/changelog.d/12581.misc b/changelog.d/12581.misc new file mode 100644 index 0000000000..38d40b262b --- /dev/null +++ b/changelog.d/12581.misc @@ -0,0 +1 @@ +Improve docstrings for the receipts store. diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 332e901dda..7d96f4feda 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -122,10 +122,21 @@ class ReceiptsWorkerStore(SQLBaseStore): receipts = await self.get_receipts_for_room(room_id, ReceiptTypes.READ) return {r["user_id"] for r in receipts} - @cached(num_args=2) + @cached() async def get_receipts_for_room( self, room_id: str, receipt_type: str ) -> List[Dict[str, Any]]: + """ + Fetch the event IDs for the latest receipt for all users in a room with the given receipt type. + + Args: + room_id: The room ID to fetch the receipt for. + receipt_type: The receipt type to fetch. + + Returns: + A list of dictionaries, one for each user ID. Each dictionary + contains a user ID and the event ID of that user's latest receipt. + """ return await self.db_pool.simple_select_list( table="receipts_linearized", keyvalues={"room_id": room_id, "receipt_type": receipt_type}, @@ -133,10 +144,21 @@ class ReceiptsWorkerStore(SQLBaseStore): desc="get_receipts_for_room", ) - @cached(num_args=3) + @cached() async def get_last_receipt_event_id_for_user( self, user_id: str, room_id: str, receipt_type: str ) -> Optional[str]: + """ + Fetch the event ID for the latest receipt in a room with the given receipt type. + + Args: + user_id: The user to fetch receipts for. + room_id: The room ID to fetch the receipt for. + receipt_type: The receipt type to fetch. + + Returns: + The event ID of the latest receipt, if one exists; otherwise `None`. + """ return await self.db_pool.simple_select_one_onecol( table="receipts_linearized", keyvalues={ @@ -149,10 +171,23 @@ class ReceiptsWorkerStore(SQLBaseStore): allow_none=True, ) - @cached(num_args=2) + @cached() async def get_receipts_for_user( self, user_id: str, receipt_type: str ) -> Dict[str, str]: + """ + Fetch the event IDs for the latest receipts sent by the given user. + + Args: + user_id: The user to fetch receipts for. + receipt_type: The receipt type to fetch. + + Returns: + A map of room ID to the event ID of the latest receipt for that room. + + If the user has not sent a receipt to a room then it will not appear + in the returned dictionary. + """ rows = await self.db_pool.simple_select_list( table="receipts_linearized", keyvalues={"user_id": user_id, "receipt_type": receipt_type}, @@ -165,6 +200,17 @@ class ReceiptsWorkerStore(SQLBaseStore): async def get_receipts_for_user_with_orderings( self, user_id: str, receipt_type: str ) -> JsonDict: + """ + Fetch receipts for all rooms that the given user is joined to. + + Args: + user_id: The user to fetch receipts for. + receipt_type: The receipt type to fetch. + + Returns: + A map of room ID to the latest receipt information. 
+ """ + def f(txn: LoggingTransaction) -> List[Tuple[str, str, int, int]]: sql = ( "SELECT rl.room_id, rl.event_id," @@ -241,7 +287,7 @@ class ReceiptsWorkerStore(SQLBaseStore): return await self._get_linearized_receipts_for_room(room_id, to_key, from_key) - @cached(num_args=3, tree=True) + @cached(tree=True) async def _get_linearized_receipts_for_room( self, room_id: str, to_key: int, from_key: Optional[int] = None ) -> List[JsonDict]: @@ -541,7 +587,7 @@ class ReceiptsWorkerStore(SQLBaseStore): data: JsonDict, stream_id: int, ) -> Optional[int]: - """Inserts a read-receipt into the database if it's newer than the current RR + """Inserts a receipt into the database if it's newer than the current one. Returns: None if the RR is older than the current RR From 57fac2a2347e0b386385868cdd1ae435364062b5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 29 Apr 2022 17:57:23 +0100 Subject: [PATCH 19/87] Allow unused ignores in "bleeding edge" CI (#12576) * Allow unused ignores in "bleeding edge" CI Where "bleeding edge" means the Twisted Trunk and Latest Deps jobs. Follow up from #12531. Resolves #12574. * Use `--extras all` in latest deps mypy CI Twisted trunk job already does this. Missed in #12531. * changelog --- .github/workflows/latest_deps.yml | 3 +++ .github/workflows/twisted_trunk.yml | 2 ++ changelog.d/12576.misc | 1 + 3 files changed, 6 insertions(+) create mode 100644 changelog.d/12576.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 1a61d179d9..c537a5a60f 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -32,12 +32,15 @@ jobs: with: python-version: "3.x" poetry-version: "1.2.0b1" + extras: "all" # Dump installed versions for debugging. - run: poetry run pip list > before.txt # Upgrade all runtime dependencies only. This is intended to mimic a fresh # `pip install matrix-synapse[all]` as closely as possible. - run: poetry update --no-dev - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true) + - name: Remove warn_unused_ignores from mypy config + run: sed '/warn_unused_ignores = True/d' -i mypy.ini - run: poetry run mypy trial: runs-on: ubuntu-latest diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 8fc1affb77..5f0671f350 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -24,6 +24,8 @@ jobs: poetry remove twisted poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk poetry install --no-interaction --extras "all test" + - name: Remove warn_unused_ignores from mypy config + run: sed '/warn_unused_ignores = True/d' -i mypy.ini - run: poetry run mypy trial: diff --git a/changelog.d/12576.misc b/changelog.d/12576.misc new file mode 100644 index 0000000000..71022c8633 --- /dev/null +++ b/changelog.d/12576.misc @@ -0,0 +1 @@ +Allow unused `#type: ignore` comments in bleeding edge CI jobs. From 8d156ec0ba17d848581f18aa40ebfd76dda763d4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 29 Apr 2022 22:05:18 +0100 Subject: [PATCH 20/87] Remove special-case for `twisted` logger (#12589) This was originally added when we first added a `MemoryHandler` to the default log config back in https://github.com/matrix-org/synapse/pull/8040, to ensure that we didn't explode with an infinite loop if there was an error formatting the logs. Since then, we made additional improvements to logging which make this workaround redundant. 
In particular: * we no longer attempt to log un-UTF8-decodable byte sequences, which were the most likely cause of an error in the first place. * https://github.com/matrix-org/synapse/pull/8268 ensures that in the unlikely case that there *is* an error, it won't cause an infinite loop. --- changelog.d/12589.misc | 1 + docs/sample_log_config.yaml | 7 ------- synapse/config/logger.py | 7 ------- 3 files changed, 1 insertion(+), 14 deletions(-) create mode 100644 changelog.d/12589.misc diff --git a/changelog.d/12589.misc b/changelog.d/12589.misc new file mode 100644 index 0000000000..d362828d2e --- /dev/null +++ b/changelog.d/12589.misc @@ -0,0 +1 @@ +Remove special-case for `twisted` logger from default log config. diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml index 2485ad25ed..3065a0e2d9 100644 --- a/docs/sample_log_config.yaml +++ b/docs/sample_log_config.yaml @@ -62,13 +62,6 @@ loggers: # information such as access tokens. level: INFO - twisted: - # We send the twisted logging directly to the file handler, - # to work around https://github.com/matrix-org/synapse/issues/3471 - # when using "buffer" logger. Use "console" to log to stderr instead. - handlers: [file] - propagate: false - root: level: INFO diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 99db9e1e39..470b8b4492 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -110,13 +110,6 @@ loggers: # information such as access tokens. level: INFO - twisted: - # We send the twisted logging directly to the file handler, - # to work around https://github.com/matrix-org/synapse/issues/3471 - # when using "buffer" logger. Use "console" to log to stderr instead. - handlers: [file] - propagate: false - root: level: INFO From 8f5d2823dff26d4c8756f1db348b3b4908ef27f5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 3 May 2022 10:53:09 +0100 Subject: [PATCH 21/87] 1.58.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 1fbe0815de..f0fc4e5bae 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.58.0 (2022-05-03) +=========================== + +No significant changes since 1.58.0rc2. + + Synapse 1.58.0rc2 (2022-04-26) ============================== diff --git a/debian/changelog b/debian/changelog index 5f1bf872bb..53b2387776 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.58.0) stable; urgency=medium + + * New Synapse release 1.58.0. + + -- Synapse Packaging team Tue, 03 May 2022 10:52:58 +0100 + matrix-synapse-py3 (1.58.0~rc2) stable; urgency=medium * New Synapse release 1.58.0rc2. 
diff --git a/pyproject.toml b/pyproject.toml index bdded78434..f0f029f016 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.58.0rc2" +version = "1.58.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 7e6598bcf6cf63b4d04c27b9696ed8429b6ddff6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 3 May 2022 10:54:20 +0100 Subject: [PATCH 22/87] Move groups/communities deprecation notice to 1.58.0 heading --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f0fc4e5bae..31f1561274 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.58.0 (2022-05-03) =========================== +As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61. + No significant changes since 1.58.0rc2. @@ -25,8 +27,6 @@ Internal Changes Synapse 1.58.0rc1 (2022-04-26) ============================== -As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61. - Features -------- From 01dcf7532d7863b2721156a4817716a8628fe73f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 3 May 2022 11:03:20 +0100 Subject: [PATCH 23/87] Prune mypy ignore_missing_imports list (#12608) --- changelog.d/12608.misc | 1 + mypy.ini | 53 ++---------------------------------------- 2 files changed, 3 insertions(+), 51 deletions(-) create mode 100644 changelog.d/12608.misc diff --git a/changelog.d/12608.misc b/changelog.d/12608.misc new file mode 100644 index 0000000000..38272118fb --- /dev/null +++ b/changelog.d/12608.misc @@ -0,0 +1 @@ +Remove redundant lines of config from `mypy.ini`. 
\ No newline at end of file diff --git a/mypy.ini b/mypy.ini index ef28216418..78699e3187 100644 --- a/mypy.ini +++ b/mypy.ini @@ -241,98 +241,49 @@ disallow_untyped_defs = True [mypy-authlib.*] ignore_missing_imports = True -[mypy-bcrypt] -ignore_missing_imports = True - [mypy-canonicaljson] ignore_missing_imports = True [mypy-constantly] ignore_missing_imports = True -[mypy-daemonize] -ignore_missing_imports = True - -[mypy-h11] -ignore_missing_imports = True - -[mypy-hiredis] -ignore_missing_imports = True - -[mypy-hyperlink] -ignore_missing_imports = True - [mypy-ijson.*] ignore_missing_imports = True -[mypy-importlib_metadata.*] -ignore_missing_imports = True - -[mypy-jaeger_client.*] -ignore_missing_imports = True - -[mypy-josepy.*] -ignore_missing_imports = True - -[mypy-jwt.*] -ignore_missing_imports = True - [mypy-lxml] ignore_missing_imports = True [mypy-msgpack] ignore_missing_imports = True -[mypy-nacl.*] -ignore_missing_imports = True - +# Note: WIP stubs available at +# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr [mypy-netaddr] ignore_missing_imports = True [mypy-parameterized.*] ignore_missing_imports = True -[mypy-phonenumbers.*] -ignore_missing_imports = True - -[mypy-prometheus_client.*] -ignore_missing_imports = True - [mypy-pymacaroons.*] ignore_missing_imports = True [mypy-pympler.*] ignore_missing_imports = True -[mypy-redbaron.*] -ignore_missing_imports = True - [mypy-rust_python_jaeger_reporter.*] ignore_missing_imports = True [mypy-saml2.*] ignore_missing_imports = True -[mypy-sentry_sdk] -ignore_missing_imports = True - [mypy-service_identity.*] ignore_missing_imports = True -[mypy-signedjson.*] -ignore_missing_imports = True - [mypy-srvlookup.*] ignore_missing_imports = True [mypy-treq.*] ignore_missing_imports = True -[mypy-twisted.*] -ignore_missing_imports = True - -[mypy-zope] -ignore_missing_imports = True - [mypy-incremental.*] ignore_missing_imports = True From ae7858f184f4fd1533d0a5dd70174e3c70f529ad Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 May 2022 11:47:21 +0100 Subject: [PATCH 24/87] Fix race when persisting an event and deleting a room (#12594) This works by taking a row level lock on the `rooms` table at the start of both transactions, ensuring that they don't run at the same time. In the event persistence transaction we also check that there is an entry still in the `rooms` table. I can't figure out how to do this in SQLite. I was just going to lock the table, but it seems that we don't support that in SQLite either, so I'm *really* confused as to how we maintain integrity in SQLite when using `lock_table`.... --- changelog.d/12594.bugfix | 1 + synapse/storage/databases/main/events.py | 15 +++++++++++++++ synapse/storage/databases/main/purge_events.py | 8 ++++++-- 3 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12594.bugfix diff --git a/changelog.d/12594.bugfix b/changelog.d/12594.bugfix new file mode 100644 index 0000000000..7411d9c079 --- /dev/null +++ b/changelog.d/12594.bugfix @@ -0,0 +1 @@ +Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. 
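In PostgreSQL terms, the two transactions serialise on the `rooms` row roughly as follows (a sketch using a hypothetical `txn` cursor and helper names; `?` placeholders as in Synapse's database layer):

    def check_room_still_exists(txn, room_id: str) -> None:
        # Persistence side: take a shared lock on the room's row, then
        # verify the room still exists. If the purge transaction has
        # already deleted the row, abort instead of persisting an event
        # into a deleted room.
        txn.execute(
            "SELECT room_version FROM rooms WHERE room_id = ? FOR SHARE",
            (room_id,),
        )
        if txn.fetchone() is None:
            raise Exception(f"Room does not exist {room_id}")


    def delete_room_row(txn, room_id: str) -> None:
        # Purge side: the DELETE takes an exclusive lock on the same row,
        # which conflicts with FOR SHARE, so it blocks until any in-flight
        # persistence transaction commits; persistence transactions that
        # start afterwards find no row and bail out.
        txn.execute("DELETE FROM rooms WHERE room_id = ?", (room_id,))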
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 2a1e567ce0..9a6c2fd47a 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -47,6 +47,7 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.events_worker import EventCacheEntry from synapse.storage.databases.main.search import SearchEntry +from synapse.storage.engines.postgres import PostgresEngine from synapse.storage.util.id_generators import AbstractStreamIdGenerator from synapse.storage.util.sequence import SequenceGenerator from synapse.types import StateMap, get_domain_from_id @@ -364,6 +365,20 @@ class PersistEventsStore: min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering + # We check that the room still exists for events we're trying to + # persist. This is to protect against races with deleting a room. + # + # Annoyingly SQLite doesn't support row level locking. + if isinstance(self.database_engine, PostgresEngine): + for room_id in {e.room_id for e, _ in events_and_contexts}: + txn.execute( + "SELECT room_version FROM rooms WHERE room_id = ? FOR SHARE", + (room_id,), + ) + row = txn.fetchone() + if row is None: + raise Exception(f"Room does not exist {room_id}") + # stream orderings should have been assigned by now assert min_stream_order assert max_stream_order diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 2e3818e432..bfc85b3add 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -324,7 +324,12 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): ) def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: - # First we fetch all the state groups that should be deleted, before + # We *immediately* delete the room from the rooms table. This ensures + # that we don't race when persisting events (as that transaction checks + # that the room exists). + txn.execute("DELETE FROM rooms WHERE room_id = ?", (room_id,)) + + # Next, we fetch all the state groups that should be deleted, before # we delete that information. txn.execute( """ @@ -403,7 +408,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): "room_stats_state", "room_stats_current", "room_stats_earliest_token", - "rooms", "stream_ordering_to_exterm", "users_in_public_rooms", "users_who_share_private_rooms", From bf2fea8f7dace1aa6597b9fc8fa782433409d718 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 3 May 2022 11:50:03 +0100 Subject: [PATCH 25/87] Add sanity checks to the release script (#12556) Check we're on the right branch before tagging, and on the right tag before uploading * Abort if we're on the wrong branch * Check we have the right tag checked out * Clarify that `publish` only releases to GitHub --- changelog.d/12556.misc | 1 + scripts-dev/release.py | 63 +++++++++++++++++++++++++----------------- 2 files changed, 38 insertions(+), 26 deletions(-) create mode 100644 changelog.d/12556.misc diff --git a/changelog.d/12556.misc b/changelog.d/12556.misc new file mode 100644 index 0000000000..dc245397fb --- /dev/null +++ b/changelog.d/12556.misc @@ -0,0 +1 @@ +Release script: confirm the commit to be tagged before tagging. 
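The pre-tag check added below amounts to a small GitPython guard; roughly (a sketch — the script itself reports the mismatch via `click.echo` and aborts the click context, and `current_version` is assumed to be a `packaging.version.Version`):

    import click
    import git
    from packaging import version


    def check_release_checkout(current_version: version.Version) -> git.Repo:
        # Refuse to do anything from a dirty checkout.
        repo = git.Repo()
        if repo.is_dirty():
            raise click.ClickException("Uncommitted changes exist.")

        # Refuse to tag unless the matching release branch is checked out.
        release_branch = f"release-v{current_version.major}.{current_version.minor}"
        if repo.active_branch.name != release_branch:
            raise click.ClickException(
                f"Need to be on {release_branch}, currently on {repo.active_branch.name}."
            )
        return repo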
diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 14f3f3a45d..f4269e09bb 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -89,13 +89,7 @@ def prepare() -> None: """ # Make sure we're in a git repo. - try: - repo = git.Repo() - except git.InvalidGitRepositoryError: - raise click.ClickException("Not in Synapse repo.") - - if repo.is_dirty(): - raise click.ClickException("Uncommitted changes exist.") + repo = get_repo_and_check_clean_checkout() click.secho("Updating git repo...") repo.remote().fetch() @@ -171,9 +165,7 @@ def prepare() -> None: assert not parsed_new_version.is_devrelease assert not parsed_new_version.is_postrelease - release_branch_name = ( - f"release-v{parsed_new_version.major}.{parsed_new_version.minor}" - ) + release_branch_name = get_release_branch_name(parsed_new_version) release_branch = find_ref(repo, release_branch_name) if release_branch: if release_branch.is_remote(): @@ -274,13 +266,7 @@ def tag(gh_token: Optional[str]) -> None: """Tags the release and generates a draft GitHub release""" # Make sure we're in a git repo. - try: - repo = git.Repo() - except git.InvalidGitRepositoryError: - raise click.ClickException("Not in Synapse repo.") - - if repo.is_dirty(): - raise click.ClickException("Uncommitted changes exist.") + repo = get_repo_and_check_clean_checkout() click.secho("Updating git repo...") repo.remote().fetch() @@ -293,6 +279,15 @@ def tag(gh_token: Optional[str]) -> None: if tag_name in repo.tags: raise click.ClickException(f"Tag {tag_name} already exists!\n") + # Check we're on the right release branch + release_branch = get_release_branch_name(current_version) + if repo.active_branch.name != release_branch: + click.echo( + f"Need to be on the release branch ({release_branch}) before tagging. " + f"Currently on ({repo.active_branch.name})." + ) + click.get_current_context().abort() + # Get the appropriate changelogs and tag. changes = get_changes_for_version(current_version) @@ -358,21 +353,15 @@ def tag(gh_token: Optional[str]) -> None: @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True) def publish(gh_token: str) -> None: - """Publish release.""" + """Publish release on GitHub.""" # Make sure we're in a git repo. - try: - repo = git.Repo() - except git.InvalidGitRepositoryError: - raise click.ClickException("Not in Synapse repo.") - - if repo.is_dirty(): - raise click.ClickException("Uncommitted changes exist.") + get_repo_and_check_clean_checkout() current_version = get_package_version() tag_name = f"v{current_version}" - if not click.confirm(f"Publish {tag_name}?", default=True): + if not click.confirm(f"Publish release {tag_name} on GitHub?", default=True): return # Publish the draft release @@ -406,6 +395,13 @@ def upload() -> None: current_version = get_package_version() tag_name = f"v{current_version}" + # Check we have the right tag checked out. 
+    repo = get_repo_and_check_clean_checkout()
+    tag = repo.tag(f"refs/tags/{tag_name}")
+    if repo.head.commit != tag.commit:
+        click.echo(f"Tag {tag_name} ({tag.commit}) is not currently checked out!")
+        click.get_current_context().abort()
+
     pypi_asset_names = [
         f"matrix_synapse-{current_version}-py3-none-any.whl",
         f"matrix-synapse-{current_version}.tar.gz",
@@ -469,6 +465,21 @@ def get_package_version() -> version.Version:
     return version.Version(version_string)
 
 
+def get_release_branch_name(version_number: version.Version) -> str:
+    return f"release-v{version_number.major}.{version_number.minor}"
+
+
+def get_repo_and_check_clean_checkout() -> git.Repo:
+    """Get the project repo and check it's not got any uncommitted changes."""
+    try:
+        repo = git.Repo()
+    except git.InvalidGitRepositoryError:
+        raise click.ClickException("Not in Synapse repo.")
+    if repo.is_dirty():
+        raise click.ClickException("Uncommitted changes exist.")
+    return repo
+
+
 def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
     """Find the branch/ref, looking first locally then in the remote."""
     if ref_name in repo.references:
From db2edf5a65c5bcac565e052b2dbd74253755a717 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 3 May 2022 13:47:56 +0100
Subject: [PATCH 26/87] Exclude OOB memberships from the federation sender
 (#12570)

As the comment says, there is no need to process such events, and indeed
we need to avoid doing so.

Fixes #12509.
---
 changelog.d/12570.bugfix              |  1 +
 synapse/events/__init__.py            | 15 ++++++++---
 synapse/federation/sender/__init__.py | 39 +++++++++++++++++++++++++++
 3 files changed, 51 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/12570.bugfix

diff --git a/changelog.d/12570.bugfix b/changelog.d/12570.bugfix
new file mode 100644
index 0000000000..1038646f35
--- /dev/null
+++ b/changelog.d/12570.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.57 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation.
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 9acb3c0cc4..3e4b4f0384 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -213,10 +213,17 @@ class _EventInternalMetadata:
         return self.outlier
 
     def is_out_of_band_membership(self) -> bool:
-        """Whether this is an out of band membership, like an invite or an invite
-        rejection. This is needed as those events are marked as outliers, but
-        they still need to be processed as if they're new events (e.g. updating
-        invite state in the database, relaying to clients, etc).
+        """Whether this event is an out-of-band membership.
+
+        OOB memberships are a special case of outlier events: they are membership events
+        for federated rooms that we aren't full members. Examples include invites
+        received over federation, and rejections for such invites.
+
+        The concept of an OOB membership is needed because these events need to be
+        processed as if they're new regular events (e.g. updating membership state in
+        the database, relaying to clients via /sync, etc) despite being outliers.
+
+        See also https://matrix-org.github.io/synapse/develop/development/room-dag-concepts.html#out-of-band-membership-events.
+
(Added in synapse 0.99.0, so may be unreliable for events received before that) """ diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 30e2421efc..d795b7b26b 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -355,6 +355,45 @@ class FederationSender(AbstractFederationSender): if not is_mine and send_on_behalf_of is None: return + # We also want to not send out-of-band membership events. + # + # OOB memberships are used in three (and a half) situations: + # + # (1) invite events which we have received over federation. Those + # will have a `sender` on a different server, so will be + # skipped by the "is_mine" test above anyway. + # + # (2) rejections of invites to federated rooms - either remotely + # or locally generated. (Such rejections are normally + # created via federation, in which case the remote server is + # responsible for sending out the rejection. If that fails, + # we'll create a leave event locally, but that's only really + # for the benefit of the invited user - we don't have enough + # information to send it out over federation). + # + # (2a) rescinded knocks. These are identical to rejected invites. + # + # (3) knock events which we have sent over federation. As with + # invite rejections, the remote server should send them out to + # the federation. + # + # So, in all the above cases, we want to ignore such events. + # + # OOB memberships are always(?) outliers anyway, so if we *don't* + # ignore them, we'll get an exception further down when we try to + # fetch the membership list for the room. + # + # Arguably, we could equivalently ignore all outliers here, since + # in theory the only way for an outlier with a local `sender` to + # exist is by being an OOB membership (via one of (2), (2a) or (3) + # above). + # + if event.internal_metadata.is_out_of_band_membership(): + return + + # Finally, there are some other events that we should not send out + # until someone asks for them. They are explicitly flagged as such + # with `proactively_send: False`. if not event.internal_metadata.should_proactively_send(): return From 5938928c59bac95670c3e582025dd6d819992e4f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 3 May 2022 13:50:50 +0100 Subject: [PATCH 27/87] minor wording fix in docstring --- synapse/events/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 3e4b4f0384..56c6ccc22c 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -14,8 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import abc -import os +import abcimport os from typing import ( TYPE_CHECKING, Any, @@ -216,7 +215,7 @@ class _EventInternalMetadata: """Whether this event is an out-of-band membership. OOB memberships are a special case of outlier events: they are membership events - for federated rooms that we aren't full members. Examples include invites + for federated rooms that we aren't full members of. Examples include invites received over federation, and rejections for such invites. 
The concept of an OOB membership is needed because these events need to be From 77dee1b451aa5c5c3644e5e4e5afefda2a44aa15 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 3 May 2022 13:59:28 +0100 Subject: [PATCH 28/87] fix imports broken in 5938928 :-S --- synapse/events/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 56c6ccc22c..c238376caf 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -14,7 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import abcimport os +import abc +import os from typing import ( TYPE_CHECKING, Any, From c4514b97db551215334c4128cbc8ef12def560ba Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 3 May 2022 15:46:42 +0100 Subject: [PATCH 29/87] Add missing space before 'docker' link in release announcement script (#12612) --- changelog.d/12612.bugfix | 1 + scripts-dev/release.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12612.bugfix diff --git a/changelog.d/12612.bugfix b/changelog.d/12612.bugfix new file mode 100644 index 0000000000..c39e97f0cb --- /dev/null +++ b/changelog.d/12612.bugfix @@ -0,0 +1 @@ +Fix a typo in the announcement text generated by the Synapse release development script. \ No newline at end of file diff --git a/scripts-dev/release.py b/scripts-dev/release.py index f4269e09bb..0031ba3e4b 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -434,7 +434,7 @@ def announce() -> None: f""" Hi everyone. Synapse {current_version} has just been released. -[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) |\ +[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \ [docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \ [debs](https://packages.matrix.org/debian/) | \ [pypi](https://pypi.org/project/matrix-synapse/{current_version}/)""" From d66d68f9177d3addc32f6ab02458e3620f5ffc1f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 3 May 2022 16:32:40 +0100 Subject: [PATCH 30/87] Add extra debug logging to federation sender (#12614) ... in order to debug some problems we've been having with certain events not being sent when expected. --- changelog.d/12614.misc | 1 + synapse/federation/sender/__init__.py | 20 ++++++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12614.misc diff --git a/changelog.d/12614.misc b/changelog.d/12614.misc new file mode 100644 index 0000000000..79022df127 --- /dev/null +++ b/changelog.d/12614.misc @@ -0,0 +1 @@ +Add extra debug logging to federation sender. 
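A side note on the logging style used in the diff below: values are passed as arguments rather than pre-formatted into the string, so interpolation only happens when a handler actually wants DEBUG records. A minimal standalone sketch (names assumed) of the pattern:

```python
import logging
from typing import List

logger = logging.getLogger("synapse.federation.sender")

def log_batch(last_token: int, next_token: int, events: List[str]) -> None:
    # The %i arguments are only interpolated if DEBUG is enabled, keeping
    # this essentially free on production servers logging at INFO.
    logger.debug(
        "Handling %i -> %i: %i events to send",
        last_token,
        next_token,
        len(events),
    )
```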
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index d795b7b26b..6d2f46318b 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -343,9 +343,16 @@ class FederationSender(AbstractFederationSender): last_token, self._last_poked_id, limit=100 ) - logger.debug("Handling %s -> %s", last_token, next_token) + logger.debug( + "Handling %i -> %i: %i events to send (current id %i)", + last_token, + next_token, + len(events), + self._last_poked_id, + ) if not events and next_token >= self._last_poked_id: + logger.debug("All events processed") break async def handle_event(event: EventBase) -> None: @@ -353,6 +360,7 @@ class FederationSender(AbstractFederationSender): send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of() is_mine = self.is_mine_id(event.sender) if not is_mine and send_on_behalf_of is None: + logger.debug("Not sending remote-origin event %s", event) return # We also want to not send out-of-band membership events. @@ -389,12 +397,16 @@ class FederationSender(AbstractFederationSender): # above). # if event.internal_metadata.is_out_of_band_membership(): + logger.debug("Not sending OOB membership event %s", event) return # Finally, there are some other events that we should not send out # until someone asks for them. They are explicitly flagged as such # with `proactively_send: False`. if not event.internal_metadata.should_proactively_send(): + logger.debug( + "Not sending event with proactively_send=false: %s", event + ) return destinations: Optional[Set[str]] = None @@ -458,7 +470,10 @@ class FederationSender(AbstractFederationSender): "federation_sender" ).observe((now - ts) / 1000) - async def handle_room_events(events: Iterable[EventBase]) -> None: + async def handle_room_events(events: List[EventBase]) -> None: + logger.debug( + "Handling %i events in room %s", len(events), events[0].room_id + ) with Measure(self.clock, "handle_room_events"): for event in events: await handle_event(event) @@ -477,6 +492,7 @@ class FederationSender(AbstractFederationSender): ) ) + logger.debug("Successfully handled up to %i", next_token) await self.store.update_federation_out_pos("events", next_token) if events: From aa5f5ede3331bf916f459137e9d0df3ac0743093 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 3 May 2022 12:43:12 -0400 Subject: [PATCH 31/87] Remove unstable identifiers for MSC3069. (#12596) --- changelog.d/12596.removal | 1 + synapse/rest/client/account.py | 2 -- tests/rest/client/test_account.py | 6 ------ 3 files changed, 1 insertion(+), 8 deletions(-) create mode 100644 changelog.d/12596.removal diff --git a/changelog.d/12596.removal b/changelog.d/12596.removal new file mode 100644 index 0000000000..14fbfb3954 --- /dev/null +++ b/changelog.d/12596.removal @@ -0,0 +1 @@ +Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). 
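For context on the next patch: after it, `/account/whoami` returns only the stable `is_guest` field that entered the spec in Matrix 1.2. A hedged client-side sketch (the helper name is illustrative) of coping with servers on either side of the change:

```python
from typing import Any, Dict

def is_guest_from_whoami(whoami: Dict[str, Any]) -> bool:
    # Prefer the stable Matrix 1.2 field; fall back to the unstable
    # MSC3069 identifier for older servers that still send it.
    if "is_guest" in whoami:
        return bool(whoami["is_guest"])
    return bool(whoami.get("org.matrix.msc3069.is_guest", False))
```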
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 5587cae98a..bdc4a9c068 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -882,9 +882,7 @@ class WhoamiRestServlet(RestServlet): response = { "user_id": requester.user.to_string(), - # MSC: https://github.com/matrix-org/matrix-doc/pull/3069 # Entered spec in Matrix 1.2 - "org.matrix.msc3069.is_guest": bool(requester.is_guest), "is_guest": bool(requester.is_guest), } diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index e00b5c171c..e0a11da97b 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -520,8 +520,6 @@ class WhoamiTestCase(unittest.HomeserverTestCase): { "user_id": user_id, "device_id": device_id, - # MSC3069 entered spec in Matrix 1.2 but maintained compatibility - "org.matrix.msc3069.is_guest": False, "is_guest": False, }, ) @@ -540,8 +538,6 @@ class WhoamiTestCase(unittest.HomeserverTestCase): { "user_id": user_id, "device_id": device_id, - # MSC3069 entered spec in Matrix 1.2 but maintained compatibility - "org.matrix.msc3069.is_guest": True, "is_guest": True, }, ) @@ -564,8 +560,6 @@ class WhoamiTestCase(unittest.HomeserverTestCase): whoami, { "user_id": user_id, - # MSC3069 entered spec in Matrix 1.2 but maintained compatibility - "org.matrix.msc3069.is_guest": False, "is_guest": False, }, ) From 9ce51a47f6e37abd0a1275281806399d874eb026 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 3 May 2022 19:22:06 +0100 Subject: [PATCH 32/87] Bump Synapse minimum Python version to 3.7.1 (#12613) --- changelog.d/12613.removal | 1 + poetry.lock | 4 ++-- pyproject.toml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/12613.removal diff --git a/changelog.d/12613.removal b/changelog.d/12613.removal new file mode 100644 index 0000000000..b1a9e207b0 --- /dev/null +++ b/changelog.d/12613.removal @@ -0,0 +1 @@ +Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. diff --git a/poetry.lock b/poetry.lock index e27a44989c..89e78576b7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1561,8 +1561,8 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" -python-versions = "^3.7" -content-hash = "3825cef058b8c9f520ef4b7acb92519be95db9a663a61c2e89a5fe431ed55655" +python-versions = "^3.7.1" +content-hash = "2bda1a7cfc8cc02832b4a7d16bf7e1615cb05e0639bdb30688aadf692d851942" [metadata.files] attrs = [ diff --git a/pyproject.toml b/pyproject.toml index 62e26fd95b..cf3982d124 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -100,7 +100,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main" update_synapse_database = "synapse._scripts.update_synapse_database:main" [tool.poetry.dependencies] -python = "^3.7" +python = "^3.7.1" # Mandatory Dependencies # ---------------------- From 96e0cdbc5af0563ee805ec4e588e1df14899af66 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 3 May 2022 21:27:52 +0100 Subject: [PATCH 33/87] Add a consistency check on events read from the database (#12620) I've seen a few errors which can only plausibly be explained by the calculated event id for an event being different from the ID of the event in the database. It should be cheap to check this, so let's do so and raise an exception. 
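The check is cheap because Synapse already recomputes the event ID from the stored JSON when deserialising an event, so a mismatch can only mean the row's content changed after the ID was assigned. A toy illustration of the idea, not Synapse's algorithm (a plain SHA-256 over sorted-key JSON stands in for the real Matrix reference-hash scheme, and the names are illustrative):

```python
import hashlib
import json
from typing import Any, Dict

def calculated_event_id(event_json: Dict[str, Any]) -> str:
    # Stand-in for the Matrix reference hash: a digest over the
    # canonicalised (sorted-key) JSON of the event.
    digest = hashlib.sha256(
        json.dumps(event_json, sort_keys=True).encode("utf-8")
    ).hexdigest()
    return f"${digest}"

def check_event_row(stored_event_id: str, event_json: Dict[str, Any]) -> None:
    if calculated_event_id(event_json) != stored_event_id:
        raise RuntimeError(
            f"Database corruption: event {stored_event_id} appears to have been modified"
        )
```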
--- changelog.d/12620.misc | 1 + .../storage/databases/main/events_worker.py | 12 ++++ .../databases/main/test_events_worker.py | 59 ++++++++++++------- 3 files changed, 50 insertions(+), 22 deletions(-) create mode 100644 changelog.d/12620.misc diff --git a/changelog.d/12620.misc b/changelog.d/12620.misc new file mode 100644 index 0000000000..63f8e540c3 --- /dev/null +++ b/changelog.d/12620.misc @@ -0,0 +1 @@ +Add a consistency check on events which we read from the database. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index c31fc00eaa..0a48e5d29f 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1094,6 +1094,18 @@ class EventsWorkerStore(SQLBaseStore): original_ev.internal_metadata.stream_ordering = row.stream_ordering original_ev.internal_metadata.outlier = row.outlier + # Consistency check: if the content of the event has been modified in the + # database, then the calculated event ID will not match the event id in the + # database. + if original_ev.event_id != event_id: + # it's difficult to see what to do here. Pretty much all bets are off + # if Synapse cannot rely on the consistency of its database. + raise RuntimeError( + f"Database corruption: Event {event_id} in room {d['room_id']} " + f"from the database appears to have been modified (calculated " + f"event id {original_ev.event_id})" + ) + event_map[event_id] = original_ev # finally, we can decide whether each one needs redacting, and build diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index bf6374f93d..c237a8c7e2 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -13,7 +13,7 @@ # limitations under the License. 
import json from contextlib import contextmanager -from typing import Generator, Tuple +from typing import Generator, List, Tuple from unittest import mock from twisted.enterprise.adbapi import ConnectionPool @@ -21,6 +21,7 @@ from twisted.internet.defer import CancelledError, Deferred, ensureDeferred from twisted.test.proto_helpers import MemoryReactor from synapse.api.room_versions import EventFormatVersions, RoomVersions +from synapse.events import make_event_from_dict from synapse.logging.context import LoggingContext from synapse.rest import admin from synapse.rest.client import login, room @@ -49,23 +50,28 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase): ) ) - for idx, (rid, eid) in enumerate( + self.event_ids: List[str] = [] + for idx, rid in enumerate( ( - ("room1", "event10"), - ("room1", "event11"), - ("room1", "event12"), - ("room2", "event20"), + "room1", + "room1", + "room1", + "room2", ) ): + event_json = {"type": f"test {idx}", "room_id": rid} + event = make_event_from_dict(event_json, room_version=RoomVersions.V4) + event_id = event.event_id + self.get_success( self.store.db_pool.simple_insert( "events", { - "event_id": eid, + "event_id": event_id, "room_id": rid, "topological_ordering": idx, "stream_ordering": idx, - "type": "test", + "type": event.type, "processed": True, "outlier": False, }, @@ -75,21 +81,22 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase): self.store.db_pool.simple_insert( "event_json", { - "event_id": eid, + "event_id": event_id, "room_id": rid, - "json": json.dumps({"type": "test", "room_id": rid}), + "json": json.dumps(event_json), "internal_metadata": "{}", "format_version": 3, }, ) ) + self.event_ids.append(event_id) def test_simple(self): with LoggingContext(name="test") as ctx: res = self.get_success( - self.store.have_seen_events("room1", ["event10", "event19"]) + self.store.have_seen_events("room1", [self.event_ids[0], "event19"]) ) - self.assertEqual(res, {"event10"}) + self.assertEqual(res, {self.event_ids[0]}) # that should result in a single db query self.assertEqual(ctx.get_resource_usage().db_txn_count, 1) @@ -97,19 +104,21 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase): # a second lookup of the same events should cause no queries with LoggingContext(name="test") as ctx: res = self.get_success( - self.store.have_seen_events("room1", ["event10", "event19"]) + self.store.have_seen_events("room1", [self.event_ids[0], "event19"]) ) - self.assertEqual(res, {"event10"}) + self.assertEqual(res, {self.event_ids[0]}) self.assertEqual(ctx.get_resource_usage().db_txn_count, 0) def test_query_via_event_cache(self): # fetch an event into the event cache - self.get_success(self.store.get_event("event10")) + self.get_success(self.store.get_event(self.event_ids[0])) # looking it up should now cause no db hits with LoggingContext(name="test") as ctx: - res = self.get_success(self.store.have_seen_events("room1", ["event10"])) - self.assertEqual(res, {"event10"}) + res = self.get_success( + self.store.have_seen_events("room1", [self.event_ids[0]]) + ) + self.assertEqual(res, {self.event_ids[0]}) self.assertEqual(ctx.get_resource_usage().db_txn_count, 0) @@ -167,7 +176,6 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase): self.store: EventsWorkerStore = hs.get_datastores().main self.room_id = f"!room:{hs.hostname}" - self.event_ids = [f"event{i}" for i in range(20)] self._populate_events() @@ -190,8 +198,14 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase): ) ) - self.event_ids = [f"event{i}" for i 
in range(20)] - for idx, event_id in enumerate(self.event_ids): + self.event_ids: List[str] = [] + for idx in range(20): + event_json = { + "type": f"test {idx}", + "room_id": self.room_id, + } + event = make_event_from_dict(event_json, room_version=RoomVersions.V4) + event_id = event.event_id self.get_success( self.store.db_pool.simple_upsert( "events", @@ -201,7 +215,7 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase): "room_id": self.room_id, "topological_ordering": idx, "stream_ordering": idx, - "type": "test", + "type": event.type, "processed": True, "outlier": False, }, @@ -213,12 +227,13 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase): {"event_id": event_id}, { "room_id": self.room_id, - "json": json.dumps({"type": "test", "room_id": self.room_id}), + "json": json.dumps(event_json), "internal_metadata": "{}", "format_version": EventFormatVersions.V3, }, ) ) + self.event_ids.append(event_id) @contextmanager def _outage(self) -> Generator[None, None, None]: From 873d467976fef7a58e54b033ee114849d547c793 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 4 May 2022 11:02:19 +0100 Subject: [PATCH 34/87] Fixes to the formatting of README.rst (#12627) Fixes a couple of formatting errors which were introduced in #12475. --- README.rst | 14 +++++++------- changelog.d/12627.doc | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/12627.doc diff --git a/README.rst b/README.rst index 80201d4eb1..219e32de8e 100644 --- a/README.rst +++ b/README.rst @@ -55,7 +55,7 @@ solutions. The hope is for Matrix to act as the building blocks for a new generation of fully open and interoperable messaging and VoIP apps for the internet. -Synapse is a Matrix "homeserver" implementation developed by the matrix.org core +Synapse is a Matrix "homeserver" implementation developed by the matrix.org core team, written in Python 3/Twisted. In Matrix, every user runs one or more Matrix clients, which connect through to @@ -294,13 +294,13 @@ directory of your choice:: cd synapse Synapse has a number of external dependencies. We maintain a fixed development -environment using [poetry](https://python-poetry.org/). First, install poetry. We recommend +environment using `Poetry `_. First, install poetry. We recommend:: - | pip install --user pipx - | pipx install poetry + pip install --user pipx + pipx install poetry as described `here `_. -(See `poetry's installation docs ` +(See `poetry's installation docs `_ for other installation methods.) Then ask poetry to create a virtual environment from the project and install Synapse's dependencies:: @@ -309,11 +309,11 @@ from the project and install Synapse's dependencies:: This will run a process of downloading and installing all the needed dependencies into a virtual env. -We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082` +We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`:: poetry run ./demo/start.sh -(to stop, you can use `poetry run ./demo/stop.sh`) +(to stop, you can use ``poetry run ./demo/stop.sh``) See the `demo documentation `_ for more information. diff --git a/changelog.d/12627.doc b/changelog.d/12627.doc new file mode 100644 index 0000000000..3a787dfef2 --- /dev/null +++ b/changelog.d/12627.doc @@ -0,0 +1 @@ +Fixes to the formatting of README.rst. 
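The next patch swaps the `constantly` library's `NamedConstant` values for a standard-library `Enum`, which drops a dependency (and its missing type stubs) without changing call-site semantics. A minimal sketch of the resulting usage:

```python
from enum import Enum, auto

class EventRedactBehaviour(Enum):
    as_is = auto()
    redact = auto()
    block = auto()

# Enum members are singletons, so call sites can compare by identity or
# equality with no third-party dependency:
behaviour = EventRedactBehaviour.redact
assert behaviour is EventRedactBehaviour.redact
assert behaviour != EventRedactBehaviour.block
```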
From 01e625513afbd9645ff642810d559f96fab13278 Mon Sep 17 00:00:00 2001 From: andrew do Date: Wed, 4 May 2022 04:26:11 -0700 Subject: [PATCH 35/87] remove constantly lib use and switch to enums. (#12624) --- changelog.d/12624.misc | 1 + mypy.ini | 3 -- synapse/handlers/events.py | 2 +- synapse/handlers/federation.py | 4 +- synapse/handlers/federation_event.py | 2 +- synapse/handlers/message.py | 4 +- synapse/state/__init__.py | 2 +- .../storage/databases/main/events_worker.py | 42 +++++++++---------- synapse/storage/databases/main/search.py | 8 ++-- synapse/storage/databases/main/signatures.py | 2 +- synapse/storage/persist_events.py | 4 +- 11 files changed, 36 insertions(+), 38 deletions(-) create mode 100644 changelog.d/12624.misc diff --git a/changelog.d/12624.misc b/changelog.d/12624.misc new file mode 100644 index 0000000000..8772d40fa7 --- /dev/null +++ b/changelog.d/12624.misc @@ -0,0 +1 @@ +Remove use of constantly library and switch to enums for EventRedactBehaviour. Contributed by @andrewdoh. diff --git a/mypy.ini b/mypy.ini index 78699e3187..ba0de419f5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -244,9 +244,6 @@ ignore_missing_imports = True [mypy-canonicaljson] ignore_missing_imports = True -[mypy-constantly] -ignore_missing_imports = True - [mypy-ijson.*] ignore_missing_imports = True diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 5b94b00bc3..82a5aac3dd 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -164,7 +164,7 @@ class EventHandler: event. """ redact_behaviour = ( - EventRedactBehaviour.AS_IS if show_redacted else EventRedactBehaviour.REDACT + EventRedactBehaviour.as_is if show_redacted else EventRedactBehaviour.redact ) event = await self.store.get_event( event_id, check_room_id=room_id, redact_behaviour=redact_behaviour diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index d2ba70a814..38dc5b1f6e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -316,7 +316,7 @@ class FederationHandler: events_to_check = await self.store.get_events_as_list( event_ids_to_check, - redact_behaviour=EventRedactBehaviour.AS_IS, + redact_behaviour=EventRedactBehaviour.as_is, get_prev_content=False, ) @@ -1494,7 +1494,7 @@ class FederationHandler: events = await self.store.get_events_as_list( batch, - redact_behaviour=EventRedactBehaviour.AS_IS, + redact_behaviour=EventRedactBehaviour.as_is, allow_rejected=True, ) for event in events: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 693b544286..6cf927e4ff 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -860,7 +860,7 @@ class FederationEventHandler: evs = await self._store.get_events( list(state_map.values()), get_prev_content=False, - redact_behaviour=EventRedactBehaviour.AS_IS, + redact_behaviour=EventRedactBehaviour.as_is, ) event_map.update(evs) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 1b092e900e..95a89ac01f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1407,7 +1407,7 @@ class EventCreationHandler: original_event = await self.store.get_event( event.redacts, - redact_behaviour=EventRedactBehaviour.AS_IS, + redact_behaviour=EventRedactBehaviour.as_is, get_prev_content=False, allow_rejected=False, allow_none=True, @@ -1504,7 +1504,7 @@ class EventCreationHandler: original_event = await self.store.get_event( event.redacts, - redact_behaviour=EventRedactBehaviour.AS_IS, + 
redact_behaviour=EventRedactBehaviour.as_is, get_prev_content=False, allow_rejected=False, allow_none=True, diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index fbf7ba4600..cad3b42640 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -800,7 +800,7 @@ class StateResolutionStore: return self.store.get_events( event_ids, - redact_behaviour=EventRedactBehaviour.AS_IS, + redact_behaviour=EventRedactBehaviour.as_is, get_prev_content=False, allow_rejected=allow_rejected, ) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 0a48e5d29f..a4a604a499 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -14,6 +14,7 @@ import logging import threading +from enum import Enum, auto from typing import ( TYPE_CHECKING, Any, @@ -30,7 +31,6 @@ from typing import ( ) import attr -from constantly import NamedConstant, Names from prometheus_client import Gauge from typing_extensions import Literal @@ -150,14 +150,14 @@ class _EventRow: outlier: bool -class EventRedactBehaviour(Names): +class EventRedactBehaviour(Enum): """ What to do when retrieving a redacted event from the database. """ - AS_IS = NamedConstant() - REDACT = NamedConstant() - BLOCK = NamedConstant() + as_is = auto() + redact = auto() + block = auto() class EventsWorkerStore(SQLBaseStore): @@ -327,7 +327,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_event( self, event_id: str, - redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT, + redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact, get_prev_content: bool = ..., allow_rejected: bool = ..., allow_none: Literal[False] = ..., @@ -339,7 +339,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_event( self, event_id: str, - redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT, + redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact, get_prev_content: bool = ..., allow_rejected: bool = ..., allow_none: Literal[True] = ..., @@ -350,7 +350,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_event( self, event_id: str, - redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT, + redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact, get_prev_content: bool = False, allow_rejected: bool = False, allow_none: bool = False, @@ -362,9 +362,9 @@ class EventsWorkerStore(SQLBaseStore): event_id: The event_id of the event to fetch redact_behaviour: Determine what to do with a redacted event. Possible values: - * AS_IS - Return the full event body with no redacted content - * REDACT - Return the event but with a redacted body - * DISALLOW - Do not return redacted events (behave as per allow_none + * as_is - Return the full event body with no redacted content + * redact - Return the event but with a redacted body + * block - Do not return redacted events (behave as per allow_none if the event is redacted) get_prev_content: If True and event is a state event, @@ -406,7 +406,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_events( self, event_ids: Collection[str], - redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT, + redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact, get_prev_content: bool = False, allow_rejected: bool = False, ) -> Dict[str, EventBase]: @@ -417,9 +417,9 @@ class EventsWorkerStore(SQLBaseStore): redact_behaviour: Determine what to do with a redacted event. 
Possible values: - * AS_IS - Return the full event body with no redacted content - * REDACT - Return the event but with a redacted body - * DISALLOW - Do not return redacted events (omit them from the response) + * as_is - Return the full event body with no redacted content + * redact - Return the event but with a redacted body + * block - Do not return redacted events (omit them from the response) get_prev_content: If True and event is a state event, include the previous states content in the unsigned field. @@ -442,7 +442,7 @@ class EventsWorkerStore(SQLBaseStore): async def get_events_as_list( self, event_ids: Collection[str], - redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT, + redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact, get_prev_content: bool = False, allow_rejected: bool = False, ) -> List[EventBase]: @@ -455,9 +455,9 @@ class EventsWorkerStore(SQLBaseStore): event_ids: The event_ids of the events to fetch redact_behaviour: Determine what to do with a redacted event. Possible values: - * AS_IS - Return the full event body with no redacted content - * REDACT - Return the event but with a redacted body - * DISALLOW - Do not return redacted events (omit them from the response) + * as_is - Return the full event body with no redacted content + * redact - Return the event but with a redacted body + * block - Do not return redacted events (omit them from the response) get_prev_content: If True and event is a state event, include the previous states content in the unsigned field. @@ -568,10 +568,10 @@ class EventsWorkerStore(SQLBaseStore): event = entry.event if entry.redacted_event: - if redact_behaviour == EventRedactBehaviour.BLOCK: + if redact_behaviour == EventRedactBehaviour.block: # Skip this event continue - elif redact_behaviour == EventRedactBehaviour.REDACT: + elif redact_behaviour == EventRedactBehaviour.redact: event = entry.redacted_event events.append(event) diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index d4482c06db..3c49e7ec98 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -494,11 +494,11 @@ class SearchStore(SearchBackgroundUpdateStore): results = list(filter(lambda row: row["room_id"] in room_ids, results)) - # We set redact_behaviour to BLOCK here to prevent redacted events being returned in + # We set redact_behaviour to block here to prevent redacted events being returned in # search results (which is a data leak) events = await self.get_events_as_list( # type: ignore[attr-defined] [r["event_id"] for r in results], - redact_behaviour=EventRedactBehaviour.BLOCK, + redact_behaviour=EventRedactBehaviour.block, ) event_map = {ev.event_id: ev for ev in events} @@ -652,11 +652,11 @@ class SearchStore(SearchBackgroundUpdateStore): results = list(filter(lambda row: row["room_id"] in room_ids, results)) - # We set redact_behaviour to BLOCK here to prevent redacted events being returned in + # We set redact_behaviour to block here to prevent redacted events being returned in # search results (which is a data leak) events = await self.get_events_as_list( # type: ignore[attr-defined] [r["event_id"] for r in results], - redact_behaviour=EventRedactBehaviour.BLOCK, + redact_behaviour=EventRedactBehaviour.block, ) event_map = {ev.event_id: ev for ev in events} diff --git a/synapse/storage/databases/main/signatures.py b/synapse/storage/databases/main/signatures.py index 95148fd227..05da15074a 100644 --- 
a/synapse/storage/databases/main/signatures.py +++ b/synapse/storage/databases/main/signatures.py @@ -48,7 +48,7 @@ class SignatureWorkerStore(EventsWorkerStore): """ events = await self.get_events( event_ids, - redact_behaviour=EventRedactBehaviour.AS_IS, + redact_behaviour=EventRedactBehaviour.as_is, allow_rejected=True, ) diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index e496ba7bed..97118045a1 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -943,7 +943,7 @@ class EventsPersistenceStorage: dropped_events = await self.main_store.get_events( dropped_extrems, allow_rejected=True, - redact_behaviour=EventRedactBehaviour.AS_IS, + redact_behaviour=EventRedactBehaviour.as_is, ) new_senders = {get_domain_from_id(e.sender) for e, _ in events_context} @@ -974,7 +974,7 @@ class EventsPersistenceStorage: prev_events = await self.main_store.get_events( new_events, allow_rejected=True, - redact_behaviour=EventRedactBehaviour.AS_IS, + redact_behaviour=EventRedactBehaviour.as_is, ) events_to_check = prev_events.values() From 75dff3dc980974960f55fa21fc8e672201f63045 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 4 May 2022 08:38:18 -0400 Subject: [PATCH 36/87] Include bundled aggregations for the latest event in a thread. (#12273) The `latest_event` field of the bundled aggregations for `m.thread` relations did not include bundled aggregations itself. This resulted in clients needing to immediately request the event from the server (and thus making it useless that the latest event itself was serialized instead of just including an event ID). --- changelog.d/12273.bugfix | 1 + synapse/events/utils.py | 55 ++++++---- synapse/handlers/relations.py | 72 +++++++++---- synapse/storage/databases/main/relations.py | 11 +- tests/rest/client/test_relations.py | 108 +++++++++++++++++++- 5 files changed, 197 insertions(+), 50 deletions(-) create mode 100644 changelog.d/12273.bugfix diff --git a/changelog.d/12273.bugfix b/changelog.d/12273.bugfix new file mode 100644 index 0000000000..f8d7b6c889 --- /dev/null +++ b/changelog.d/12273.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.48.0 where latest thread reply provided failed to include the proper bundled aggregations. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index a6c48308b3..026dcde8d8 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -426,13 +426,12 @@ class EventClientSerializer: # Check if there are any bundled aggregations to include with the event. if bundle_aggregations: - event_aggregations = bundle_aggregations.get(event.event_id) - if event_aggregations: + if event.event_id in bundle_aggregations: self._inject_bundled_aggregations( event, time_now, config, - event_aggregations, + bundle_aggregations, serialized_event, apply_edits=apply_edits, ) @@ -471,7 +470,7 @@ class EventClientSerializer: event: EventBase, time_now: int, config: SerializeEventConfig, - aggregations: "BundledAggregations", + bundled_aggregations: Dict[str, "BundledAggregations"], serialized_event: JsonDict, apply_edits: bool, ) -> None: @@ -481,22 +480,37 @@ class EventClientSerializer: event: The event being serialized. time_now: The current time in milliseconds config: Event serialization config - aggregations: The bundled aggregation to serialize. + bundled_aggregations: Bundled aggregations to be injected. + A map from event_id to aggregation data. Must contain at least an + entry for `event`. 
+ + While serializing the bundled aggregations this map may be searched + again for additional events in a recursive manner. serialized_event: The serialized event which may be modified. apply_edits: Whether the content of the event should be modified to reflect any replacement in `aggregations.replace`. """ + + # We have already checked that aggregations exist for this event. + event_aggregations = bundled_aggregations[event.event_id] + + # The JSON dictionary to be added under the unsigned property of the event + # being serialized. serialized_aggregations = {} - if aggregations.annotations: - serialized_aggregations[RelationTypes.ANNOTATION] = aggregations.annotations + if event_aggregations.annotations: + serialized_aggregations[ + RelationTypes.ANNOTATION + ] = event_aggregations.annotations - if aggregations.references: - serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references + if event_aggregations.references: + serialized_aggregations[ + RelationTypes.REFERENCE + ] = event_aggregations.references - if aggregations.replace: + if event_aggregations.replace: # If there is an edit, optionally apply it to the event. - edit = aggregations.replace + edit = event_aggregations.replace if apply_edits: self._apply_edit(event, serialized_event, edit) @@ -507,19 +521,16 @@ class EventClientSerializer: "sender": edit.sender, } - # If this event is the start of a thread, include a summary of the replies. - if aggregations.thread: - thread = aggregations.thread + # Include any threaded replies to this event. + if event_aggregations.thread: + thread = event_aggregations.thread - # Don't bundle aggregations as this could recurse forever. - serialized_latest_event = serialize_event( - thread.latest_event, time_now, config=config + serialized_latest_event = self.serialize_event( + thread.latest_event, + time_now, + config=config, + bundle_aggregations=bundled_aggregations, ) - # Manually apply an edit, if one exists. - if thread.latest_edit: - self._apply_edit( - thread.latest_event, serialized_latest_event, thread.latest_edit - ) thread_summary = { "latest_event": serialized_latest_event, diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index b5dc9f74b3..cec5740fbd 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -44,8 +44,6 @@ logger = logging.getLogger(__name__) class _ThreadAggregation: # The latest event in the thread. latest_event: EventBase - # The latest edit to the latest event in the thread. - latest_edit: Optional[EventBase] # The total number of events in the thread. count: int # True if the current user has sent an event to the thread. @@ -295,7 +293,7 @@ class RelationsHandler: for event_id, summary in summaries.items(): if summary: - thread_count, latest_thread_event, edit = summary + thread_count, latest_thread_event = summary # Subtract off the count of any ignored users. for ignored_user in ignored_users: @@ -340,7 +338,6 @@ class RelationsHandler: results[event_id] = _ThreadAggregation( latest_event=latest_thread_event, - latest_edit=edit, count=thread_count, # If there's a thread summary it must also exist in the # participated dictionary. @@ -359,8 +356,13 @@ class RelationsHandler: user_id: The user requesting the bundled aggregations. Returns: - A map of event ID to the bundled aggregation for the event. Not all - events may have bundled aggregations in the results. + A map of event ID to the bundled aggregations for the event. 
+ + Not all requested events may exist in the results (if they don't have + bundled aggregations). + + The results may include additional events which are related to the + requested events. """ # De-duplicate events by ID to handle the same event requested multiple times. # @@ -369,22 +371,59 @@ class RelationsHandler: event.event_id: event for event in events if not event.is_state() } + # A map of event ID to the relation in that event, if there is one. + relations_by_id: Dict[str, str] = {} + for event_id, event in events_by_id.items(): + relates_to = event.content.get("m.relates_to") + if isinstance(relates_to, collections.abc.Mapping): + relation_type = relates_to.get("rel_type") + if isinstance(relation_type, str): + relations_by_id[event_id] = relation_type + # event ID -> bundled aggregation in non-serialized form. results: Dict[str, BundledAggregations] = {} # Fetch any ignored users of the requesting user. ignored_users = await self._main_store.ignored_users(user_id) + # Threads are special as the latest event of a thread might cause additional + # events to be fetched. Thus, we check those first! + + # Fetch thread summaries (but only for the directly requested events). + threads = await self.get_threads_for_events( + # It is not valid to start a thread on an event which itself relates to another event. + [eid for eid in events_by_id.keys() if eid not in relations_by_id], + user_id, + ignored_users, + ) + for event_id, thread in threads.items(): + results.setdefault(event_id, BundledAggregations()).thread = thread + + # If the latest event in a thread is not already being fetched, + # add it. This ensures that the bundled aggregations for the + # latest thread event is correct. + latest_thread_event = thread.latest_event + if latest_thread_event and latest_thread_event.event_id not in events_by_id: + events_by_id[latest_thread_event.event_id] = latest_thread_event + # Keep relations_by_id in sync with events_by_id: + # + # We know that the latest event in a thread has a thread relation + # (as that is what makes it part of the thread). + relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD + # Fetch other relations per event. for event in events_by_id.values(): - # Do not bundle aggregations for an event which represents an edit or an - # annotation. It does not make sense for them to have related events. - relates_to = event.content.get("m.relates_to") - if isinstance(relates_to, collections.abc.Mapping): - relation_type = relates_to.get("rel_type") - if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE): - continue + # An event which is a replacement (ie edit) or annotation (ie, reaction) + # may not have any other event related to it. + # + # XXX This is buggy, see https://github.com/matrix-org/synapse/issues/12566 + if relations_by_id.get(event.event_id) in ( + RelationTypes.ANNOTATION, + RelationTypes.REPLACE, + ): + continue + # Fetch any annotations (ie, reactions) to bundle with this event. annotations = await self.get_annotations_for_event( event.event_id, event.room_id, ignored_users=ignored_users ) @@ -393,6 +432,7 @@ class RelationsHandler: event.event_id, BundledAggregations() ).annotations = {"chunk": annotations} + # Fetch any references to bundle with this event. 
references, next_token = await self.get_relations_for_event( event.event_id, event, @@ -425,10 +465,4 @@ class RelationsHandler: for event_id, edit in edits.items(): results.setdefault(event_id, BundledAggregations()).replace = edit - threads = await self.get_threads_for_events( - events_by_id.keys(), user_id, ignored_users - ) - for event_id, thread in threads.items(): - results.setdefault(event_id, BundledAggregations()).thread = thread - return results diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index a5c31f6787..484976ca6b 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -445,8 +445,8 @@ class RelationsWorkerStore(SQLBaseStore): @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") async def get_thread_summaries( self, event_ids: Collection[str] - ) -> Dict[str, Optional[Tuple[int, EventBase, Optional[EventBase]]]]: - """Get the number of threaded replies, the latest reply (if any), and the latest edit for that reply for the given event. + ) -> Dict[str, Optional[Tuple[int, EventBase]]]: + """Get the number of threaded replies and the latest reply (if any) for the given events. Args: event_ids: Summarize the thread related to this event ID. @@ -458,7 +458,6 @@ class RelationsWorkerStore(SQLBaseStore): Each summary is a tuple of: The number of events in the thread. The most recent event in the thread. - The most recent edit to the most recent event in the thread, if applicable. """ def _get_thread_summaries_txn( @@ -544,9 +543,6 @@ class RelationsWorkerStore(SQLBaseStore): latest_events = await self.get_events(latest_event_ids.values()) # type: ignore[attr-defined] - # Check to see if any of those events are edited. - latest_edits = await self.get_applicable_edits(latest_event_ids.values()) - # Map to the event IDs to the thread summary. # # There might not be a summary due to there not being a thread or @@ -557,8 +553,7 @@ class RelationsWorkerStore(SQLBaseStore): summary = None if latest_event: - latest_edit = latest_edits.get(latest_event_id) - summary = (counts[parent_event_id], latest_event, latest_edit) + summary = (counts[parent_event_id], latest_event) summaries[parent_event_id] = summary return summaries diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 39667e3225..33ce9471b3 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1029,7 +1029,106 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): bundled_aggregations.get("latest_event"), ) - self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 9) + self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 10) + + def test_thread_with_bundled_aggregations_for_latest(self) -> None: + """ + Bundled aggregations should get applied to the latest thread event. + """ + self._send_relation(RelationTypes.THREAD, "m.room.test") + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_2 = channel.json_body["event_id"] + + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_2 + ) + + def assert_thread(bundled_aggregations: JsonDict) -> None: + self.assertEqual(2, bundled_aggregations.get("count")) + self.assertTrue(bundled_aggregations.get("current_user_participated")) + # The latest thread event has some fields that don't matter. 
+ self.assert_dict( + { + "content": { + "m.relates_to": { + "event_id": self.parent_id, + "rel_type": RelationTypes.THREAD, + } + }, + "event_id": thread_2, + "sender": self.user_id, + "type": "m.room.test", + }, + bundled_aggregations.get("latest_event"), + ) + # Check the unsigned field on the latest event. + self.assert_dict( + { + "m.relations": { + RelationTypes.ANNOTATION: { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 1}, + ] + }, + } + }, + bundled_aggregations["latest_event"].get("unsigned"), + ) + + self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 10) + + def test_nested_thread(self) -> None: + """ + Ensure that a nested thread gets ignored by bundled aggregations, as + those are forbidden. + """ + + # Start a thread. + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + reply_event_id = channel.json_body["event_id"] + + # Disable the validation to pretend this came over federation, since it is + # not an event the Client-Server API will allow.. + with patch( + "synapse.handlers.message.EventCreationHandler._validate_event_relation", + new=lambda self, event: make_awaitable(None), + ): + # Create a sub-thread off the thread, which is not allowed. + self._send_relation( + RelationTypes.THREAD, "m.room.test", parent_id=reply_event_id + ) + + # Fetch the thread root, to get the bundled aggregation for the thread. + relations_from_event = self._get_bundled_aggregations() + + # Ensure that requesting the room messages also does not return the sub-thread. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/messages?dir=b", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + event = self._find_event_in_chunk(channel.json_body["chunk"]) + relations_from_messages = event["unsigned"]["m.relations"] + + # Check the bundled aggregations from each point. + for aggregations, desc in ( + (relations_from_event, "/event"), + (relations_from_messages, "/messages"), + ): + # The latest event should have bundled aggregations. + self.assertIn(RelationTypes.THREAD, aggregations, desc) + thread_summary = aggregations[RelationTypes.THREAD] + self.assertIn("latest_event", thread_summary, desc) + self.assertEqual( + thread_summary["latest_event"]["event_id"], reply_event_id, desc + ) + + # The latest event should not have any bundled aggregations (since the + # only relation to it is another thread, which is invalid). + self.assertNotIn( + "m.relations", thread_summary["latest_event"]["unsigned"], desc + ) def test_thread_edit_latest_event(self) -> None: """Test that editing the latest event in a thread works.""" @@ -1049,6 +1148,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, parent_id=threaded_event_id, ) + edit_event_id = channel.json_body["event_id"] # Fetch the thread root, to get the bundled aggregation for the thread. relations_dict = self._get_bundled_aggregations() @@ -1061,6 +1161,12 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): self.assertIn("latest_event", thread_summary) latest_event_in_thread = thread_summary["latest_event"] self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!") + # The latest event in the thread should have the edit appear under the + # bundled aggregations. 
+ self.assertDictContainsSubset( + {"event_id": edit_event_id, "sender": "@alice:test"}, + latest_event_in_thread["unsigned"]["m.relations"][RelationTypes.REPLACE], + ) def test_aggregation_get_event_for_annotation(self) -> None: """Test that annotations do not get bundled aggregations included From b2df0716bc0cf31b5f5f90a0599bc1d04a837e27 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 4 May 2022 13:38:55 +0100 Subject: [PATCH 37/87] Improve logging for cancelled requests (#12587) Don't log stack traces for cancelled requests and use a custom HTTP status code of 499. Signed-off-by: Sean Quah --- changelog.d/12587.misc | 1 + docs/usage/administration/request_log.md | 2 +- synapse/http/server.py | 30 ++++++++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12587.misc diff --git a/changelog.d/12587.misc b/changelog.d/12587.misc new file mode 100644 index 0000000000..d26e332305 --- /dev/null +++ b/changelog.d/12587.misc @@ -0,0 +1 @@ +Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. diff --git a/docs/usage/administration/request_log.md b/docs/usage/administration/request_log.md index 316304c734..adb5f4f5f3 100644 --- a/docs/usage/administration/request_log.md +++ b/docs/usage/administration/request_log.md @@ -28,7 +28,7 @@ See the following for how to decode the dense data available from the default lo | NNNN | Total time waiting for response to DB queries across all parallel DB work from this request | | OOOO | Count of DB transactions performed | | PPPP | Response body size | -| QQQQ | Response status code (prefixed with ! if the socket was closed before the response was generated) | +| QQQQ | Response status code
Suffixed with `!` if the socket was closed before the response was generated.<br/>A `499!` status code indicates that Synapse also cancelled request processing after the socket was closed.
| | RRRR | Request | | SSSS | User-agent | | TTTT | Events fetched from DB to service this request (note that this does not include events fetched from the cache) | diff --git a/synapse/http/server.py b/synapse/http/server.py index 1cf49830e8..657bffcddd 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -43,6 +43,7 @@ from typing_extensions import Protocol from zope.interface import implementer from twisted.internet import defer, interfaces +from twisted.internet.defer import CancelledError from twisted.python import failure from twisted.web import resource from twisted.web.server import NOT_DONE_YET, Request @@ -82,6 +83,14 @@ HTML_ERROR_TEMPLATE = """ """ +# A fictional HTTP status code for requests where the client has disconnected and we +# successfully cancelled the request. Used only for logging purposes. Clients will never +# observe this code unless cancellations leak across requests or we raise a +# `CancelledError` ourselves. +# Analogous to nginx's 499 status code: +# https://github.com/nginx/nginx/blob/release-1.21.6/src/http/ngx_http_request.h#L128-L134 +HTTP_STATUS_REQUEST_CANCELLED = 499 + def return_json_error(f: failure.Failure, request: SynapseRequest) -> None: """Sends a JSON error response to clients.""" @@ -93,6 +102,17 @@ def return_json_error(f: failure.Failure, request: SynapseRequest) -> None: error_dict = exc.error_dict() logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg) + elif f.check(CancelledError): + error_code = HTTP_STATUS_REQUEST_CANCELLED + error_dict = {"error": "Request cancelled", "errcode": Codes.UNKNOWN} + + if not request._disconnected: + logger.error( + "Got cancellation before client disconnection from %r: %r", + request.request_metrics.name, + request, + exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + ) else: error_code = 500 error_dict = {"error": "Internal server error", "errcode": Codes.UNKNOWN} @@ -155,6 +175,16 @@ def return_html_error( request, exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] ) + elif f.check(CancelledError): + code = HTTP_STATUS_REQUEST_CANCELLED + msg = "Request cancelled" + + if not request._disconnected: + logger.error( + "Got cancellation before client disconnection when handling request %r", + request, + exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + ) else: code = HTTPStatus.INTERNAL_SERVER_ERROR msg = "Internal server error" From ba3fd54badb0e11d4dc79488e2ab3099fc03a1e8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 4 May 2022 09:53:21 -0400 Subject: [PATCH 38/87] Remove unstable/unspecced login types. (#12597) * `m.login.jwt`, which was never specced and has been deprecated since Synapse 1.16.0. (`org.matrix.login.jwt` can be used instead.) * `uk.half-shot.msc2778.login.application_service`, which was stabilized as part of the Matrix spec v1.2 release. --- CHANGES.md | 6 ++++++ changelog.d/12597.removal | 2 ++ docs/jwt.md | 3 --- synapse/rest/client/login.py | 15 ++++----------- tests/handlers/test_password_providers.py | 4 +--- tests/rest/client/test_login.py | 4 +--- 6 files changed, 14 insertions(+), 20 deletions(-) create mode 100644 changelog.d/12597.removal diff --git a/CHANGES.md b/CHANGES.md index 31f1561274..b4d91b2793 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.59.0 +============== + +The non-standard `m.login.jwt` login type has been removed from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. 
This is only used if `jwt_config.enabled` is set to `true` in the configuration. + + Synapse 1.58.0 (2022-05-03) =========================== diff --git a/changelog.d/12597.removal b/changelog.d/12597.removal new file mode 100644 index 0000000000..7927f1d68d --- /dev/null +++ b/changelog.d/12597.removal @@ -0,0 +1,2 @@ +Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from +[MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). diff --git a/docs/jwt.md b/docs/jwt.md index 32f58cc0cb..346daf78ad 100644 --- a/docs/jwt.md +++ b/docs/jwt.md @@ -17,9 +17,6 @@ follows: } ``` -Note that the login type of `m.login.jwt` is supported, but is deprecated. This -will be removed in a future version of Synapse. - The `token` field should include the JSON web token with the following claims: * A claim that encodes the local part of the user ID is required. By default, diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 4a4dbe75de..71d8038448 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -69,9 +69,7 @@ class LoginRestServlet(RestServlet): SSO_TYPE = "m.login.sso" TOKEN_TYPE = "m.login.token" JWT_TYPE = "org.matrix.login.jwt" - JWT_TYPE_DEPRECATED = "m.login.jwt" APPSERVICE_TYPE = "m.login.application_service" - APPSERVICE_TYPE_UNSTABLE = "uk.half-shot.msc2778.login.application_service" REFRESH_TOKEN_PARAM = "refresh_token" def __init__(self, hs: "HomeServer"): @@ -126,7 +124,6 @@ class LoginRestServlet(RestServlet): flows: List[JsonDict] = [] if self.jwt_enabled: flows.append({"type": LoginRestServlet.JWT_TYPE}) - flows.append({"type": LoginRestServlet.JWT_TYPE_DEPRECATED}) if self.cas_enabled: # we advertise CAS for backwards compat, though MSC1721 renamed it @@ -156,7 +153,6 @@ class LoginRestServlet(RestServlet): flows.extend({"type": t} for t in self.auth_handler.get_supported_login_types()) flows.append({"type": LoginRestServlet.APPSERVICE_TYPE}) - flows.append({"type": LoginRestServlet.APPSERVICE_TYPE_UNSTABLE}) return 200, {"flows": flows} @@ -175,10 +171,7 @@ class LoginRestServlet(RestServlet): ) try: - if login_submission["type"] in ( - LoginRestServlet.APPSERVICE_TYPE, - LoginRestServlet.APPSERVICE_TYPE_UNSTABLE, - ): + if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE: appservice = self.auth.get_appservice_by_req(request) if appservice.is_rate_limited(): @@ -191,9 +184,9 @@ class LoginRestServlet(RestServlet): appservice, should_issue_refresh_token=should_issue_refresh_token, ) - elif self.jwt_enabled and ( - login_submission["type"] == LoginRestServlet.JWT_TYPE - or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED + elif ( + self.jwt_enabled + and login_submission["type"] == LoginRestServlet.JWT_TYPE ): await self._address_ratelimiter.ratelimit(None, request.getClientIP()) result = await self._do_jwt_login( diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index addf14fa2b..82b3bb3b73 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -30,11 +30,9 @@ from tests.server import FakeChannel from tests.test_utils import make_awaitable from tests.unittest import override_config -# (possibly experimental) login flows we expect to appear in the list after the normal -# ones +# Login flows we expect to appear in the list after the normal ones. 
ADDITIONAL_LOGIN_FLOWS = [ {"type": "m.login.application_service"}, - {"type": "uk.half-shot.msc2778.login.application_service"}, ] # a mock instance which the dummy auth providers delegate to, so we can see what's going diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 0a3d017dc9..4920468f7a 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -81,11 +81,9 @@ TEST_CLIENT_REDIRECT_URL = 'https://x?&q"+%3D%2B"="fö%26=o"' # the query params in TEST_CLIENT_REDIRECT_URL EXPECTED_CLIENT_REDIRECT_URL_PARAMS = [("", ""), ('q" =+"', '"fö&=o"')] -# (possibly experimental) login flows we expect to appear in the list after the normal -# ones +# Login flows we expect to appear in the list after the normal ones. ADDITIONAL_LOGIN_FLOWS = [ {"type": "m.login.application_service"}, - {"type": "uk.half-shot.msc2778.login.application_service"}, ] From 332cce8dcf9c28314f568c290b57e98036a0e723 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 4 May 2022 16:41:40 +0100 Subject: [PATCH 39/87] Disable device name lookup over federation by default (#12616) --- changelog.d/12616.misc | 1 + docs/sample_config.yaml | 8 ++++---- docs/upgrade.md | 11 +++++++++++ docs/usage/configuration/config_documentation.md | 6 +++--- synapse/config/federation.py | 10 +++++----- 5 files changed, 24 insertions(+), 12 deletions(-) create mode 100644 changelog.d/12616.misc diff --git a/changelog.d/12616.misc b/changelog.d/12616.misc new file mode 100644 index 0000000000..d17ce24cdf --- /dev/null +++ b/changelog.d/12616.misc @@ -0,0 +1 @@ +Prevent remote homeservers from requesting local user device names by default. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 67184c6b1a..5eba0fcf3d 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -709,11 +709,11 @@ retention: # #allow_profile_lookup_over_federation: false -# Uncomment to disable device display name lookup over federation. By default, the -# Federation API allows other homeservers to obtain device display names of any user -# on this homeserver. Defaults to 'true'. +# Uncomment to allow device display name lookup over federation. By default, the +# Federation API prevents other homeservers from obtaining the display names of +# user devices on this homeserver. Defaults to 'false'. # -#allow_device_name_lookup_over_federation: false +#allow_device_name_lookup_over_federation: true ## Caching ## diff --git a/docs/upgrade.md b/docs/upgrade.md index 3a8aeb0395..b40cac86f0 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,6 +89,17 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.59.0 + +## Device name lookup over federation has been disabled by default + +The names of user devices are no longer visible to users on other homeservers by default. +Device IDs are unaffected, as these are necessary to facilitate end-to-end encryption. + +To re-enable this functionality, set the +[`allow_device_name_lookup_over_federation`](https://matrix-org.github.io/synapse/v1.59/usage/configuration/config_documentation.html#federation) +homeserver config option to `true`. 
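+
+For example, the relevant `homeserver.yaml` snippet would be (a minimal
+sketch; merge it into your existing configuration):
+
+```yaml
+# Restore the pre-1.59.0 behaviour of exposing local users' device
+# display names to other homeservers.
+allow_device_name_lookup_over_federation: true
+```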
+ # Upgrading to v1.58.0 ## Groups/communities feature has been disabled by default diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 968b0fbfaf..36db649467 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1035,13 +1035,13 @@ allow_profile_lookup_over_federation: false --- Config option: `allow_device_name_lookup_over_federation` -Set this option to false to disable device display name lookup over federation. By default, the -Federation API allows other homeservers to obtain device display names of any user +Set this option to true to allow device display name lookup over federation. By default, the +Federation API prevents other homeservers from obtaining the display names of any user devices on this homeserver. Example configuration: ```yaml -allow_device_name_lookup_over_federation: false +allow_device_name_lookup_over_federation: true ``` --- ## Caching ## diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 0e74f70784..f83f93c0ef 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -46,7 +46,7 @@ class FederationConfig(Config): ) self.allow_device_name_lookup_over_federation = config.get( - "allow_device_name_lookup_over_federation", True + "allow_device_name_lookup_over_federation", False ) def generate_config_section(self, **kwargs: Any) -> str: @@ -81,11 +81,11 @@ class FederationConfig(Config): # #allow_profile_lookup_over_federation: false - # Uncomment to disable device display name lookup over federation. By default, the - # Federation API allows other homeservers to obtain device display names of any user - # on this homeserver. Defaults to 'true'. + # Uncomment to allow device display name lookup over federation. By default, the + # Federation API prevents other homeservers from obtaining the display names of + # user devices on this homeserver. Defaults to 'false'. # - #allow_device_name_lookup_over_federation: false + #allow_device_name_lookup_over_federation: true """ From 116a4c8340b729ffde43be33df24d417384cb28b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=A0imon=20Brandner?= Date: Wed, 4 May 2022 17:59:22 +0200 Subject: [PATCH 40/87] Implement changes to MSC2285 (hidden read receipts) (#12168) * Changes hidden read receipts to be a separate receipt type (instead of a field on `m.read`). * Updates the `/receipts` endpoint to accept `m.fully_read`. --- changelog.d/12168.feature | 1 + synapse/api/constants.py | 6 +- synapse/handlers/receipts.py | 63 ++--- synapse/handlers/sync.py | 2 +- synapse/push/push_tools.py | 4 +- synapse/rest/client/notifications.py | 2 +- synapse/rest/client/read_marker.py | 32 ++- synapse/rest/client/receipts.py | 51 ++-- synapse/storage/databases/main/receipts.py | 142 +++++++--- tests/handlers/test_receipts.py | 129 ++++++---- .../slave/storage/test_receipts.py | 242 +++++++++++++++++- tests/rest/client/test_sync.py | 161 ++++++++++-- 12 files changed, 648 insertions(+), 187 deletions(-) create mode 100644 changelog.d/12168.feature diff --git a/changelog.d/12168.feature b/changelog.d/12168.feature new file mode 100644 index 0000000000..cd5c45029e --- /dev/null +++ b/changelog.d/12168.feature @@ -0,0 +1 @@ +Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). 
Contributed by @SimonBrandner. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 0172eb60b8..0ccd4c9558 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -255,7 +255,5 @@ class GuestAccess: class ReceiptTypes: READ: Final = "m.read" - - -class ReadReceiptEventFields: - MSC2285_HIDDEN: Final = "org.matrix.msc2285.hidden" + READ_PRIVATE: Final = "org.matrix.msc2285.read.private" + FULLY_READ: Final = "m.fully_read" diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index cfe860decc..ae41fd674e 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -14,7 +14,7 @@ import logging from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple -from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes +from synapse.api.constants import ReceiptTypes from synapse.appservice import ApplicationService from synapse.streams import EventSource from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id @@ -112,7 +112,7 @@ class ReceiptsHandler: ) if not res: - # res will be None if this read receipt is 'old' + # res will be None if this receipt is 'old' continue stream_id, max_persisted_id = res @@ -138,7 +138,7 @@ class ReceiptsHandler: return True async def received_client_receipt( - self, room_id: str, receipt_type: str, user_id: str, event_id: str, hidden: bool + self, room_id: str, receipt_type: str, user_id: str, event_id: str ) -> None: """Called when a client tells us a local user has read up to the given event_id in the room. @@ -148,16 +148,14 @@ class ReceiptsHandler: receipt_type=receipt_type, user_id=user_id, event_ids=[event_id], - data={"ts": int(self.clock.time_msec()), "hidden": hidden}, + data={"ts": int(self.clock.time_msec())}, ) is_new = await self._handle_new_receipts([receipt]) if not is_new: return - if self.federation_sender and not ( - self.hs.config.experimental.msc2285_enabled and hidden - ): + if self.federation_sender and receipt_type != ReceiptTypes.READ_PRIVATE: await self.federation_sender.send_read_receipt(receipt) @@ -168,6 +166,13 @@ class ReceiptEventSource(EventSource[int, JsonDict]): @staticmethod def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]: + """ + This method takes in what is returned by + get_linearized_receipts_for_rooms() and goes through read receipts + filtering out m.read.private receipts if they were not sent by the + current user. 
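+
+        Put differently: private receipts sent by other users are dropped
+        entirely, while the requesting user's own private receipts, and
+        receipts of any other type, are passed through unchanged.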
+ """ + visible_events = [] # filter out hidden receipts the user shouldn't see @@ -176,37 +181,21 @@ class ReceiptEventSource(EventSource[int, JsonDict]): new_event = event.copy() new_event["content"] = {} - for event_id in content.keys(): - event_content = content.get(event_id, {}) - m_read = event_content.get(ReceiptTypes.READ, {}) + for event_id, event_content in content.items(): + receipt_event = {} + for receipt_type, receipt_content in event_content.items(): + if receipt_type == ReceiptTypes.READ_PRIVATE: + user_rr = receipt_content.get(user_id, None) + if user_rr: + receipt_event[ReceiptTypes.READ_PRIVATE] = { + user_id: user_rr.copy() + } + else: + receipt_event[receipt_type] = receipt_content.copy() - # If m_read is missing copy over the original event_content as there is nothing to process here - if not m_read: - new_event["content"][event_id] = event_content.copy() - continue - - new_users = {} - for rr_user_id, user_rr in m_read.items(): - try: - hidden = user_rr.get("hidden") - except AttributeError: - # Due to https://github.com/matrix-org/synapse/issues/10376 - # there are cases where user_rr is a string, in those cases - # we just ignore the read receipt - continue - - if hidden is not True or rr_user_id == user_id: - new_users[rr_user_id] = user_rr.copy() - # If hidden has a value replace hidden with the correct prefixed key - if hidden is not None: - new_users[rr_user_id].pop("hidden") - new_users[rr_user_id][ - ReadReceiptEventFields.MSC2285_HIDDEN - ] = hidden - - # Set new users unless empty - if len(new_users.keys()) > 0: - new_event["content"][event_id] = {ReceiptTypes.READ: new_users} + # Only include the receipt event if it is non-empty. + if receipt_event: + new_event["content"][event_id] = receipt_event # Append new_event to visible_events unless empty if len(new_event["content"].keys()) > 0: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 5125126a80..2c555a66d0 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1045,7 +1045,7 @@ class SyncHandler: last_unread_event_id = await self.store.get_last_receipt_event_id_for_user( user_id=sync_config.user.to_string(), room_id=room_id, - receipt_type=ReceiptTypes.READ, + receipt_types=(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE), ) return await self.store.get_unread_event_push_actions_by_room_for_user( diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index 957c9b780b..a1bf5b20dd 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -24,7 +24,9 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) - invites = await store.get_invited_rooms_for_local_user(user_id) joins = await store.get_rooms_for_user(user_id) - my_receipts_by_room = await store.get_receipts_for_user(user_id, ReceiptTypes.READ) + my_receipts_by_room = await store.get_receipts_for_user( + user_id, (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE) + ) badge = len(invites) diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index ff040de6b8..24bc7c9095 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -58,7 +58,7 @@ class NotificationsServlet(RestServlet): ) receipts_by_room = await self.store.get_receipts_for_user_with_orderings( - user_id, ReceiptTypes.READ + user_id, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] ) notif_event_ids = [pa.event_id for pa in push_actions] diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index 
f51be511d1..1583e903cd 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -15,8 +15,8 @@ import logging from typing import TYPE_CHECKING, Tuple -from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes -from synapse.api.errors import Codes, SynapseError +from synapse.api.constants import ReceiptTypes +from synapse.api.errors import SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -36,6 +36,7 @@ class ReadMarkerRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() + self.config = hs.config self.receipts_handler = hs.get_receipts_handler() self.read_marker_handler = hs.get_read_marker_handler() self.presence_handler = hs.get_presence_handler() @@ -48,27 +49,38 @@ class ReadMarkerRestServlet(RestServlet): await self.presence_handler.bump_presence_active_time(requester.user) body = parse_json_object_from_request(request) - read_event_id = body.get(ReceiptTypes.READ, None) - hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False) - if not isinstance(hidden, bool): + valid_receipt_types = {ReceiptTypes.READ, ReceiptTypes.FULLY_READ} + if self.config.experimental.msc2285_enabled: + valid_receipt_types.add(ReceiptTypes.READ_PRIVATE) + + if set(body.keys()) > valid_receipt_types: raise SynapseError( 400, - "Param %s must be a boolean, if given" - % ReadReceiptEventFields.MSC2285_HIDDEN, - Codes.BAD_JSON, + "Receipt type must be 'm.read', 'org.matrix.msc2285.read.private' or 'm.fully_read'" + if self.config.experimental.msc2285_enabled + else "Receipt type must be 'm.read' or 'm.fully_read'", ) + read_event_id = body.get(ReceiptTypes.READ, None) if read_event_id: await self.receipts_handler.received_client_receipt( room_id, ReceiptTypes.READ, user_id=requester.user.to_string(), event_id=read_event_id, - hidden=hidden, ) - read_marker_event_id = body.get("m.fully_read", None) + read_private_event_id = body.get(ReceiptTypes.READ_PRIVATE, None) + if read_private_event_id and self.config.experimental.msc2285_enabled: + await self.receipts_handler.received_client_receipt( + room_id, + ReceiptTypes.READ_PRIVATE, + user_id=requester.user.to_string(), + event_id=read_private_event_id, + ) + + read_marker_event_id = body.get(ReceiptTypes.FULLY_READ, None) if read_marker_event_id: await self.read_marker_handler.received_client_read_marker( room_id, diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py index b24ad2d1be..f9caab6635 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py @@ -16,8 +16,8 @@ import logging import re from typing import TYPE_CHECKING, Tuple -from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes -from synapse.api.errors import Codes, SynapseError +from synapse.api.constants import ReceiptTypes +from synapse.api.errors import SynapseError from synapse.http import get_request_user_agent from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -46,6 +46,7 @@ class ReceiptRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() self.receipts_handler = hs.get_receipts_handler() + self.read_marker_handler = hs.get_read_marker_handler() self.presence_handler = hs.get_presence_handler() async def on_POST( @@ -53,7 +54,19 @@ class ReceiptRestServlet(RestServlet): ) -> Tuple[int, JsonDict]: requester = await 
self.auth.get_user_by_req(request) - if receipt_type != ReceiptTypes.READ: + if self.hs.config.experimental.msc2285_enabled and receipt_type not in [ + ReceiptTypes.READ, + ReceiptTypes.READ_PRIVATE, + ReceiptTypes.FULLY_READ, + ]: + raise SynapseError( + 400, + "Receipt type must be 'm.read', 'org.matrix.msc2285.read.private' or 'm.fully_read'", + ) + elif ( + not self.hs.config.experimental.msc2285_enabled + and receipt_type != ReceiptTypes.READ + ): raise SynapseError(400, "Receipt type must be 'm.read'") # Do not allow older SchildiChat and Element Android clients (prior to Element/1.[012].x) to send an empty body. @@ -62,26 +75,24 @@ class ReceiptRestServlet(RestServlet): if "Android" in user_agent: if pattern.match(user_agent) or "Riot" in user_agent: allow_empty_body = True - body = parse_json_object_from_request(request, allow_empty_body) - hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False) - - if not isinstance(hidden, bool): - raise SynapseError( - 400, - "Param %s must be a boolean, if given" - % ReadReceiptEventFields.MSC2285_HIDDEN, - Codes.BAD_JSON, - ) + # This call makes sure possible empty body is handled correctly + parse_json_object_from_request(request, allow_empty_body) await self.presence_handler.bump_presence_active_time(requester.user) - await self.receipts_handler.received_client_receipt( - room_id, - receipt_type, - user_id=requester.user.to_string(), - event_id=event_id, - hidden=hidden, - ) + if receipt_type == ReceiptTypes.FULLY_READ: + await self.read_marker_handler.received_client_read_marker( + room_id, + user_id=requester.user.to_string(), + event_id=event_id, + ) + else: + await self.receipts_handler.received_client_receipt( + room_id, + receipt_type, + user_id=requester.user.to_string(), + event_id=event_id, + ) return 200, {} diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 7d96f4feda..9e3d838eab 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -144,12 +144,43 @@ class ReceiptsWorkerStore(SQLBaseStore): desc="get_receipts_for_room", ) - @cached() async def get_last_receipt_event_id_for_user( - self, user_id: str, room_id: str, receipt_type: str + self, user_id: str, room_id: str, receipt_types: Iterable[str] ) -> Optional[str]: """ - Fetch the event ID for the latest receipt in a room with the given receipt type. + Fetch the event ID for the latest receipt in a room with one of the given receipt types. + + Args: + user_id: The user to fetch receipts for. + room_id: The room ID to fetch the receipt for. + receipt_type: The receipt types to fetch. Earlier receipt types + are given priority if multiple receipts point to the same event. + + Returns: + The latest receipt, if one exists. + """ + latest_event_id: Optional[str] = None + latest_stream_ordering = 0 + for receipt_type in receipt_types: + result = await self._get_last_receipt_event_id_for_user( + user_id, room_id, receipt_type + ) + if result is None: + continue + event_id, stream_ordering = result + + if latest_event_id is None or latest_stream_ordering < stream_ordering: + latest_event_id = event_id + latest_stream_ordering = stream_ordering + + return latest_event_id + + @cached() + async def _get_last_receipt_event_id_for_user( + self, user_id: str, room_id: str, receipt_type: str + ) -> Optional[Tuple[str, int]]: + """ + Fetch the event ID and stream ordering for the latest receipt. Args: user_id: The user to fetch receipts for. 
@@ -157,30 +188,33 @@ class ReceiptsWorkerStore(SQLBaseStore): receipt_type: The receipt type to fetch. Returns: - The event ID of the latest receipt, if one exists; otherwise `None`. + The event ID and stream ordering of the latest receipt, if one exists; + otherwise `None`. + """ + sql = """ + SELECT event_id, stream_ordering + FROM receipts_linearized + INNER JOIN events USING (room_id, event_id) + WHERE user_id = ? + AND room_id = ? + AND receipt_type = ? """ - return await self.db_pool.simple_select_one_onecol( - table="receipts_linearized", - keyvalues={ - "room_id": room_id, - "receipt_type": receipt_type, - "user_id": user_id, - }, - retcol="event_id", - desc="get_own_receipt_for_user", - allow_none=True, - ) - @cached() + def f(txn: LoggingTransaction) -> Optional[Tuple[str, int]]: + txn.execute(sql, (user_id, room_id, receipt_type)) + return cast(Optional[Tuple[str, int]], txn.fetchone()) + + return await self.db_pool.runInteraction("get_own_receipt_for_user", f) + async def get_receipts_for_user( - self, user_id: str, receipt_type: str + self, user_id: str, receipt_types: Iterable[str] ) -> Dict[str, str]: """ Fetch the event IDs for the latest receipts sent by the given user. Args: user_id: The user to fetch receipts for. - receipt_type: The receipt type to fetch. + receipt_types: The receipt types to check. Returns: A map of room ID to the event ID of the latest receipt for that room. @@ -188,16 +222,48 @@ class ReceiptsWorkerStore(SQLBaseStore): If the user has not sent a receipt to a room then it will not appear in the returned dictionary. """ - rows = await self.db_pool.simple_select_list( - table="receipts_linearized", - keyvalues={"user_id": user_id, "receipt_type": receipt_type}, - retcols=("room_id", "event_id"), - desc="get_receipts_for_user", + results = await self.get_receipts_for_user_with_orderings( + user_id, receipt_types ) - return {row["room_id"]: row["event_id"] for row in rows} + # Reduce the result to room ID -> event ID. + return { + room_id: room_result["event_id"] for room_id, room_result in results.items() + } async def get_receipts_for_user_with_orderings( + self, user_id: str, receipt_types: Iterable[str] + ) -> JsonDict: + """ + Fetch receipts for all rooms that the given user is joined to. + + Args: + user_id: The user to fetch receipts for. + receipt_types: The receipt types to fetch. Earlier receipt types + are given priority if multiple receipts point to the same event. + + Returns: + A map of room ID to the latest receipt (for the given types). + """ + results: JsonDict = {} + for receipt_type in receipt_types: + partial_result = await self._get_receipts_for_user_with_orderings( + user_id, receipt_type + ) + for room_id, room_result in partial_result.items(): + # If the room has not yet been seen, or the receipt is newer, + # use it. + if ( + room_id not in results + or results[room_id]["stream_ordering"] + < room_result["stream_ordering"] + ): + results[room_id] = room_result + + return results + + @cached() + async def _get_receipts_for_user_with_orderings( self, user_id: str, receipt_type: str ) -> JsonDict: """ @@ -220,8 +286,9 @@ class ReceiptsWorkerStore(SQLBaseStore): " WHERE rl.room_id = e.room_id" " AND rl.event_id = e.event_id" " AND user_id = ?" + " AND receipt_type = ?" 
) - txn.execute(sql, (user_id,)) + txn.execute(sql, (user_id, receipt_type)) return cast(List[Tuple[str, str, int, int]], txn.fetchall()) rows = await self.db_pool.runInteraction( @@ -552,9 +619,9 @@ class ReceiptsWorkerStore(SQLBaseStore): def invalidate_caches_for_receipt( self, room_id: str, receipt_type: str, user_id: str ) -> None: - self.get_receipts_for_user.invalidate((user_id, receipt_type)) + self._get_receipts_for_user_with_orderings.invalidate((user_id, receipt_type)) self._get_linearized_receipts_for_room.invalidate((room_id,)) - self.get_last_receipt_event_id_for_user.invalidate( + self._get_last_receipt_event_id_for_user.invalidate( (user_id, room_id, receipt_type) ) self._invalidate_get_users_with_receipts_in_room(room_id, receipt_type, user_id) @@ -590,8 +657,8 @@ class ReceiptsWorkerStore(SQLBaseStore): """Inserts a receipt into the database if it's newer than the current one. Returns: - None if the RR is older than the current RR - otherwise, the rx timestamp of the event that the RR corresponds to + None if the receipt is older than the current receipt + otherwise, the rx timestamp of the event that the receipt corresponds to (or 0 if the event is unknown) """ assert self._can_write_to_receipts @@ -612,7 +679,7 @@ class ReceiptsWorkerStore(SQLBaseStore): if stream_ordering is not None: sql = ( "SELECT stream_ordering, event_id FROM events" - " INNER JOIN receipts_linearized as r USING (event_id, room_id)" + " INNER JOIN receipts_linearized AS r USING (event_id, room_id)" " WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ?" ) txn.execute(sql, (room_id, receipt_type, user_id)) @@ -653,7 +720,10 @@ class ReceiptsWorkerStore(SQLBaseStore): lock=False, ) - if receipt_type == ReceiptTypes.READ and stream_ordering is not None: + if ( + receipt_type in (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE) + and stream_ordering is not None + ): self._remove_old_push_actions_before_txn( # type: ignore[attr-defined] txn, room_id=room_id, user_id=user_id, stream_ordering=stream_ordering ) @@ -672,6 +742,10 @@ class ReceiptsWorkerStore(SQLBaseStore): Automatically does conversion between linearized and graph representations. + + Returns: + The new receipts stream ID and token, if the receipt is newer than + what was previously persisted. None, otherwise. """ assert self._can_write_to_receipts @@ -719,6 +793,7 @@ class ReceiptsWorkerStore(SQLBaseStore): stream_id=stream_id, ) + # If the receipt was older than the currently persisted one, nothing to do. 
if event_ts is None: return None @@ -774,7 +849,10 @@ class ReceiptsWorkerStore(SQLBaseStore): receipt_type, user_id, ) - txn.call_after(self.get_receipts_for_user.invalidate, (user_id, receipt_type)) + txn.call_after( + self._get_receipts_for_user_with_orderings.invalidate, + (user_id, receipt_type), + ) # FIXME: This shouldn't invalidate the whole cache txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,)) diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py index 65ab7db0c8..c12a9120f0 100644 --- a/tests/handlers/test_receipts.py +++ b/tests/handlers/test_receipts.py @@ -15,7 +15,7 @@ from typing import List -from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes +from synapse.api.constants import ReceiptTypes from synapse.types import JsonDict from tests import unittest @@ -25,20 +25,15 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.event_source = hs.get_event_sources().sources.receipt - # In the first param of _test_filters_hidden we use "hidden" instead of - # ReadReceiptEventFields.MSC2285_HIDDEN. We do this because we're mocking - # the data from the database which doesn't use the prefix - def test_filters_out_hidden_receipt(self): self._test_filters_hidden( [ { "content": { "$1435641916114394fHBLK:matrix.org": { - ReceiptTypes.READ: { + ReceiptTypes.READ_PRIVATE: { "@rikj:jki.re": { "ts": 1436451550453, - "hidden": True, } } } @@ -50,58 +45,23 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): [], ) - def test_does_not_filter_out_our_hidden_receipt(self): - self._test_filters_hidden( - [ - { - "content": { - "$1435641916hfgh4394fHBLK:matrix.org": { - ReceiptTypes.READ: { - "@me:server.org": { - "ts": 1436451550453, - "hidden": True, - }, - } - } - }, - "room_id": "!jEsUZKDJdhlrceRyVU:example.org", - "type": "m.receipt", - } - ], - [ - { - "content": { - "$1435641916hfgh4394fHBLK:matrix.org": { - ReceiptTypes.READ: { - "@me:server.org": { - "ts": 1436451550453, - ReadReceiptEventFields.MSC2285_HIDDEN: True, - }, - } - } - }, - "room_id": "!jEsUZKDJdhlrceRyVU:example.org", - "type": "m.receipt", - } - ], - ) - def test_filters_out_hidden_receipt_and_ignores_rest(self): self._test_filters_hidden( [ { "content": { "$1dgdgrd5641916114394fHBLK:matrix.org": { - ReceiptTypes.READ: { + ReceiptTypes.READ_PRIVATE: { "@rikj:jki.re": { "ts": 1436451550453, - "hidden": True, }, + }, + ReceiptTypes.READ: { "@user:jki.re": { "ts": 1436451550453, }, - } - } + }, + }, }, "room_id": "!jEsUZKDJdhlrceRyVU:example.org", "type": "m.receipt", @@ -130,10 +90,9 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$14356419edgd14394fHBLK:matrix.org": { - ReceiptTypes.READ: { + ReceiptTypes.READ_PRIVATE: { "@rikj:jki.re": { "ts": 1436451550453, - "hidden": True, }, } }, @@ -223,7 +182,6 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): [ { "content": { - "$143564gdfg6114394fHBLK:matrix.org": {}, "$1435641916114394fHBLK:matrix.org": { ReceiptTypes.READ: { "@user:jki.re": { @@ -244,10 +202,9 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): { "content": { "$14356419edgd14394fHBLK:matrix.org": { - ReceiptTypes.READ: { + ReceiptTypes.READ_PRIVATE: { "@rikj:jki.re": { "ts": 1436451550453, - "hidden": True, }, } }, @@ -306,7 +263,73 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): "type": "m.receipt", }, ], - [], + [ + { + "content": { + "$14356419edgd14394fHBLK:matrix.org": { + ReceiptTypes.READ: { + "@rikj:jki.re": "string", + } + }, + }, + "room_id": 
"!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + }, + ], + ) + + def test_leaves_our_hidden_and_their_public(self): + self._test_filters_hidden( + [ + { + "content": { + "$1dgdgrd5641916114394fHBLK:matrix.org": { + ReceiptTypes.READ_PRIVATE: { + "@me:server.org": { + "ts": 1436451550453, + }, + }, + ReceiptTypes.READ: { + "@rikj:jki.re": { + "ts": 1436451550453, + }, + }, + "a.receipt.type": { + "@rikj:jki.re": { + "ts": 1436451550453, + }, + }, + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [ + { + "content": { + "$1dgdgrd5641916114394fHBLK:matrix.org": { + ReceiptTypes.READ_PRIVATE: { + "@me:server.org": { + "ts": 1436451550453, + }, + }, + ReceiptTypes.READ: { + "@rikj:jki.re": { + "ts": 1436451550453, + }, + }, + "a.receipt.type": { + "@rikj:jki.re": { + "ts": 1436451550453, + }, + }, + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], ) def _test_filters_hidden( diff --git a/tests/replication/slave/storage/test_receipts.py b/tests/replication/slave/storage/test_receipts.py index de19e75b9d..5bbbd5fbcb 100644 --- a/tests/replication/slave/storage/test_receipts.py +++ b/tests/replication/slave/storage/test_receipts.py @@ -14,26 +14,246 @@ from synapse.api.constants import ReceiptTypes from synapse.replication.slave.storage.receipts import SlavedReceiptsStore +from synapse.types import UserID, create_requester + +from tests.test_utils.event_injection import create_event from ._base import BaseSlavedStoreTestCase -USER_ID = "@feeling:blue" -ROOM_ID = "!room:blue" -EVENT_ID = "$event:blue" +OTHER_USER_ID = "@other:test" +OUR_USER_ID = "@our:test" class SlavedReceiptTestCase(BaseSlavedStoreTestCase): STORE_TYPE = SlavedReceiptsStore - def test_receipt(self): - self.check("get_receipts_for_user", [USER_ID, ReceiptTypes.READ], {}) - self.get_success( - self.master_store.insert_receipt( - ROOM_ID, ReceiptTypes.READ, USER_ID, [EVENT_ID], {} + def prepare(self, reactor, clock, homeserver): + super().prepare(reactor, clock, homeserver) + self.room_creator = homeserver.get_room_creation_handler() + self.persist_event_storage = self.hs.get_storage().persistence + + # Create a test user + self.ourUser = UserID.from_string(OUR_USER_ID) + self.ourRequester = create_requester(self.ourUser) + + # Create a second test user + self.otherUser = UserID.from_string(OTHER_USER_ID) + self.otherRequester = create_requester(self.otherUser) + + # Create a test room + info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) + self.room_id1 = info["room_id"] + + # Create a second test room + info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) + self.room_id2 = info["room_id"] + + # Join the second user to the first room + memberEvent, memberEventContext = self.get_success( + create_event( + self.hs, + room_id=self.room_id1, + type="m.room.member", + sender=self.otherRequester.user.to_string(), + state_key=self.otherRequester.user.to_string(), + content={"membership": "join"}, ) ) - self.replicate() - self.check( - "get_receipts_for_user", [USER_ID, ReceiptTypes.READ], {ROOM_ID: EVENT_ID} + self.get_success( + self.persist_event_storage.persist_event(memberEvent, memberEventContext) ) + + # Join the second user to the second room + memberEvent, memberEventContext = self.get_success( + create_event( + self.hs, + room_id=self.room_id2, + type="m.room.member", + sender=self.otherRequester.user.to_string(), + state_key=self.otherRequester.user.to_string(), + 
content={"membership": "join"}, + ) + ) + self.get_success( + self.persist_event_storage.persist_event(memberEvent, memberEventContext) + ) + + def test_return_empty_with_no_data(self): + res = self.get_success( + self.master_store.get_receipts_for_user( + OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + ) + ) + self.assertEqual(res, {}) + + res = self.get_success( + self.master_store.get_receipts_for_user_with_orderings( + OUR_USER_ID, + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + ) + ) + self.assertEqual(res, {}) + + res = self.get_success( + self.master_store.get_last_receipt_event_id_for_user( + OUR_USER_ID, + self.room_id1, + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + ) + ) + self.assertEqual(res, None) + + def test_get_receipts_for_user(self): + # Send some events into the first room + event1_1_id = self.create_and_send_event( + self.room_id1, UserID.from_string(OTHER_USER_ID) + ) + event1_2_id = self.create_and_send_event( + self.room_id1, UserID.from_string(OTHER_USER_ID) + ) + + # Send public read receipt for the first event + self.get_success( + self.master_store.insert_receipt( + self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {} + ) + ) + # Send private read receipt for the second event + self.get_success( + self.master_store.insert_receipt( + self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} + ) + ) + + # Test we get the latest event when we want both private and public receipts + res = self.get_success( + self.master_store.get_receipts_for_user( + OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + ) + ) + self.assertEqual(res, {self.room_id1: event1_2_id}) + + # Test we get the older event when we want only public receipt + res = self.get_success( + self.master_store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) + ) + self.assertEqual(res, {self.room_id1: event1_1_id}) + + # Test we get the latest event when we want only the public receipt + res = self.get_success( + self.master_store.get_receipts_for_user( + OUR_USER_ID, [ReceiptTypes.READ_PRIVATE] + ) + ) + self.assertEqual(res, {self.room_id1: event1_2_id}) + + # Test receipt updating + self.get_success( + self.master_store.insert_receipt( + self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {} + ) + ) + res = self.get_success( + self.master_store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) + ) + self.assertEqual(res, {self.room_id1: event1_2_id}) + + # Send some events into the second room + event2_1_id = self.create_and_send_event( + self.room_id2, UserID.from_string(OTHER_USER_ID) + ) + + # Test new room is reflected in what the method returns + self.get_success( + self.master_store.insert_receipt( + self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} + ) + ) + res = self.get_success( + self.master_store.get_receipts_for_user( + OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] + ) + ) + self.assertEqual(res, {self.room_id1: event1_2_id, self.room_id2: event2_1_id}) + + def test_get_last_receipt_event_id_for_user(self): + # Send some events into the first room + event1_1_id = self.create_and_send_event( + self.room_id1, UserID.from_string(OTHER_USER_ID) + ) + event1_2_id = self.create_and_send_event( + self.room_id1, UserID.from_string(OTHER_USER_ID) + ) + + # Send public read receipt for the first event + self.get_success( + self.master_store.insert_receipt( + self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {} + ) + ) + # Send private read receipt for the second event + 
self.get_success( + self.master_store.insert_receipt( + self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} + ) + ) + + # Test we get the latest event when we want both private and public receipts + res = self.get_success( + self.master_store.get_last_receipt_event_id_for_user( + OUR_USER_ID, + self.room_id1, + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + ) + ) + self.assertEqual(res, event1_2_id) + + # Test we get the older event when we want only public receipt + res = self.get_success( + self.master_store.get_last_receipt_event_id_for_user( + OUR_USER_ID, self.room_id1, [ReceiptTypes.READ] + ) + ) + self.assertEqual(res, event1_1_id) + + # Test we get the latest event when we want only the private receipt + res = self.get_success( + self.master_store.get_last_receipt_event_id_for_user( + OUR_USER_ID, self.room_id1, [ReceiptTypes.READ_PRIVATE] + ) + ) + self.assertEqual(res, event1_2_id) + + # Test receipt updating + self.get_success( + self.master_store.insert_receipt( + self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {} + ) + ) + res = self.get_success( + self.master_store.get_last_receipt_event_id_for_user( + OUR_USER_ID, self.room_id1, [ReceiptTypes.READ] + ) + ) + self.assertEqual(res, event1_2_id) + + # Send some events into the second room + event2_1_id = self.create_and_send_event( + self.room_id2, UserID.from_string(OTHER_USER_ID) + ) + + # Test new room is reflected in what the method returns + self.get_success( + self.master_store.insert_receipt( + self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} + ) + ) + res = self.get_success( + self.master_store.get_last_receipt_event_id_for_user( + OUR_USER_ID, + self.room_id2, + [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], + ) + ) + self.assertEqual(res, event2_1_id) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index cb765455c1..67c94dd18f 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -23,7 +23,6 @@ import synapse.rest.admin from synapse.api.constants import ( EventContentFields, EventTypes, - ReadReceiptEventFields, ReceiptTypes, RelationTypes, ) @@ -347,7 +346,7 @@ class SyncKnockTestCase( # Knock on a room channel = self.make_request( "POST", - "/_matrix/client/r0/knock/%s" % (self.room_id,), + f"/_matrix/client/r0/knock/{self.room_id}", b"{}", self.knocker_tok, ) @@ -412,18 +411,79 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Send a message as the first user res = self.helper.send(self.room_id, body="hello", tok=self.tok) - # Send a read receipt to tell the server the first user's message was read - body = json.dumps({ReadReceiptEventFields.MSC2285_HIDDEN: True}).encode("utf8") + # Send a private read receipt to tell the server the first user's message was read channel = self.make_request( "POST", - "/rooms/%s/receipt/m.read/%s" % (self.room_id, res["event_id"]), - body, + f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res['event_id']}", + {}, access_token=self.tok2, ) self.assertEqual(channel.code, 200) - # Test that the first user can't see the other user's hidden read receipt - self.assertEqual(self._get_read_receipt(), None) + # Test that the first user can't see the other user's private read receipt + self.assertIsNone(self._get_read_receipt()) + + @override_config({"experimental_features": {"msc2285_enabled": True}}) + def test_public_receipt_can_override_private(self) -> None: + """ + Sending a public read receipt to the same event which has a private read + 
receipt should cause that receipt to become public. + """ + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a private read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + self.assertIsNone(self._get_read_receipt()) + + # Send a public read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that we did override the private read receipt + self.assertNotEqual(self._get_read_receipt(), None) + + @override_config({"experimental_features": {"msc2285_enabled": True}}) + def test_private_receipt_cannot_override_public(self) -> None: + """ + Sending a private read receipt to the same event which has a public read + receipt should cause no change. + """ + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a public read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + self.assertNotEqual(self._get_read_receipt(), None) + + # Send a private read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that we didn't override the public read receipt + self.assertIsNone(self._get_read_receipt()) @parameterized.expand( [ @@ -455,7 +515,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Send a read receipt for this message with an empty body channel = self.make_request( "POST", - "/rooms/%s/receipt/m.read/%s" % (self.room_id, res["event_id"]), + f"/rooms/{self.room_id}/receipt/m.read/{res['event_id']}", access_token=self.tok2, custom_headers=[("User-Agent", user_agent)], ) @@ -479,6 +539,9 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): # Store the next batch for the next request. self.next_batch = channel.json_body["next_batch"] + if channel.json_body.get("rooms", None) is None: + return None + # Return the read receipt ephemeral_events = channel.json_body["rooms"]["join"][self.room_id][ "ephemeral" @@ -499,7 +562,10 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): def default_config(self) -> JsonDict: config = super().default_config() - config["experimental_features"] = {"msc2654_enabled": True} + config["experimental_features"] = { + "msc2654_enabled": True, + "msc2285_enabled": True, + } return config def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -564,7 +630,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): body = json.dumps({ReceiptTypes.READ: res["event_id"]}).encode("utf8") channel = self.make_request( "POST", - "/rooms/%s/read_markers" % self.room_id, + f"/rooms/{self.room_id}/read_markers", body, access_token=self.tok, ) @@ -578,11 +644,10 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): self._check_unread_count(1) # Send a read receipt to tell the server we've read the latest event. 
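+        # (Note: a private receipt still clears the reader's own unread count;
+        # it just isn't shared with other users or sent over federation.)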
- body = json.dumps({ReadReceiptEventFields.MSC2285_HIDDEN: True}).encode("utf8") channel = self.make_request( "POST", - "/rooms/%s/receipt/m.read/%s" % (self.room_id, res["event_id"]), - body, + f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res['event_id']}", + {}, access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.json_body) @@ -644,13 +709,73 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): self._check_unread_count(4) # Check that tombstone events changes increase the unread counter. - self.helper.send_state( + res1 = self.helper.send_state( self.room_id, EventTypes.Tombstone, {"replacement_room": "!someroom:test"}, tok=self.tok2, ) self._check_unread_count(5) + res2 = self.helper.send(self.room_id, "hello", tok=self.tok2) + + # Make sure both m.read and org.matrix.msc2285.read.private advance + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/m.read/{res1['event_id']}", + {}, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self._check_unread_count(1) + + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res2['event_id']}", + {}, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self._check_unread_count(0) + + # We test for both receipt types that influence notification counts + @parameterized.expand([ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]) + def test_read_receipts_only_go_down(self, receipt_type: ReceiptTypes) -> None: + # Join the new user + self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) + + # Send messages + res1 = self.helper.send(self.room_id, "hello", tok=self.tok2) + res2 = self.helper.send(self.room_id, "hello", tok=self.tok2) + + # Read last event + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{receipt_type}/{res2['event_id']}", + {}, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self._check_unread_count(0) + + # Make sure neither m.read nor org.matrix.msc2285.read.private make the + # read receipt go up to an older event + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res1['event_id']}", + {}, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self._check_unread_count(0) + + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/m.read/{res1['event_id']}", + {}, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self._check_unread_count(0) def _check_unread_count(self, expected_count: int) -> None: """Syncs and compares the unread count with the expected value.""" @@ -663,9 +788,11 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, channel.json_body) - room_entry = channel.json_body["rooms"]["join"][self.room_id] + room_entry = ( + channel.json_body.get("rooms", {}).get("join", {}).get(self.room_id, {}) + ) self.assertEqual( - room_entry["org.matrix.msc2654.unread_count"], + room_entry.get("org.matrix.msc2654.unread_count", 0), expected_count, room_entry, ) From 7fbf42499d92ec3c9a05d9f36ec5fecd1ab1f18c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 4 May 2022 14:11:21 -0400 Subject: [PATCH 41/87] Use `getClientAddress` instead of `getClientIP`. (#12599) getClientIP was deprecated in Twisted 18.4.0, which also added getClientAddress. 
The Synapse minimum version for Twisted is currently 18.9.0, so all supported versions have the new API. --- changelog.d/12599.misc | 1 + synapse/api/auth.py | 4 ++-- synapse/handlers/auth.py | 2 +- synapse/handlers/identity.py | 2 +- synapse/handlers/sso.py | 4 ++-- synapse/http/site.py | 6 +++--- synapse/logging/opentracing.py | 2 +- synapse/rest/client/auth.py | 8 +++++--- synapse/rest/client/login.py | 14 ++++++++++---- synapse/rest/client/register.py | 6 +++--- tests/api/test_auth.py | 18 +++++++++--------- tests/handlers/test_cas.py | 2 +- tests/handlers/test_oidc.py | 4 ++-- tests/handlers/test_saml.py | 2 +- tests/replication/_base.py | 20 ++++++++++++-------- tests/server.py | 13 ++++++++----- 16 files changed, 62 insertions(+), 46 deletions(-) create mode 100644 changelog.d/12599.misc diff --git a/changelog.d/12599.misc b/changelog.d/12599.misc new file mode 100644 index 0000000000..d01278bbce --- /dev/null +++ b/changelog.d/12599.misc @@ -0,0 +1 @@ +Use `getClientAddress` instead of the deprecated `getClientIP`. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 01c32417d8..f6202ef7a5 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -187,7 +187,7 @@ class Auth: Once get_user_by_req has set up the opentracing span, this does the actual work. """ try: - ip_addr = request.getClientIP() + ip_addr = request.getClientAddress().host user_agent = get_request_user_agent(request) access_token = self.get_access_token_from_request(request) @@ -356,7 +356,7 @@ class Auth: return None, None, None if app_service.ip_range_whitelist: - ip_address = IPAddress(request.getClientIP()) + ip_address = IPAddress(request.getClientAddress().host) if ip_address not in app_service.ip_range_whitelist: return None, None, None diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 22678d486d..ad41337b28 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -551,7 +551,7 @@ class AuthHandler: await self.store.set_ui_auth_clientdict(sid, clientdict) user_agent = get_request_user_agent(request) - clientip = request.getClientIP() + clientip = request.getClientAddress().host await self.store.add_user_agent_ip_to_ui_auth_session( session.session_id, user_agent, clientip diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index c183e9c465..9bca2bc4b2 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -92,7 +92,7 @@ class IdentityHandler: """ await self._3pid_validation_ratelimiter_ip.ratelimit( - None, (medium, request.getClientIP()) + None, (medium, request.getClientAddress().host) ) await self._3pid_validation_ratelimiter_address.ratelimit( None, (medium, address) diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index e4fe94e557..1e171f3f71 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -468,7 +468,7 @@ class SsoHandler: auth_provider_id, remote_user_id, get_request_user_agent(request), - request.getClientIP(), + request.getClientAddress().host, ) new_user = True elif self._sso_update_profile_information: @@ -928,7 +928,7 @@ class SsoHandler: session.auth_provider_id, session.remote_user_id, get_request_user_agent(request), - request.getClientIP(), + request.getClientAddress().host, ) logger.info( diff --git a/synapse/http/site.py b/synapse/http/site.py index 40f6c04894..0b85a57d77 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -238,7 +238,7 @@ class SynapseRequest(Request): request_id, request=ContextRequest( request_id=request_id, - 
ip_address=self.getClientIP(), + ip_address=self.getClientAddress().host, site_tag=self.synapse_site.site_tag, # The requester is going to be unknown at this point. requester=None, @@ -381,7 +381,7 @@ class SynapseRequest(Request): self.synapse_site.access_logger.debug( "%s - %s - Received request: %s %s", - self.getClientIP(), + self.getClientAddress().host, self.synapse_site.site_tag, self.get_method(), self.get_redacted_uri(), @@ -429,7 +429,7 @@ class SynapseRequest(Request): "%s - %s - {%s}" " Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)" ' %sB %s "%s %s %s" "%s" [%d dbevts]', - self.getClientIP(), + self.getClientAddress().host, self.synapse_site.site_tag, requester, processing_time, diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index f86ee9aac7..a02b5bf6bd 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -884,7 +884,7 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False): tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, tags.HTTP_METHOD: request.get_method(), tags.HTTP_URL: request.get_redacted_uri(), - tags.PEER_HOST_IPV6: request.getClientIP(), + tags.PEER_HOST_IPV6: request.getClientAddress().host, } request_name = request.request_metrics.name diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index e0b2b80e5b..eb77337044 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -112,7 +112,7 @@ class AuthRestServlet(RestServlet): try: await self.auth_handler.add_oob_auth( - LoginType.RECAPTCHA, authdict, request.getClientIP() + LoginType.RECAPTCHA, authdict, request.getClientAddress().host ) except LoginError as e: # Authentication failed, let user try again @@ -132,7 +132,7 @@ class AuthRestServlet(RestServlet): try: await self.auth_handler.add_oob_auth( - LoginType.TERMS, authdict, request.getClientIP() + LoginType.TERMS, authdict, request.getClientAddress().host ) except LoginError as e: # Authentication failed, let user try again @@ -161,7 +161,9 @@ class AuthRestServlet(RestServlet): try: await self.auth_handler.add_oob_auth( - LoginType.REGISTRATION_TOKEN, authdict, request.getClientIP() + LoginType.REGISTRATION_TOKEN, + authdict, + request.getClientAddress().host, ) except LoginError as e: html = self.registration_token_template.render( diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 71d8038448..cf4196ac0a 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -176,7 +176,7 @@ class LoginRestServlet(RestServlet): if appservice.is_rate_limited(): await self._address_ratelimiter.ratelimit( - None, request.getClientIP() + None, request.getClientAddress().host ) result = await self._do_appservice_login( @@ -188,19 +188,25 @@ class LoginRestServlet(RestServlet): self.jwt_enabled and login_submission["type"] == LoginRestServlet.JWT_TYPE ): - await self._address_ratelimiter.ratelimit(None, request.getClientIP()) + await self._address_ratelimiter.ratelimit( + None, request.getClientAddress().host + ) result = await self._do_jwt_login( login_submission, should_issue_refresh_token=should_issue_refresh_token, ) elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE: - await self._address_ratelimiter.ratelimit(None, request.getClientIP()) + await self._address_ratelimiter.ratelimit( + None, request.getClientAddress().host + ) result = await self._do_token_login( login_submission, should_issue_refresh_token=should_issue_refresh_token, ) else: - await 
self._address_ratelimiter.ratelimit(None, request.getClientIP()) + await self._address_ratelimiter.ratelimit( + None, request.getClientAddress().host + ) result = await self._do_other_login( login_submission, should_issue_refresh_token=should_issue_refresh_token, diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 13ef6b35a0..47b6db1ebf 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -352,7 +352,7 @@ class UsernameAvailabilityRestServlet(RestServlet): if self.inhibit_user_in_use_error: return 200, {"available": True} - ip = request.getClientIP() + ip = request.getClientAddress().host with self.ratelimiter.ratelimit(ip) as wait_deferred: await wait_deferred @@ -394,7 +394,7 @@ class RegistrationTokenValidityRestServlet(RestServlet): ) async def on_GET(self, request: Request) -> Tuple[int, JsonDict]: - await self.ratelimiter.ratelimit(None, (request.getClientIP(),)) + await self.ratelimiter.ratelimit(None, (request.getClientAddress().host,)) if not self.hs.config.registration.enable_registration: raise SynapseError( @@ -441,7 +441,7 @@ class RegisterRestServlet(RestServlet): async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: body = parse_json_object_from_request(request) - client_addr = request.getClientIP() + client_addr = request.getClientAddress().host await self.ratelimiter.ratelimit(None, client_addr, update=False) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 3e05789923..d547df8a64 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -105,7 +105,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_user_by_access_token = simple_async_mock(None) request = Mock(args={}) - request.getClientIP.return_value = "127.0.0.1" + request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() requester = self.get_success(self.auth.get_user_by_req(request)) @@ -124,7 +124,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_user_by_access_token = simple_async_mock(None) request = Mock(args={}) - request.getClientIP.return_value = "192.168.10.10" + request.getClientAddress.return_value.host = "192.168.10.10" request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() requester = self.get_success(self.auth.get_user_by_req(request)) @@ -143,7 +143,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_user_by_access_token = simple_async_mock(None) request = Mock(args={}) - request.getClientIP.return_value = "131.111.8.42" + request.getClientAddress.return_value.host = "131.111.8.42" request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() f = self.get_failure( @@ -190,7 +190,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_user_by_access_token = simple_async_mock(None) request = Mock(args={}) - request.getClientIP.return_value = "127.0.0.1" + request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] request.args[b"user_id"] = [masquerading_user_id] request.requestHeaders.getRawHeaders = mock_getRawHeaders() @@ -209,7 +209,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_user_by_access_token = simple_async_mock(None) request = Mock(args={}) - request.getClientIP.return_value = "127.0.0.1" + request.getClientAddress.return_value.host = 
"127.0.0.1" request.args[b"access_token"] = [self.test_token] request.args[b"user_id"] = [masquerading_user_id] request.requestHeaders.getRawHeaders = mock_getRawHeaders() @@ -236,7 +236,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_device = simple_async_mock({"hidden": False}) request = Mock(args={}) - request.getClientIP.return_value = "127.0.0.1" + request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] request.args[b"user_id"] = [masquerading_user_id] request.args[b"org.matrix.msc3202.device_id"] = [masquerading_device_id] @@ -268,7 +268,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_device = simple_async_mock(None) request = Mock(args={}) - request.getClientIP.return_value = "127.0.0.1" + request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] request.args[b"user_id"] = [masquerading_user_id] request.args[b"org.matrix.msc3202.device_id"] = [masquerading_device_id] @@ -288,7 +288,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ) self.store.insert_client_ip = simple_async_mock(None) request = Mock(args={}) - request.getClientIP.return_value = "127.0.0.1" + request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() self.get_success(self.auth.get_user_by_req(request)) @@ -305,7 +305,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ) self.store.insert_client_ip = simple_async_mock(None) request = Mock(args={}) - request.getClientIP.return_value = "127.0.0.1" + request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() self.get_success(self.auth.get_user_by_req(request)) diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index 751025c5da..2b21547d0f 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -204,7 +204,7 @@ def _mock_request(): mock = Mock( spec=[ "finish", - "getClientIP", + "getClientAddress", "getHeader", "setHeader", "setResponseCode", diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 9684120c70..1231aed944 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -1300,7 +1300,7 @@ def _build_callback_request( "getCookie", "cookies", "requestHeaders", - "getClientIP", + "getClientAddress", "getHeader", ] ) @@ -1310,5 +1310,5 @@ def _build_callback_request( request.args = {} request.args[b"code"] = [code.encode("utf-8")] request.args[b"state"] = [state.encode("utf-8")] - request.getClientIP.return_value = ip_address + request.getClientAddress.return_value.host = ip_address return request diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index e2f0f90ef1..a0f84e2940 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -352,7 +352,7 @@ def _mock_request(): mock = Mock( spec=[ "finish", - "getClientIP", + "getClientAddress", "getHeader", "setHeader", "setResponseCode", diff --git a/tests/replication/_base.py b/tests/replication/_base.py index a0589b6d6a..a7602b4c96 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -154,10 +154,12 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): self.assertEqual(port, 8765) # Set up client side protocol - client_protocol = client_factory.buildProtocol(None) + client_address = IPv4Address("TCP", "127.0.0.1", 1234) 
+        client_protocol = client_factory.buildProtocol(client_address)
 
         # Set up the server side protocol
-        channel = self.site.buildProtocol(None)
+        server_address = IPv4Address("TCP", host, port)
+        channel = self.site.buildProtocol(server_address)
 
         # hook into the channel's request factory so that we can keep a record
         # of the requests
@@ -173,12 +175,12 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
 
         # Connect client to server and vice versa.
         client_to_server_transport = FakeTransport(
-            channel, self.reactor, client_protocol
+            channel, self.reactor, client_protocol, server_address, client_address
         )
         client_protocol.makeConnection(client_to_server_transport)
 
         server_to_client_transport = FakeTransport(
-            client_protocol, self.reactor, channel
+            client_protocol, self.reactor, channel, client_address, server_address
         )
         channel.makeConnection(server_to_client_transport)
 
@@ -406,19 +408,21 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
         self.assertEqual(port, repl_port)
 
         # Set up client side protocol
-        client_protocol = client_factory.buildProtocol(None)
+        client_address = IPv4Address("TCP", "127.0.0.1", 1234)
+        client_protocol = client_factory.buildProtocol(client_address)
 
         # Set up the server side protocol
-        channel = self._hs_to_site[hs].buildProtocol(None)
+        server_address = IPv4Address("TCP", host, port)
+        channel = self._hs_to_site[hs].buildProtocol(server_address)
 
         # Connect client to server and vice versa.
         client_to_server_transport = FakeTransport(
-            channel, self.reactor, client_protocol
+            channel, self.reactor, client_protocol, server_address, client_address
         )
         client_protocol.makeConnection(client_to_server_transport)
 
         server_to_client_transport = FakeTransport(
-            client_protocol, self.reactor, channel
+            client_protocol, self.reactor, channel, client_address, server_address
        )
         channel.makeConnection(server_to_client_transport)
 
diff --git a/tests/server.py b/tests/server.py
index 16559d2588..8f30e250c8 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -181,7 +181,7 @@ class FakeChannel:
             self.resource_usage = _self.logcontext.get_resource_usage()
 
     def getPeer(self):
-        # We give an address so that getClientIP returns a non null entry,
+        # We give an address so that getClientAddress/getClientIP returns a non null entry,
         # causing us to record the MAU
         return address.IPv4Address("TCP", self._ip, 3423)
 
@@ -562,7 +562,10 @@ class FakeTransport:
     """
 
     _peer_address: Optional[IAddress] = attr.ib(default=None)
-    """The value to be returend by getPeer"""
+    """The value to be returned by getPeer"""
+
+    _host_address: Optional[IAddress] = attr.ib(default=None)
+    """The value to be returned by getHost"""
 
     disconnecting = False
     disconnected = False
@@ -571,11 +574,11 @@ class FakeTransport:
     producer = attr.ib(default=None)
     autoflush = attr.ib(default=True)
 
-    def getPeer(self):
+    def getPeer(self) -> Optional[IAddress]:
         return self._peer_address
 
-    def getHost(self):
-        return None
+    def getHost(self) -> Optional[IAddress]:
+        return self._host_address
 
     def loseConnection(self, reason=None):
         if not self.disconnecting:
From 2d74a8c17876ed235249b1423d63c920c614a530 Mon Sep 17 00:00:00 2001
From: Will Hunt
Date: Wed, 4 May 2022 19:33:26 +0100
Subject: [PATCH 42/87] Add `mau_appservice_trial_days` config (#12619)

* Add mau_appservice_trial_days
* Add a test
* Tweaks
* changelog
* Ensure we sync after the delay
* Fix types
* Add config statement
* Fix test
* Reinstate logging that got removed
* Fix feature name
---
 changelog.d/12619.feature | 1 +
 docs/sample_config.yaml | 7 ++
.../configuration/config_documentation.md | 14 ++++ synapse/config/server.py | 8 ++ .../storage/databases/main/registration.py | 8 +- tests/test_mau.py | 74 +++++++++++++++++++ 6 files changed, 110 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12619.feature diff --git a/changelog.d/12619.feature b/changelog.d/12619.feature new file mode 100644 index 0000000000..b0fc0f5fed --- /dev/null +++ b/changelog.d/12619.feature @@ -0,0 +1 @@ +Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 5eba0fcf3d..a803b8261d 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -407,6 +407,11 @@ manhole_settings: # sign up in a short space of time never to return after their initial # session. # +# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but +# applies a different trial number if the user was registered by an appservice. +# A value of 0 means no trial days are applied. Appservices not listed in this +# dictionary use the value of `mau_trial_days` instead. +# # 'mau_limit_alerting' is a means of limiting client side alerting # should the mau limit be reached. This is useful for small instances # where the admin has 5 mau seats (say) for 5 specific people and no @@ -417,6 +422,8 @@ manhole_settings: #max_mau_value: 50 #mau_trial_days: 2 #mau_limit_alerting: false +#mau_appservice_trial_days: +# "appservice-id": 1 # If enabled, the metrics for the number of monthly active users will # be populated, however no one will be limited. If limit_usage_by_mau diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 36db649467..21dad0ac41 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -627,6 +627,20 @@ Example configuration: mau_trial_days: 5 ``` --- +Config option: `mau_appservice_trial_days` + +The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but applies a different +trial number if the user was registered by an appservice. A value +of 0 means no trial days are applied. Appservices not listed in this dictionary +use the value of `mau_trial_days` instead. + +Example configuration: +```yaml +mau_appservice_trial_days: + my_appservice_id: 3 + another_appservice_id: 6 +``` +--- Config option: `mau_limit_alerting` The option `mau_limit_alerting` is a means of limiting client-side alerting diff --git a/synapse/config/server.py b/synapse/config/server.py index b6cd326416..1e709c7cf5 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -413,6 +413,7 @@ class ServerConfig(Config): ) self.mau_trial_days = config.get("mau_trial_days", 0) + self.mau_appservice_trial_days = config.get("mau_appservice_trial_days", {}) self.mau_limit_alerting = config.get("mau_limit_alerting", True) # How long to keep redacted events in the database in unredacted form @@ -1105,6 +1106,11 @@ class ServerConfig(Config): # sign up in a short space of time never to return after their initial # session. # + # The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but + # applies a different trial number if the user was registered by an appservice. + # A value of 0 means no trial days are applied. Appservices not listed in this + # dictionary use the value of `mau_trial_days` instead. 
+ # # 'mau_limit_alerting' is a means of limiting client side alerting # should the mau limit be reached. This is useful for small instances # where the admin has 5 mau seats (say) for 5 specific people and no @@ -1115,6 +1121,8 @@ class ServerConfig(Config): #max_mau_value: 50 #mau_trial_days: 2 #mau_limit_alerting: false + #mau_appservice_trial_days: + # "appservice-id": 1 # If enabled, the metrics for the number of monthly active users will # be populated, however no one will be limited. If limit_usage_by_mau diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index d43163c27c..4991360b70 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -215,7 +215,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): async def is_trial_user(self, user_id: str) -> bool: """Checks if user is in the "trial" period, i.e. within the first - N days of registration defined by `mau_trial_days` config + N days of registration defined by `mau_trial_days` config or the + `mau_appservice_trial_days` config. Args: user_id: The user to check for trial status. @@ -226,7 +227,10 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): return False now = self._clock.time_msec() - trial_duration_ms = self.config.server.mau_trial_days * 24 * 60 * 60 * 1000 + days = self.config.server.mau_appservice_trial_days.get( + info["appservice_id"], self.config.server.mau_trial_days + ) + trial_duration_ms = days * 24 * 60 * 60 * 1000 is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms return is_trial diff --git a/tests/test_mau.py b/tests/test_mau.py index 46bd3075de..5bbc361aa2 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -14,6 +14,8 @@ """Tests REST events for /rooms paths.""" +from typing import List + from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.appservice import ApplicationService @@ -229,6 +231,78 @@ class TestMauLimit(unittest.HomeserverTestCase): self.reactor.advance(100) self.assertEqual(2, self.successResultOf(count)) + @override_config( + { + "mau_trial_days": 3, + "mau_appservice_trial_days": {"SomeASID": 1, "AnotherASID": 2}, + } + ) + def test_as_trial_days(self): + user_tokens: List[str] = [] + + def advance_time_and_sync(): + self.reactor.advance(24 * 60 * 61) + for token in user_tokens: + self.do_sync_for_user(token) + + # Cheekily add an application service that we use to register a new user + # with. 
+ as_token_1 = "foobartoken1" + self.store.services_cache.append( + ApplicationService( + token=as_token_1, + hostname=self.hs.hostname, + id="SomeASID", + sender="@as_sender_1:test", + namespaces={"users": [{"regex": "@as_1.*", "exclusive": True}]}, + ) + ) + + as_token_2 = "foobartoken2" + self.store.services_cache.append( + ApplicationService( + token=as_token_2, + hostname=self.hs.hostname, + id="AnotherASID", + sender="@as_sender_2:test", + namespaces={"users": [{"regex": "@as_2.*", "exclusive": True}]}, + ) + ) + + user_tokens.append(self.create_user("kermit1")) + user_tokens.append(self.create_user("kermit2")) + user_tokens.append( + self.create_user("as_1kermit3", token=as_token_1, appservice=True) + ) + user_tokens.append( + self.create_user("as_2kermit4", token=as_token_2, appservice=True) + ) + + # Advance time by 1 day to include the first appservice + advance_time_and_sync() + self.assertEqual( + self.get_success(self.store.get_monthly_active_count_by_service()), + {"SomeASID": 1}, + ) + + # Advance time by 1 day to include the next appservice + advance_time_and_sync() + self.assertEqual( + self.get_success(self.store.get_monthly_active_count_by_service()), + {"SomeASID": 1, "AnotherASID": 1}, + ) + + # Advance time by 1 day to include the native users + advance_time_and_sync() + self.assertEqual( + self.get_success(self.store.get_monthly_active_count_by_service()), + { + "SomeASID": 1, + "AnotherASID": 1, + "native": 2, + }, + ) + def create_user(self, localpart, token=None, appservice=False): request_data = { "username": localpart, From c0379d6e5b3ec277788018670e69f9dc848bfb34 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 5 May 2022 10:20:23 +0100 Subject: [PATCH 43/87] Reduce log spam when running multiple event persisters (#12610) --- changelog.d/12610.misc | 1 + synapse/replication/tcp/handler.py | 9 +++++++-- synapse/replication/tcp/resource.py | 9 +++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12610.misc diff --git a/changelog.d/12610.misc b/changelog.d/12610.misc new file mode 100644 index 0000000000..02efe0c72f --- /dev/null +++ b/changelog.d/12610.misc @@ -0,0 +1 @@ +Reduce log spam when running multiple event persisters. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 615f1828dd..9aba1cd451 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -537,7 +537,7 @@ class ReplicationCommandHandler: # Ignore POSITION that are just our own echoes return - logger.info("Handling '%s %s'", cmd.NAME, cmd.to_line()) + logger.debug("Handling '%s %s'", cmd.NAME, cmd.to_line()) self._add_command_to_stream_queue(conn, cmd) @@ -567,6 +567,11 @@ class ReplicationCommandHandler: # between then and now. missing_updates = cmd.prev_token != current_token while missing_updates: + # Note: There may very well not be any new updates, but we check to + # make sure. This can particularly happen for the event stream where + # event persisters continuously send `POSITION`. See `resource.py` + # for why this can happen. + logger.info( "Fetching replication rows for '%s' between %i and %i", stream_name, @@ -590,7 +595,7 @@ class ReplicationCommandHandler: [stream.parse_row(row) for row in rows], ) - logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) + logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) # We've now caught up to position sent to us, notify handler. 
await self._replication_data_handler.on_position( diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index c6870df8f9..99f09669f0 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -204,6 +204,15 @@ class ReplicationStreamer: # turns out that e.g. account data streams share # their "current token" with each other, meaning # that it is *not* safe to send a POSITION. + + # Note: `last_token` may not *actually* be the + # last token we sent out in a RDATA or POSITION. + # This can happen if we sent out an RDATA for + # position X when our current token was say X+1. + # Other workers will see RDATA for X and then a + # POSITION with last token of X+1, which will + # cause them to check if there were any missing + # updates between X and X+1. logger.info( "Sending position: %s -> %s", stream.NAME, From cc7656099d882735507c41ea6efcba961f53b7ec Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Thu, 5 May 2022 12:11:52 +0100 Subject: [PATCH 44/87] Fix typo in some instances of enable_registration_token_3pid_bypass. (#12639) --- changelog.d/12639.bugfix | 1 + synapse/config/registration.py | 4 ++-- synapse/handlers/ui_auth/checkers.py | 2 +- synapse/rest/client/register.py | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/12639.bugfix diff --git a/changelog.d/12639.bugfix b/changelog.d/12639.bugfix new file mode 100644 index 0000000000..c01596282c --- /dev/null +++ b/changelog.d/12639.bugfix @@ -0,0 +1 @@ +Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. \ No newline at end of file diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 70eb7e6a97..d2d0425e62 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -43,8 +43,8 @@ class RegistrationConfig(Config): self.registration_requires_token = config.get( "registration_requires_token", False ) - self.enable_registration_token_3pid_bypasss = config.get( - "enable_registration_token_3pid_bypasss", False + self.enable_registration_token_3pid_bypass = config.get( + "enable_registration_token_3pid_bypass", False ) self.registration_shared_secret = config.get("registration_shared_secret") diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index e2a441066d..05cebb5d4d 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -258,7 +258,7 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker): self.hs = hs self._enabled = bool( hs.config.registration.registration_requires_token - ) or bool(hs.config.registration.enable_registration_token_3pid_bypasss) + ) or bool(hs.config.registration.enable_registration_token_3pid_bypass) self.store = hs.get_datastores().main def is_enabled(self) -> bool: diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 47b6db1ebf..e8e51a9c66 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -930,7 +930,7 @@ def _calculate_registration_flows( flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY]) # Add a flow that doesn't require any 3pids, if the config requests it. 
- if config.registration.enable_registration_token_3pid_bypasss: + if config.registration.enable_registration_token_3pid_bypass: flows.append([LoginType.REGISTRATION_TOKEN]) # Prepend m.login.terms to all flows if we're requiring consent From ddc8bba00f06321e0758afaf88a65e190ca4c714 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 5 May 2022 07:51:19 -0400 Subject: [PATCH 45/87] Remove unused receipt datastore methods. (#12632) The last usage was removed in 5a1dd297c3ce105a7f516d9d9fe87b94b9d356c8 (#8059). --- changelog.d/12632.misc | 1 + synapse/storage/databases/main/receipts.py | 54 ---------------------- 2 files changed, 1 insertion(+), 54 deletions(-) create mode 100644 changelog.d/12632.misc diff --git a/changelog.d/12632.misc b/changelog.d/12632.misc new file mode 100644 index 0000000000..9e4ba79c79 --- /dev/null +++ b/changelog.d/12632.misc @@ -0,0 +1 @@ +Remove unused code related to receipts. diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 9e3d838eab..d035969a31 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -22,7 +22,6 @@ from typing import ( Iterable, List, Optional, - Set, Tuple, cast, ) @@ -117,33 +116,6 @@ class ReceiptsWorkerStore(SQLBaseStore): """Get the current max stream ID for receipts stream""" return self._receipts_id_gen.get_current_token() - @cached() - async def get_users_with_read_receipts_in_room(self, room_id: str) -> Set[str]: - receipts = await self.get_receipts_for_room(room_id, ReceiptTypes.READ) - return {r["user_id"] for r in receipts} - - @cached() - async def get_receipts_for_room( - self, room_id: str, receipt_type: str - ) -> List[Dict[str, Any]]: - """ - Fetch the event IDs for the latest receipt for all users in a room with the given receipt type. - - Args: - room_id: The room ID to fetch the receipt for. - receipt_type: The receipt type to fetch. - - Returns: - A list of dictionaries, one for each user ID. Each dictionary - contains a user ID and the event ID of that user's latest receipt. 
- """ - return await self.db_pool.simple_select_list( - table="receipts_linearized", - keyvalues={"room_id": room_id, "receipt_type": receipt_type}, - retcols=("user_id", "event_id"), - desc="get_receipts_for_room", - ) - async def get_last_receipt_event_id_for_user( self, user_id: str, room_id: str, receipt_types: Iterable[str] ) -> Optional[str]: @@ -599,23 +571,6 @@ class ReceiptsWorkerStore(SQLBaseStore): "get_all_updated_receipts", get_all_updated_receipts_txn ) - def _invalidate_get_users_with_receipts_in_room( - self, room_id: str, receipt_type: str, user_id: str - ) -> None: - if receipt_type != ReceiptTypes.READ: - return - - res = self.get_users_with_read_receipts_in_room.cache.get_immediate( - room_id, None, update_metrics=False - ) - - if res and user_id in res: - # We'd only be adding to the set, so no point invalidating if the - # user is already there - return - - self.get_users_with_read_receipts_in_room.invalidate((room_id,)) - def invalidate_caches_for_receipt( self, room_id: str, receipt_type: str, user_id: str ) -> None: @@ -624,8 +579,6 @@ class ReceiptsWorkerStore(SQLBaseStore): self._get_last_receipt_event_id_for_user.invalidate( (user_id, room_id, receipt_type) ) - self._invalidate_get_users_with_receipts_in_room(room_id, receipt_type, user_id) - self.get_receipts_for_room.invalidate((room_id, receipt_type)) def process_replication_rows( self, @@ -842,13 +795,6 @@ class ReceiptsWorkerStore(SQLBaseStore): ) -> None: assert self._can_write_to_receipts - txn.call_after(self.get_receipts_for_room.invalidate, (room_id, receipt_type)) - txn.call_after( - self._invalidate_get_users_with_receipts_in_room, - room_id, - receipt_type, - user_id, - ) txn.call_after( self._get_receipts_for_user_with_orderings.invalidate, (user_id, receipt_type), From f90d381c7b93b98a5b6d7353196e2ae0912ea822 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 5 May 2022 08:15:12 -0400 Subject: [PATCH 46/87] Edits/annotations should not have any bundled aggregations calculated. (#12633) Fixes a regression from 8b309adb436c162510ed1402f33b8741d71fc058 (#11660) and b65acead428653b988351ae8d7b22127a22039cd (#11752) where events which themselves were an edit or an annotation could have bundled aggregations calculated, which is not allowed. --- changelog.d/12633.bugfix | 1 + synapse/handlers/relations.py | 38 ++++++++++++++--------------- tests/rest/client/test_relations.py | 31 +++++++++++++++++++++++ 3 files changed, 50 insertions(+), 20 deletions(-) create mode 100644 changelog.d/12633.bugfix diff --git a/changelog.d/12633.bugfix b/changelog.d/12633.bugfix new file mode 100644 index 0000000000..32332acd9a --- /dev/null +++ b/changelog.d/12633.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index cec5740fbd..c2754ec918 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -364,21 +364,29 @@ class RelationsHandler: The results may include additional events which are related to the requested events. """ - # De-duplicate events by ID to handle the same event requested multiple times. - # - # State events do not get bundled aggregations. - events_by_id = { - event.event_id: event for event in events if not event.is_state() - } - + # De-duplicated events by ID to handle the same event requested multiple times. + events_by_id = {} # A map of event ID to the relation in that event, if there is one. 
relations_by_id: Dict[str, str] = {}
-        for event_id, event in events_by_id.items():
+        for event in events:
+            # State events do not get bundled aggregations.
+            if event.is_state():
+                continue
+
             relates_to = event.content.get("m.relates_to")
+            relation_type = None
             if isinstance(relates_to, collections.abc.Mapping):
                 relation_type = relates_to.get("rel_type")
-                if isinstance(relation_type, str):
-                    relations_by_id[event_id] = relation_type
+                # An event which is a replacement (ie edit) or annotation (ie,
+                # reaction) may not have any other event related to it.
+                if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
+                    continue
+
+            # The event should get bundled aggregations.
+            events_by_id[event.event_id] = event
+            # Track the event's relation information for later.
+            if isinstance(relation_type, str):
+                relations_by_id[event.event_id] = relation_type
 
         # event ID -> bundled aggregation in non-serialized form.
         results: Dict[str, BundledAggregations] = {}
@@ -413,16 +421,6 @@ class RelationsHandler:
 
         # Fetch other relations per event.
         for event in events_by_id.values():
-            # An event which is a replacement (ie edit) or annotation (ie, reaction)
-            # may not have any other event related to it.
-            #
-            # XXX This is buggy, see https://github.com/matrix-org/synapse/issues/12566
-            if relations_by_id.get(event.event_id) in (
-                RelationTypes.ANNOTATION,
-                RelationTypes.REPLACE,
-            ):
-                continue
-
             # Fetch any annotations (ie, reactions) to bundle with this event.
             annotations = await self.get_annotations_for_event(
                 event.event_id, event.room_id, ignored_users=ignored_users
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 33ce9471b3..27dee8f697 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -620,6 +620,19 @@ class RelationsTestCase(BaseRelationsTestCase):
             {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
         )
 
+        # Directly requesting the edit should not have the edit to the edit applied.
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{edit_event_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        self.assertEqual("Wibble", channel.json_body["content"]["body"])
+        self.assertIn("m.new_content", channel.json_body["content"])
+
+        # The relations information should not include the edit to the edit.
+        self.assertNotIn("m.relations", channel.json_body["unsigned"])
+
     def test_unknown_relations(self) -> None:
         """Unknown relations should be accepted."""
         channel = self._send_relation("m.relation.test", "m.room.test")
@@ -984,6 +997,24 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
 
         self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7)
 
+    def test_annotation_to_annotation(self) -> None:
+        """Any relation to an annotation should be ignored."""
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        event_id = channel.json_body["event_id"]
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "b", parent_id=event_id
+        )
+
+        # Fetch the initial annotation event to see if it has bundled aggregations.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v3/rooms/{self.room}/event/{event_id}",
+            access_token=self.user_token,
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+        # The first annotation should not have any bundled aggregations.
+ self.assertNotIn("m.relations", channel.json_body["unsigned"]) + def test_reference(self) -> None: """ Test that references get correctly bundled. From 9ae0253f4e9f7aafb64abe3b5f04ad067883a121 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=A0imon=20Brandner?= Date: Thu, 5 May 2022 14:31:25 +0200 Subject: [PATCH 47/87] Use `private` instead of `hidden` in MSC2285 related code. (#12635) --- changelog.d/12635.feature | 1 + synapse/config/experimental.py | 2 +- synapse/handlers/initial_sync.py | 4 ++-- synapse/handlers/receipts.py | 6 +++--- synapse/rest/client/versions.py | 2 +- tests/handlers/test_receipts.py | 32 ++++++++++++++++---------------- tests/rest/client/test_sync.py | 4 ++-- 7 files changed, 26 insertions(+), 25 deletions(-) create mode 100644 changelog.d/12635.feature diff --git a/changelog.d/12635.feature b/changelog.d/12635.feature new file mode 100644 index 0000000000..cd5c45029e --- /dev/null +++ b/changelog.d/12635.feature @@ -0,0 +1 @@ +Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 421ed7481b..abed5e7edb 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -32,7 +32,7 @@ class ExperimentalConfig(Config): # MSC2716 (importing historical messages) self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False) - # MSC2285 (hidden read receipts) + # MSC2285 (private read receipts) self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False) # MSC3244 (room version capabilities) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index a7db8feb57..7b94770f97 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -143,7 +143,7 @@ class InitialSyncHandler: to_key=int(now_token.receipt_key), ) if self.hs.config.experimental.msc2285_enabled: - receipt = ReceiptEventSource.filter_out_hidden(receipt, user_id) + receipt = ReceiptEventSource.filter_out_private(receipt, user_id) tags_by_room = await self.store.get_tags_for_user(user_id) @@ -449,7 +449,7 @@ class InitialSyncHandler: if not receipts: return [] if self.hs.config.experimental.msc2285_enabled: - receipts = ReceiptEventSource.filter_out_hidden(receipts, user_id) + receipts = ReceiptEventSource.filter_out_private(receipts, user_id) return receipts presence, receipts, (messages, token) = await make_deferred_yieldable( diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index ae41fd674e..43d615357b 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -165,7 +165,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]): self.config = hs.config @staticmethod - def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]: + def filter_out_private(events: List[JsonDict], user_id: str) -> List[JsonDict]: """ This method takes in what is returned by get_linearized_receipts_for_rooms() and goes through read receipts @@ -175,7 +175,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]): visible_events = [] - # filter out hidden receipts the user shouldn't see + # filter out private receipts the user shouldn't see for event in events: content = event.get("content", {}) new_event = event.copy() @@ -223,7 +223,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]): 
) if self.config.experimental.msc2285_enabled: - events = ReceiptEventSource.filter_out_hidden(events, user.to_string()) + events = ReceiptEventSource.filter_out_private(events, user.to_string()) return events, to_key diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index bfc1d4ee08..c1bd775fec 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -93,7 +93,7 @@ class VersionsRestServlet(RestServlet): "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private, # Supports the busy presence state described in MSC3026. "org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled, - # Supports receiving hidden read receipts as per MSC2285 + # Supports receiving private read receipts as per MSC2285 "org.matrix.msc2285": self.config.experimental.msc2285_enabled, # Adds support for importing historical messages as per MSC2716 "org.matrix.msc2716": self.config.experimental.msc2716_enabled, diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py index c12a9120f0..0482a1ea34 100644 --- a/tests/handlers/test_receipts.py +++ b/tests/handlers/test_receipts.py @@ -25,8 +25,8 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.event_source = hs.get_event_sources().sources.receipt - def test_filters_out_hidden_receipt(self): - self._test_filters_hidden( + def test_filters_out_private_receipt(self): + self._test_filters_private( [ { "content": { @@ -45,8 +45,8 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): [], ) - def test_filters_out_hidden_receipt_and_ignores_rest(self): - self._test_filters_hidden( + def test_filters_out_private_receipt_and_ignores_rest(self): + self._test_filters_private( [ { "content": { @@ -84,8 +84,8 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def test_filters_out_event_with_only_hidden_receipts_and_ignores_the_rest(self): - self._test_filters_hidden( + def test_filters_out_event_with_only_private_receipts_and_ignores_the_rest(self): + self._test_filters_private( [ { "content": { @@ -126,7 +126,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ) def test_handles_missing_content_of_m_read(self): - self._test_filters_hidden( + self._test_filters_private( [ { "content": { @@ -162,7 +162,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ) def test_handles_empty_event(self): - self._test_filters_hidden( + self._test_filters_private( [ { "content": { @@ -196,8 +196,8 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def test_filters_out_receipt_event_with_only_hidden_receipt_and_ignores_rest(self): - self._test_filters_hidden( + def test_filters_out_receipt_event_with_only_private_receipt_and_ignores_rest(self): + self._test_filters_private( [ { "content": { @@ -249,7 +249,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): Context: https://github.com/matrix-org/synapse/issues/10603 """ - self._test_filters_hidden( + self._test_filters_private( [ { "content": { @@ -278,8 +278,8 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def test_leaves_our_hidden_and_their_public(self): - self._test_filters_hidden( + def test_leaves_our_private_and_their_public(self): + self._test_filters_private( [ { "content": { @@ -332,9 +332,9 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): ], ) - def _test_filters_hidden( + def _test_filters_private( self, events: List[JsonDict], expected_output: List[JsonDict] ): - """Tests that the _filter_out_hidden returns the 
expected output""" - filtered_events = self.event_source.filter_out_hidden(events, "@me:server.org") + """Tests that the _filter_out_private returns the expected output""" + filtered_events = self.event_source.filter_out_private(events, "@me:server.org") self.assertEqual(filtered_events, expected_output) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 67c94dd18f..0108337649 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -407,7 +407,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) @override_config({"experimental_features": {"msc2285_enabled": True}}) - def test_hidden_read_receipts(self) -> None: + def test_private_read_receipts(self) -> None: # Send a message as the first user res = self.helper.send(self.room_id, body="hello", tok=self.tok) @@ -639,7 +639,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): # Check that the unread counter is back to 0. self._check_unread_count(0) - # Check that hidden read receipts don't break unread counts + # Check that private read receipts don't break unread counts res = self.helper.send(self.room_id, "hello", tok=self.tok2) self._check_unread_count(1) From b8fa24b022df366495147b5ddce73c06ec9cbc3a Mon Sep 17 00:00:00 2001 From: Henry <97804910+henryclw@users.noreply.github.com> Date: Thu, 5 May 2022 05:36:42 -0700 Subject: [PATCH 48/87] Use `docker/metadata-action` to generate docker image tags (#12573) Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Signed-off-by: Henry <97804910+henryclw@users.noreply.github.com> --- .github/workflows/docker.yml | 30 +++++++++++------------------- changelog.d/12573.docker | 1 + 2 files changed, 12 insertions(+), 19 deletions(-) create mode 100644 changelog.d/12573.docker diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 124b17458f..d20d30c035 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -34,32 +34,24 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - # TODO: consider using https://github.com/docker/metadata-action instead of this - # custom magic - name: Calculate docker image tag id: set-tag - run: | - case "${GITHUB_REF}" in - refs/heads/develop) - tag=develop - ;; - refs/heads/master|refs/heads/main) - tag=latest - ;; - refs/tags/*) - tag=${GITHUB_REF#refs/tags/} - ;; - *) - tag=${GITHUB_SHA} - ;; - esac - echo "::set-output name=tag::$tag" + uses: docker/metadata-action@master + with: + images: matrixdotorg/synapse + flavor: | + latest=false + tags: | + type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }} + type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }} + type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }} + type=pep440,pattern={{raw}} - name: Build and push all platforms uses: docker/build-push-action@v2 with: push: true labels: "gitsha1=${{ github.sha }}" - tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}" + tags: "${{ steps.set-tag.outputs.tags }}" file: "docker/Dockerfile" platforms: linux/amd64,linux/arm64 diff --git a/changelog.d/12573.docker b/changelog.d/12573.docker new file mode 100644 index 0000000000..5cc8de50ac --- /dev/null +++ b/changelog.d/12573.docker @@ -0,0 +1 @@ +Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to 
generate docker image tags, instead of a custom shell script. Contributed by henryclw. \ No newline at end of file From 07fa53ec40106b97ba2c12a2dcc5446325e5fb61 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 5 May 2022 13:39:59 +0100 Subject: [PATCH 49/87] Improve comments and error messages around access tokens. (#12577) --- changelog.d/12577.misc | 1 + synapse/api/auth.py | 19 +++++++++++-------- 2 files changed, 12 insertions(+), 8 deletions(-) create mode 100644 changelog.d/12577.misc diff --git a/changelog.d/12577.misc b/changelog.d/12577.misc new file mode 100644 index 0000000000..8c4c47ad52 --- /dev/null +++ b/changelog.d/12577.misc @@ -0,0 +1 @@ +Improve comments and error messages around access tokens. \ No newline at end of file diff --git a/synapse/api/auth.py b/synapse/api/auth.py index f6202ef7a5..931750668e 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -417,7 +417,8 @@ class Auth: """ if rights == "access": - # first look in the database + # First look in the database to see if the access token is present + # as an opaque token. r = await self.store.get_user_by_access_token(token) if r: valid_until_ms = r.valid_until_ms @@ -434,7 +435,8 @@ class Auth: return r - # otherwise it needs to be a valid macaroon + # If the token isn't found in the database, then it could still be a + # macaroon, so we check that here. try: user_id, guest = self._parse_and_validate_macaroon(token, rights) @@ -482,8 +484,12 @@ class Auth: TypeError, ValueError, ) as e: - logger.warning("Invalid macaroon in auth: %s %s", type(e), e) - raise InvalidClientTokenError("Invalid macaroon passed.") + logger.warning( + "Invalid access token in auth: %s %s.", + type(e), + e, + ) + raise InvalidClientTokenError("Invalid access token passed.") def _parse_and_validate_macaroon( self, token: str, rights: str = "access" @@ -504,10 +510,7 @@ class Auth: try: macaroon = pymacaroons.Macaroon.deserialize(token) except Exception: # deserialize can throw more-or-less anything - # doesn't look like a macaroon: treat it as an opaque token which - # must be in the database. - # TODO: it would be nice to get rid of this, but apparently some - # people use access tokens which aren't macaroons + # The access token doesn't look like a macaroon. raise _InvalidMacaroonException() try: From ef86cf3d281d09d8ff4ee64af7a8c48d2516e2fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=A0imon=20Brandner?= Date: Thu, 5 May 2022 15:25:51 +0200 Subject: [PATCH 50/87] Update `_on_new_receipts()` to work with MSC2285 changes. (#12636) --- changelog.d/12636.feature | 1 + synapse/replication/tcp/client.py | 8 +++----- 2 files changed, 4 insertions(+), 5 deletions(-) create mode 100644 changelog.d/12636.feature diff --git a/changelog.d/12636.feature b/changelog.d/12636.feature new file mode 100644 index 0000000000..cd5c45029e --- /dev/null +++ b/changelog.d/12636.feature @@ -0,0 +1 @@ +Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. 
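The rule this change moves to, shown concretely in the diff below, is that private read receipts are filtered out by receipt type before federation sending, rather than by inspecting a `hidden` flag in the receipt data. A minimal sketch of that filter (the generator and its loose typing are illustrative, not Synapse's actual replication API):

```python
from typing import Any, Iterable, Iterator

from synapse.api.constants import ReceiptTypes


def receipts_to_federate(receipts: Iterable[Any]) -> Iterator[Any]:
    """Yield only the receipts that may be sent to other homeservers.

    Assumes each item exposes a `receipt_type` attribute, as the
    receipts-stream rows in the diff below do.
    """
    for receipt in receipts:
        # Private read receipts are purely local and never leave the
        # homeserver, regardless of any experimental-feature flags.
        if receipt.receipt_type == ReceiptTypes.READ_PRIVATE:
            continue
        yield receipt
```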
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 122892c7bc..350762f494 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -21,7 +21,7 @@ from twisted.internet.interfaces import IAddress, IConnector from twisted.internet.protocol import ReconnectingClientFactory from twisted.python.failure import Failure -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, ReceiptTypes from synapse.federation import send_queue from synapse.federation.sender import FederationSender from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable @@ -401,10 +401,8 @@ class FederationSenderHandler: # we only want to send on receipts for our own users if not self._is_mine_id(receipt.user_id): continue - if ( - receipt.data.get("hidden", False) - and self._hs.config.experimental.msc2285_enabled - ): + # Private read receipts never get sent over federation. + if receipt.receipt_type == ReceiptTypes.READ_PRIVATE: continue receipt_info = ReadReceipt( receipt.room_id, From e923fc20bda436a894301350ac9ab2b4bb942e18 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 5 May 2022 14:51:15 +0100 Subject: [PATCH 51/87] Include extra dependency groups 'systemd' and 'cache_memory' in debian packages (#12640) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- debian/build_virtualenv | 7 ++++++- debian/changelog | 7 +++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/debian/build_virtualenv b/debian/build_virtualenv index b068792592..d2955f7628 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv @@ -37,7 +37,12 @@ python3 -m venv "$TEMP_VENV" source "$TEMP_VENV/bin/activate" pip install -U pip pip install poetry==1.2.0b1 -poetry export --extras all --extras test -o exported_requirements.txt +poetry export \ + --extras all \ + --extras test \ + --extras systemd \ + --extras cache_memory \ + -o exported_requirements.txt deactivate rm -rf "$TEMP_VENV" diff --git a/debian/changelog b/debian/changelog index 53b2387776..c828514752 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +matrix-synapse-py3 (1.58.0+nmu1) UNRELEASED; urgency=medium + + * Include python dependencies from the `systemd` and `cache_memory` extras package groups, which + were incorrectly omitted from the 1.58.0 package. + + -- Synapse Packaging team Thu, 05 May 2022 12:26:36 +0100 + matrix-synapse-py3 (1.58.0) stable; urgency=medium * New Synapse release 1.58.0. From 6a17a291a6692bc2a0d48ea052478f4014a19281 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 5 May 2022 15:05:58 +0100 Subject: [PATCH 52/87] 1.58.1 --- CHANGES.md | 11 +++++++++++ debian/changelog | 5 +++-- pyproject.toml | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 31f1561274..d221a37faf 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.58.1 (2022-05-05) +=========================== + +This patch release includes a fix to the Debian packages, installing the +`systemd` and `cache_memory` extra package groups, which were incorrectly +omitted in v1.58.0. This primarily prevented Synapse from starting +when the `systemd.journal.JournalHandler` log handler was configured. + +No significant changes since 1.58.0. 
+ + Synapse 1.58.0 (2022-05-03) =========================== diff --git a/debian/changelog b/debian/changelog index c828514752..5440f91bc0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,10 @@ -matrix-synapse-py3 (1.58.0+nmu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.58.1) stable; urgency=medium * Include python dependencies from the `systemd` and `cache_memory` extras package groups, which were incorrectly omitted from the 1.58.0 package. + * New Synapse release 1.58.1. - -- Synapse Packaging team Thu, 05 May 2022 12:26:36 +0100 + -- Synapse Packaging team Thu, 05 May 2022 14:58:23 +0100 matrix-synapse-py3 (1.58.0) stable; urgency=medium diff --git a/pyproject.toml b/pyproject.toml index f0f029f016..4167abb233 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.58.0" +version = "1.58.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From d2784b6567ccfdb08d017d3f83a6d5c1b42cc8f3 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 5 May 2022 15:06:39 +0100 Subject: [PATCH 53/87] Minor wording change to v1.58.1 release notes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index d221a37faf..c046e96eb1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,7 +6,7 @@ This patch release includes a fix to the Debian packages, installing the omitted in v1.58.0. This primarily prevented Synapse from starting when the `systemd.journal.JournalHandler` log handler was configured. -No significant changes since 1.58.0. +Otherwise, no significant changes since 1.58.0. Synapse 1.58.0 (2022-05-03) From bc149a18f6b5e12792e14c295f294adda39cffc8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 5 May 2022 15:10:24 +0100 Subject: [PATCH 54/87] link to relevant bug report in v1.58.1 changelog --- CHANGES.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index c046e96eb1..07541c803c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,8 @@ Synapse 1.58.1 (2022-05-05) This patch release includes a fix to the Debian packages, installing the `systemd` and `cache_memory` extra package groups, which were incorrectly omitted in v1.58.0. This primarily prevented Synapse from starting -when the `systemd.journal.JournalHandler` log handler was configured. +when the `systemd.journal.JournalHandler` log handler was configured +(closes [#12631](https://github.com/matrix-org/synapse/issues/12631)). Otherwise, no significant changes since 1.58.0. From 3a8ee229112697b02b876299869d7511474ecf92 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 5 May 2022 15:15:32 +0100 Subject: [PATCH 55/87] Update v1.58.1 changelog entry with more familiar language --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 07541c803c..982f09b1db 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,8 +4,8 @@ Synapse 1.58.1 (2022-05-05) This patch release includes a fix to the Debian packages, installing the `systemd` and `cache_memory` extra package groups, which were incorrectly omitted in v1.58.0. This primarily prevented Synapse from starting -when the `systemd.journal.JournalHandler` log handler was configured -(closes [#12631](https://github.com/matrix-org/synapse/issues/12631)). +when the `systemd.journal.JournalHandler` log handler was configured. 
+See [#12631](https://github.com/matrix-org/synapse/issues/12631) for further information. Otherwise, no significant changes since 1.58.0. From a377a43386a14c8a11fe9e99c821d0471f7982a2 Mon Sep 17 00:00:00 2001 From: "DeepBlueV7.X" Date: Thu, 5 May 2022 14:25:00 +0000 Subject: [PATCH 56/87] Support MSC3266 room summaries over federation (#11507) Signed-off-by: Nicolas Werner --- changelog.d/11507.feature | 1 + synapse/federation/federation_client.py | 2 + synapse/handlers/room_summary.py | 54 ++++++++++++++++++++++--- tests/handlers/test_room_summary.py | 26 ++++++++++++ 4 files changed, 78 insertions(+), 5 deletions(-) create mode 100644 changelog.d/11507.feature diff --git a/changelog.d/11507.feature b/changelog.d/11507.feature new file mode 100644 index 0000000000..72c5690cca --- /dev/null +++ b/changelog.d/11507.feature @@ -0,0 +1 @@ +Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 6a59cb4b71..b5e0b84cbc 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1426,6 +1426,8 @@ class FederationClient(FederationBase): room = res.get("room") if not isinstance(room, dict): raise InvalidResponseError("'room' must be a dict") + if room.get("room_id") != room_id: + raise InvalidResponseError("wrong room returned in hierarchy response") # Validate children_state of the room. children_state = room.pop("children_state", []) diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 486145f48a..ff24ec8063 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -105,6 +105,7 @@ class RoomSummaryHandler: hs.get_clock(), "get_room_hierarchy", ) + self._msc3266_enabled = hs.config.experimental.msc3266_enabled async def get_room_hierarchy( self, @@ -630,7 +631,7 @@ class RoomSummaryHandler: return False async def _is_remote_room_accessible( - self, requester: str, room_id: str, room: JsonDict + self, requester: Optional[str], room_id: str, room: JsonDict ) -> bool: """ Calculate whether the room received over federation should be shown to the requester. @@ -645,7 +646,8 @@ class RoomSummaryHandler: due to an invite, etc. Args: - requester: The user requesting the summary. + requester: The user requesting the summary. If not passed only world + readability is checked. room_id: The room ID returned over federation. room: The summary of the room returned over federation. @@ -659,6 +661,8 @@ class RoomSummaryHandler: or room.get("world_readable") is True ): return True + elif not requester: + return False # Check if the user is a member of any of the allowed rooms from the response. allowed_rooms = room.get("allowed_room_ids") @@ -715,6 +719,10 @@ class RoomSummaryHandler: "room_type": create_event.content.get(EventContentFields.ROOM_TYPE), } + if self._msc3266_enabled: + entry["im.nheko.summary.version"] = stats["version"] + entry["im.nheko.summary.encryption"] = stats["encryption"] + # Federation requests need to provide additional information so the # requested server is able to filter the response appropriately. if for_federation: @@ -812,9 +820,45 @@ class RoomSummaryHandler: room_summary["membership"] = membership or "leave" else: - # TODO federation API, descoped from initial unstable implementation - # as MSC needs more maturing on that side. 
- raise SynapseError(400, "Federation is not currently supported.") + # Reuse the hierarchy query over federation + if remote_room_hosts is None: + raise SynapseError(400, "Missing via to query remote room") + + ( + room_entry, + children_room_entries, + inaccessible_children, + ) = await self._summarize_remote_room_hierarchy( + _RoomQueueEntry(room_id, remote_room_hosts), + suggested_only=True, + ) + + # The results over federation might include rooms that we, as the + # requesting server, are allowed to see, but the requesting user is + # not permitted to see. + # + # Filter the returned results to only what is accessible to the user. + if not room_entry or not await self._is_remote_room_accessible( + requester, room_entry.room_id, room_entry.room + ): + raise NotFoundError("Room not found or is not accessible") + + room = dict(room_entry.room) + room.pop("allowed_room_ids", None) + + # If there was a requester, add their membership. + # We keep the membership in the local membership table unless the + # room is purged even for remote rooms. + if requester: + ( + membership, + _, + ) = await self._store.get_local_current_membership_for_user_in_room( + requester, room_id + ) + room["membership"] = membership or "leave" + + return room return room_summary diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index d37292ce13..e74eb71774 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -1092,3 +1092,29 @@ class RoomSummaryTestCase(unittest.HomeserverTestCase): ) result = self.get_success(self.handler.get_room_summary(user2, self.room)) self.assertEqual(result.get("room_id"), self.room) + + def test_fed(self): + """ + Return data over federation and ensure that it is handled properly. + """ + fed_hostname = self.hs.hostname + "2" + fed_room = "#fed_room:" + fed_hostname + + requested_room_entry = _RoomEntry( + fed_room, + {"room_id": fed_room, "world_readable": True}, + ) + + async def summarize_remote_room_hierarchy(_self, room, suggested_only): + return requested_room_entry, {}, set() + + with mock.patch( + "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy", + new=summarize_remote_room_hierarchy, + ): + result = self.get_success( + self.handler.get_room_summary( + self.user, fed_room, remote_room_hosts=[fed_hostname] + ) + ) + self.assertEqual(result.get("room_id"), fed_room) From c2d50e9f6c5f7b01cbd8bf1dca36cb8c0e7b007f Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 6 May 2022 11:43:53 +0100 Subject: [PATCH 57/87] Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. 
(#12452) --- changelog.d/12452.feature | 1 + docker/configure_workers_and_start.py | 4 +- docs/upgrade.md | 27 +++ docs/workers.md | 20 ++ synapse/app/generic_worker.py | 16 -- synapse/config/appservice.py | 1 - synapse/config/workers.py | 109 +++++++++- synapse/handlers/appservice.py | 2 +- tests/config/test_workers.py | 288 ++++++++++++++++++++++++++ 9 files changed, 447 insertions(+), 21 deletions(-) create mode 100644 changelog.d/12452.feature create mode 100644 tests/config/test_workers.py diff --git a/changelog.d/12452.feature b/changelog.d/12452.feature new file mode 100644 index 0000000000..22f054d344 --- /dev/null +++ b/changelog.d/12452.feature @@ -0,0 +1 @@ +Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. \ No newline at end of file diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 33fc20d218..b2b7938ae8 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -69,10 +69,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "worker_extra_conf": "enable_media_repo: true", }, "appservice": { - "app": "synapse.app.appservice", + "app": "synapse.app.generic_worker", "listener_resources": [], "endpoint_patterns": [], - "shared_extra_conf": {"notify_appservices": False}, + "shared_extra_conf": {"notify_appservices_from_worker": "appservice"}, "worker_extra_conf": "", }, "federation_sender": { diff --git a/docs/upgrade.md b/docs/upgrade.md index b40cac86f0..18c33a4198 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -100,6 +100,32 @@ To re-enable this functionality, set the [`allow_device_name_lookup_over_federation`](https://matrix-org.github.io/synapse/v1.59/usage/configuration/config_documentation.html#federation) homeserver config option to `true`. + +## Deprecation of the `synapse.app.appservice` worker application type + +The `synapse.app.appservice` worker application type allowed you to configure a +single worker to use to notify application services of new events, as long +as this functionality was disabled on the main process with `notify_appservices: False`. + +To unify Synapse's worker types, the `synapse.app.appservice` worker application +type and the `notify_appservices` configuration option have been deprecated. + +To get the same functionality, it's now recommended that the `synapse.app.generic_worker` +worker application type is used and that the `notify_appservices_from_worker` option +is set to the name of a worker. + +For the time being, `notify_appservices_from_worker` can be used alongside +`synapse.app.appservice` and `notify_appservices` to make it easier to transition +between the two configurations, however please note that: + +- the options must not contradict each other (otherwise Synapse won't start); and +- the `notify_appservices` option will be removed in a future release of Synapse. + +Please see [the relevant section of the worker documentation][v1_59_notify_ases_from] for more information. + +[v1_59_notify_ases_from]: workers.md#notifying-application-services + + # Upgrading to v1.58.0 ## Groups/communities feature has been disabled by default @@ -107,6 +133,7 @@ homeserver config option to `true`. The non-standard groups/communities feature in Synapse has been disabled by default and will be removed in Synapse v1.61.0. 
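The deprecation notes above describe how the legacy and new options are reconciled during the transition; the authoritative logic is `_should_this_worker_perform_duty`, shown in full in the `synapse/config/workers.py` hunk below. As a reading aid, here is a simplified sketch of its truth table, with invented names and the deprecated `synapse.app.appservice` branch omitted:

```python
from typing import Mapping, Optional


def should_notify_appservices_here(
    config: Mapping[str, object], worker_name: Optional[str]
) -> bool:
    """Sketch only. `worker_name` is None on the main process."""
    new_here: Optional[bool] = None
    if "notify_appservices_from_worker" in config:
        designated = config["notify_appservices_from_worker"] or "master"
        new_here = (designated == "master" and worker_name is None) or (
            designated == worker_name
        )

    legacy_here: Optional[bool] = None
    if "notify_appservices" in config:
        # The legacy boolean could only ever select the main process.
        legacy_here = bool(config["notify_appservices"]) and worker_name is None

    if new_here is None and legacy_here is None:
        # Neither option set: the main process performs the duty.
        return worker_name is None
    if new_here is not None and legacy_here is not None and new_here != legacy_here:
        # The contradiction case described above (a ConfigError in Synapse).
        raise ValueError("Conflicting configuration options")
    return bool(new_here) or bool(legacy_here)
```

For example, `{"notify_appservices_from_worker": "worker1"}` yields `True` only on `worker1`, while the contradictory `{"notify_appservices": True, "notify_appservices_from_worker": "worker1"}` raises on every process, mirroring the cases exercised by the new `tests/config/test_workers.py` further down.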
+
 # Upgrading to v1.57.0
 
 ## Changes to database schema for application services
diff --git a/docs/workers.md b/docs/workers.md
index afdcd785e4..1d049b6c4f 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -435,6 +435,23 @@ An example for a dedicated background worker instance:
 {{#include systemd-with-workers/workers/background_worker.yaml}}
 ```
 
+#### Notifying Application Services
+
+You can designate one worker to send output traffic to Application Services.
+
+Specify its name in the shared configuration as follows:
+
+```yaml
+notify_appservices_from_worker: worker_name
+```
+
+This work cannot be load-balanced; please ensure the main process is restarted
+after setting this option in the shared configuration!
+
+This style of configuration supersedes the legacy `synapse.app.appservice`
+worker application type.
+
+
 ### `synapse.app.pusher`
 
 Handles sending push notifications to sygnal and email. Doesn't handle any
@@ -453,6 +470,9 @@ pusher_instances:
 
 ### `synapse.app.appservice`
 
+**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
+`notify_appservices_from_worker` option instead.](#notifying-application-services)
+
 Handles sending output traffic to Application Services. Doesn't handle any
 REST endpoints itself, but you should set `notify_appservices: False` in the
 shared configuration file to stop the main synapse sending appservice notifications.
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 1865c671f4..07dddc0b13 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -441,22 +441,6 @@ def start(config_options: List[str]) -> None:
         "synapse.app.user_dir",
     )
 
-    if config.worker.worker_app == "synapse.app.appservice":
-        if config.appservice.notify_appservices:
-            sys.stderr.write(
-                "\nThe appservices must be disabled in the main synapse process"
-                "\nbefore they can be run in a separate worker."
-                "\nPlease add ``notify_appservices: false`` to the main config"
-                "\n"
-            )
-            sys.exit(1)
-
-        # Force the appservice to start since they will be disabled in the main config
-        config.appservice.notify_appservices = True
-    else:
-        # For other worker types we force this to off.
-        config.appservice.notify_appservices = False
-
     if config.worker.worker_app == "synapse.app.user_dir":
         if config.server.update_user_directory:
             sys.stderr.write(
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index 720b90a283..b13cc6bb6e 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -33,7 +33,6 @@ class AppServiceConfig(Config):
     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
         self.app_service_config_files = config.get("app_service_config_files", [])
-        self.notify_appservices = config.get("notify_appservices", True)
         self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
 
     def generate_config_section(cls, **kwargs: Any) -> str:
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index a5479dfca9..a9dbcc6d3d 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -14,7 +14,8 @@
 # limitations under the License.
 
 import argparse
-from typing import Any, List, Union
+import logging
+from typing import Any, Dict, List, Union
 
 import attr
 
@@ -42,6 +43,13 @@ synapse process before they can be run in a separate worker.
 Please add ``start_pushers: false`` to the main config
 """
 
+_DEPRECATED_WORKER_DUTY_OPTION_USED = """
+The '%s' configuration option is deprecated and will be removed in a future
+Synapse version.
Please use ``%s: name_of_worker`` instead. +""" + +logger = logging.getLogger(__name__) + def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]: """Helper for allowing parsing a string or list of strings to a config @@ -296,6 +304,105 @@ class WorkerConfig(Config): self.worker_name is None and background_tasks_instance == "master" ) or self.worker_name == background_tasks_instance + self.should_notify_appservices = self._should_this_worker_perform_duty( + config, + legacy_master_option_name="notify_appservices", + legacy_worker_app_name="synapse.app.appservice", + new_option_name="notify_appservices_from_worker", + ) + + def _should_this_worker_perform_duty( + self, + config: Dict[str, Any], + legacy_master_option_name: str, + legacy_worker_app_name: str, + new_option_name: str, + ) -> bool: + """ + Figures out whether this worker should perform a certain duty. + + This function is temporary and is only to deal with the complexity + of allowing old, transitional and new configurations all at once. + + Contradictions between the legacy and new part of a transitional configuration + will lead to a ConfigError. + + Parameters: + config: The config dictionary + legacy_master_option_name: The name of a legacy option, whose value is boolean, + specifying whether it's the master that should handle a certain duty. + e.g. "notify_appservices" + legacy_worker_app_name: The name of a legacy Synapse worker application + that would traditionally perform this duty. + e.g. "synapse.app.appservice" + new_option_name: The name of the new option, whose value is the name of a + designated worker to perform the duty. + e.g. "notify_appservices_from_worker" + """ + + # None means 'unspecified'; True means 'run here' and False means + # 'don't run here'. + new_option_should_run_here = None + if new_option_name in config: + designated_worker = config[new_option_name] or "master" + new_option_should_run_here = ( + designated_worker == "master" and self.worker_name is None + ) or designated_worker == self.worker_name + + legacy_option_should_run_here = None + if legacy_master_option_name in config: + run_on_master = bool(config[legacy_master_option_name]) + + legacy_option_should_run_here = ( + self.worker_name is None and run_on_master + ) or (self.worker_app == legacy_worker_app_name and not run_on_master) + + # Suggest using the new option instead. + logger.warning( + _DEPRECATED_WORKER_DUTY_OPTION_USED, + legacy_master_option_name, + new_option_name, + ) + + if self.worker_app == legacy_worker_app_name and config.get( + legacy_master_option_name, True + ): + # As an extra bit of complication, we need to check that the + # specialised worker is only used if the legacy config says the + # master isn't performing the duties. + raise ConfigError( + f"Cannot use deprecated worker app type '{legacy_worker_app_name}' whilst deprecated option '{legacy_master_option_name}' is not set to false.\n" + f"Consider setting `worker_app: synapse.app.generic_worker` and using the '{new_option_name}' option instead.\n" + f"The '{new_option_name}' option replaces '{legacy_master_option_name}'." + ) + + if new_option_should_run_here is None and legacy_option_should_run_here is None: + # Neither option specified; the fallback behaviour is to run on the main process + return self.worker_name is None + + if ( + new_option_should_run_here is not None + and legacy_option_should_run_here is not None + ): + # Both options specified; ensure they match! 
+ if new_option_should_run_here != legacy_option_should_run_here: + update_worker_type = ( + " and set worker_app: synapse.app.generic_worker" + if self.worker_app == legacy_worker_app_name + else "" + ) + # If the values conflict, we suggest the admin removes the legacy option + # for simplicity. + raise ConfigError( + f"Conflicting configuration options: {legacy_master_option_name} (legacy), {new_option_name} (new).\n" + f"Suggestion: remove {legacy_master_option_name}{update_worker_type}.\n" + ) + + # We've already validated that these aren't conflicting; now just see if + # either is True. + # (By this point, these are either the same value or only one is not None.) + return bool(new_option_should_run_here or legacy_option_should_run_here) + def generate_config_section(self, **kwargs: Any) -> str: return """\ ## Workers ## diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index b3894666cc..85bd5e4768 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -59,7 +59,7 @@ class ApplicationServicesHandler: self.scheduler = hs.get_application_service_scheduler() self.started_scheduler = False self.clock = hs.get_clock() - self.notify_appservices = hs.config.appservice.notify_appservices + self.notify_appservices = hs.config.worker.should_notify_appservices self.event_sources = hs.get_event_sources() self._msc2409_to_device_messages_enabled = ( hs.config.experimental.msc2409_to_device_messages_enabled diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py new file mode 100644 index 0000000000..da81bb9655 --- /dev/null +++ b/tests/config/test_workers.py @@ -0,0 +1,288 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Mapping, Optional +from unittest.mock import Mock + +from frozendict import frozendict + +from synapse.config import ConfigError +from synapse.config.workers import WorkerConfig + +from tests.unittest import TestCase + +_EMPTY_FROZENDICT: Mapping[str, Any] = frozendict() + + +class WorkerDutyConfigTestCase(TestCase): + def _make_worker_config( + self, + worker_app: str, + worker_name: Optional[str], + extras: Mapping[str, Any] = _EMPTY_FROZENDICT, + ) -> WorkerConfig: + root_config = Mock() + root_config.worker_app = worker_app + root_config.worker_name = worker_name + worker_config = WorkerConfig(root_config) + worker_config_dict = { + "worker_name": worker_name, + "worker_app": worker_app, + **extras, + } + worker_config.read_config(worker_config_dict) + return worker_config + + def test_old_configs_master(self) -> None: + """ + Tests old (legacy) config options. This is for the master's config. 
+ """ + main_process_config = self._make_worker_config( + worker_app="synapse.app.homeserver", worker_name=None + ) + + self.assertTrue( + main_process_config._should_this_worker_perform_duty( + {}, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + self.assertTrue( + main_process_config._should_this_worker_perform_duty( + { + "notify_appservices": True, + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + self.assertFalse( + main_process_config._should_this_worker_perform_duty( + { + "notify_appservices": False, + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + def test_old_configs_appservice_worker(self) -> None: + """ + Tests old (legacy) config options. This is for the worker's config. + """ + appservice_worker_config = self._make_worker_config( + worker_app="synapse.app.appservice", + worker_name="worker1", + extras={ + # Set notify_appservices to false for the initialiser's config, + # so that it doesn't raise an exception here. + # (This is not read by `_should_this_worker_perform_duty`.) + "notify_appservices": False, + }, + ) + + with self.assertRaises(ConfigError): + # This raises because you need to set notify_appservices: False + # before using the synapse.app.appservice worker type + self.assertFalse( + appservice_worker_config._should_this_worker_perform_duty( + {}, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + with self.assertRaises(ConfigError): + # This also raises because you need to set notify_appservices: False + # before using the synapse.app.appservice worker type + appservice_worker_config._should_this_worker_perform_duty( + { + "notify_appservices": True, + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + + self.assertTrue( + appservice_worker_config._should_this_worker_perform_duty( + { + "notify_appservices": False, + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + def test_transitional_configs_master(self) -> None: + """ + Tests transitional (legacy + new) config options. This is for the master's config. + """ + + main_process_config = self._make_worker_config( + worker_app="synapse.app.homeserver", worker_name=None + ) + + self.assertTrue( + main_process_config._should_this_worker_perform_duty( + { + "notify_appservices": True, + "notify_appservices_from_worker": "master", + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + self.assertFalse( + main_process_config._should_this_worker_perform_duty( + { + "notify_appservices": False, + "notify_appservices_from_worker": "worker1", + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + with self.assertRaises(ConfigError): + # Contradictory because we say the master should notify appservices, + # then we say worker1 is the designated worker to do that! + main_process_config._should_this_worker_perform_duty( + { + "notify_appservices": True, + "notify_appservices_from_worker": "worker1", + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + + with self.assertRaises(ConfigError): + # Contradictory because we say the master shouldn't notify appservices, + # then we say master is the designated worker to do that! 
+ main_process_config._should_this_worker_perform_duty( + { + "notify_appservices": False, + "notify_appservices_from_worker": "master", + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + + def test_transitional_configs_appservice_worker(self) -> None: + """ + Tests transitional (legacy + new) config options. This is for the worker's config. + """ + appservice_worker_config = self._make_worker_config( + worker_app="synapse.app.appservice", + worker_name="worker1", + extras={ + # Set notify_appservices to false for the initialiser's config, + # so that it doesn't raise an exception here. + # (This is not read by `_should_this_worker_perform_duty`.) + "notify_appservices": False, + }, + ) + + self.assertTrue( + appservice_worker_config._should_this_worker_perform_duty( + { + "notify_appservices": False, + "notify_appservices_from_worker": "worker1", + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + with self.assertRaises(ConfigError): + # This raises because this worker is the appservice app type, yet + # another worker is the designated worker! + appservice_worker_config._should_this_worker_perform_duty( + { + "notify_appservices": False, + "notify_appservices_from_worker": "worker2", + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + + def test_new_configs_master(self) -> None: + """ + Tests new config options. This is for the master's config. + """ + main_process_config = self._make_worker_config( + worker_app="synapse.app.homeserver", worker_name=None + ) + + self.assertTrue( + main_process_config._should_this_worker_perform_duty( + {"notify_appservices_from_worker": None}, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + self.assertFalse( + main_process_config._should_this_worker_perform_duty( + {"notify_appservices_from_worker": "worker1"}, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + def test_new_configs_appservice_worker(self) -> None: + """ + Tests new config options. This is for the worker's config. 
+ """ + appservice_worker_config = self._make_worker_config( + worker_app="synapse.app.generic_worker", worker_name="worker1" + ) + + self.assertTrue( + appservice_worker_config._should_this_worker_perform_duty( + { + "notify_appservices_from_worker": "worker1", + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) + + self.assertFalse( + appservice_worker_config._should_this_worker_perform_duty( + { + "notify_appservices_from_worker": "worker2", + }, + "notify_appservices", + "synapse.app.appservice", + "notify_appservices_from_worker", + ) + ) From 2607b3e1816341b3b8534077bd5d3a4daf3a3d15 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 6 May 2022 13:35:20 +0100 Subject: [PATCH 58/87] Update mypy to 0.950 and fix complaints (#12650) --- changelog.d/12650.misc | 1 + poetry.lock | 64 ++++++++++--------- pyproject.toml | 4 +- stubs/sortedcontainers/sorteddict.pyi | 19 ++++-- synapse/appservice/api.py | 3 +- synapse/config/appservice.py | 3 +- synapse/events/presence_router.py | 4 +- synapse/handlers/message.py | 3 +- synapse/metrics/background_process_metrics.py | 44 ++++++++++--- tests/storage/test_monthly_active_users.py | 10 +-- 10 files changed, 98 insertions(+), 57 deletions(-) create mode 100644 changelog.d/12650.misc diff --git a/changelog.d/12650.misc b/changelog.d/12650.misc new file mode 100644 index 0000000000..07bb4ce5a9 --- /dev/null +++ b/changelog.d/12650.misc @@ -0,0 +1 @@ +Update to mypy 0.950. \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 89e78576b7..564ba7ec02 100644 --- a/poetry.lock +++ b/poetry.lock @@ -572,7 +572,7 @@ python-versions = "*" [[package]] name = "mypy" -version = "0.931" +version = "0.950" description = "Optional static typing for Python" category = "dev" optional = false @@ -580,13 +580,14 @@ python-versions = ">=3.6" [package.dependencies] mypy-extensions = ">=0.4.3" -tomli = ">=1.1.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} typing-extensions = ">=3.10" [package.extras] dmypy = ["psutil (>=4.0)"] python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] [[package]] name = "mypy-extensions" @@ -598,14 +599,14 @@ python-versions = "*" [[package]] name = "mypy-zope" -version = "0.3.5" +version = "0.3.7" description = "Plugin for mypy to support zope interfaces" category = "dev" optional = false python-versions = "*" [package.dependencies] -mypy = "0.931" +mypy = "0.950" "zope.interface" = "*" "zope.schema" = "*" @@ -1020,7 +1021,7 @@ jeepney = ">=0.6" [[package]] name = "sentry-sdk" -version = "1.5.7" +version = "1.5.11" description = "Python client for Sentry (https://sentry.io)" category = "main" optional = true @@ -1562,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "2bda1a7cfc8cc02832b4a7d16bf7e1615cb05e0639bdb30688aadf692d851942" +content-hash = "f24699464828ac1a63f1034b4a18c841ef585737b9a802fd8311836444f1d702" [metadata.files] attrs = [ @@ -2089,34 +2090,37 @@ msgpack = [ {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"}, ] mypy = [ - {file = "mypy-0.931-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3c5b42d0815e15518b1f0990cff7a705805961613e701db60387e6fb663fe78a"}, - {file = "mypy-0.931-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c89702cac5b302f0c5d33b172d2b55b5df2bede3344a2fbed99ff96bddb2cf00"}, - {file = 
"mypy-0.931-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:300717a07ad09525401a508ef5d105e6b56646f7942eb92715a1c8d610149714"}, - {file = "mypy-0.931-cp310-cp310-win_amd64.whl", hash = "sha256:7b3f6f557ba4afc7f2ce6d3215d5db279bcf120b3cfd0add20a5d4f4abdae5bc"}, - {file = "mypy-0.931-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1bf752559797c897cdd2c65f7b60c2b6969ffe458417b8d947b8340cc9cec08d"}, - {file = "mypy-0.931-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4365c60266b95a3f216a3047f1d8e3f895da6c7402e9e1ddfab96393122cc58d"}, - {file = "mypy-0.931-cp36-cp36m-win_amd64.whl", hash = "sha256:1b65714dc296a7991000b6ee59a35b3f550e0073411ac9d3202f6516621ba66c"}, - {file = "mypy-0.931-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e839191b8da5b4e5d805f940537efcaa13ea5dd98418f06dc585d2891d228cf0"}, - {file = "mypy-0.931-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:50c7346a46dc76a4ed88f3277d4959de8a2bd0a0fa47fa87a4cde36fe247ac05"}, - {file = "mypy-0.931-cp37-cp37m-win_amd64.whl", hash = "sha256:d8f1ff62f7a879c9fe5917b3f9eb93a79b78aad47b533911b853a757223f72e7"}, - {file = "mypy-0.931-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f9fe20d0872b26c4bba1c1be02c5340de1019530302cf2dcc85c7f9fc3252ae0"}, - {file = "mypy-0.931-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1b06268df7eb53a8feea99cbfff77a6e2b205e70bf31743e786678ef87ee8069"}, - {file = "mypy-0.931-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8c11003aaeaf7cc2d0f1bc101c1cc9454ec4cc9cb825aef3cafff8a5fdf4c799"}, - {file = "mypy-0.931-cp38-cp38-win_amd64.whl", hash = "sha256:d9d2b84b2007cea426e327d2483238f040c49405a6bf4074f605f0156c91a47a"}, - {file = "mypy-0.931-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ff3bf387c14c805ab1388185dd22d6b210824e164d4bb324b195ff34e322d166"}, - {file = "mypy-0.931-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5b56154f8c09427bae082b32275a21f500b24d93c88d69a5e82f3978018a0266"}, - {file = "mypy-0.931-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ca7f8c4b1584d63c9a0f827c37ba7a47226c19a23a753d52e5b5eddb201afcd"}, - {file = "mypy-0.931-cp39-cp39-win_amd64.whl", hash = "sha256:74f7eccbfd436abe9c352ad9fb65872cc0f1f0a868e9d9c44db0893440f0c697"}, - {file = "mypy-0.931-py3-none-any.whl", hash = "sha256:1171f2e0859cfff2d366da2c7092b06130f232c636a3f7301e3feb8b41f6377d"}, - {file = "mypy-0.931.tar.gz", hash = "sha256:0038b21890867793581e4cb0d810829f5fd4441aa75796b53033af3aa30430ce"}, + {file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"}, + {file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"}, + {file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"}, + {file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"}, + {file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"}, + {file = 
"mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"}, + {file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"}, + {file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"}, + {file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"}, + {file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"}, + {file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"}, + {file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"}, + {file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"}, + {file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"}, + {file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"}, + {file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"}, + {file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"}, + {file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"}, + {file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"}, + {file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"}, + {file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"}, + {file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"}, + {file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"}, ] mypy-extensions = [ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, ] mypy-zope = [ - {file = "mypy-zope-0.3.5.tar.gz", hash = "sha256:489e7da1c2af887f2cfe3496995fc247f296512b495b57817edddda9d22308f3"}, - {file = "mypy_zope-0.3.5-py3-none-any.whl", hash = "sha256:3bd0cc9a3e5933b02931af4b214ba32a4f4ff98adb30c979ce733857db91a18b"}, + {file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"}, + {file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"}, ] netaddr = [ {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash 
= "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"}, @@ -2386,8 +2390,8 @@ secretstorage = [ {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"}, ] sentry-sdk = [ - {file = "sentry-sdk-1.5.7.tar.gz", hash = "sha256:aa52da941c56b5a76fd838f8e9e92a850bf893a9eb1e33ffce6c21431d07ee30"}, - {file = "sentry_sdk-1.5.7-py2.py3-none-any.whl", hash = "sha256:411a8495bd18cf13038e5749e4710beb4efa53da6351f67b4c2f307c2d9b6d49"}, + {file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"}, + {file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"}, ] service-identity = [ {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"}, diff --git a/pyproject.toml b/pyproject.toml index 446895711b..877f19708d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -248,8 +248,8 @@ flake8-bugbear = "==21.3.2" flake8 = "*" # Typechecking -mypy = "==0.931" -mypy-zope = "==0.3.5" +mypy = "*" +mypy-zope = "*" types-bleach = ">=4.1.0" types-commonmark = ">=0.9.2" types-jsonschema = ">=3.2.0" diff --git a/stubs/sortedcontainers/sorteddict.pyi b/stubs/sortedcontainers/sorteddict.pyi index 3a4f9c3076..7c399ab38d 100644 --- a/stubs/sortedcontainers/sorteddict.pyi +++ b/stubs/sortedcontainers/sorteddict.pyi @@ -85,12 +85,19 @@ class SortedDict(Dict[_KT, _VT]): def popitem(self, index: int = ...) -> Tuple[_KT, _VT]: ... def peekitem(self, index: int = ...) -> Tuple[_KT, _VT]: ... def setdefault(self, key: _KT, default: Optional[_VT] = ...) -> _VT: ... - @overload - def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... - @overload - def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ... - @overload - def update(self, **kwargs: _VT) -> None: ... + # Mypy now reports the first overload as an error, because typeshed widened the type + # of `__map` to its internal `_typeshed.SupportsKeysAndGetItem` type in + # https://github.com/python/typeshed/pull/6653 + # Since sorteddicts don't change the signature of `update` from that of `dict`, we + # let the stubs for `update` inherit from the stubs for `dict`. (I suspect we could + # do the same for many othe methods.) We leave the stubs commented to better track + # how this file has evolved from the original stubs. + # @overload + # def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... + # @overload + # def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ... + # @overload + # def update(self, **kwargs: _VT) -> None: ... 
def __reduce__( self, ) -> Tuple[ diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index adc6b074da..d19f8dd996 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -17,6 +17,7 @@ import urllib.parse from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple from prometheus_client import Counter +from typing_extensions import TypeGuard from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind from synapse.api.errors import CodeMessageException @@ -66,7 +67,7 @@ def _is_valid_3pe_metadata(info: JsonDict) -> bool: return True -def _is_valid_3pe_result(r: JsonDict, field: str) -> bool: +def _is_valid_3pe_result(r: object, field: str) -> TypeGuard[JsonDict]: if not isinstance(r, dict): return False diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index b13cc6bb6e..24498e7944 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -55,7 +55,8 @@ def load_appservices( ) -> List[ApplicationService]: """Returns a list of Application Services from the config files.""" if not isinstance(config_files, list): - logger.warning("Expected %s to be a list of AS config files.", config_files) + # type-ignore: this function gets arbitrary json value; we do use this path. + logger.warning("Expected %s to be a list of AS config files.", config_files) # type: ignore[unreachable] return [] # Dicts of value -> filename diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py index a58f313e8b..98555c8c0c 100644 --- a/synapse/events/presence_router.py +++ b/synapse/events/presence_router.py @@ -147,7 +147,9 @@ class PresenceRouter: # run all the callbacks for get_users_for_states and combine the results for callback in self._get_users_for_states_callbacks: try: - result = await callback(state_updates) + # Note: result is an object here, because we don't trust modules to + # return the types they're supposed to. + result: object = await callback(state_updates) except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) continue diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 95a89ac01f..c28b792e6f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1427,7 +1427,7 @@ class EventCreationHandler: # Validate a newly added alias or newly added alt_aliases. original_alias = None - original_alt_aliases: List[str] = [] + original_alt_aliases: object = [] original_event_id = event.unsigned.get("replaces_state") if original_event_id: @@ -1455,6 +1455,7 @@ class EventCreationHandler: # If the old version of alt_aliases is of an unknown form, # completely replace it. if not isinstance(original_alt_aliases, (list, tuple)): + # TODO: check that the original_alt_aliases' entries are all strings original_alt_aliases = [] # Check that each alias is currently valid. 
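Among the typing fixes above, the `_is_valid_3pe_result` change is the standard `TypeGuard` pattern: a runtime check whose `True` result lets mypy narrow a value from `object` to `JsonDict`. A self-contained sketch of the pattern (the validation body here is simplified, not Synapse's actual checks):

```python
from typing import Any, Dict

from typing_extensions import TypeGuard

JsonDict = Dict[str, Any]


def is_valid_result(r: object, field: str) -> TypeGuard[JsonDict]:
    # Returning True promises the type checker that `r` is a JsonDict.
    return isinstance(r, dict) and isinstance(r.get(field), str)


def handle_result(r: object) -> None:
    if is_valid_result(r, "userid"):
        # mypy now treats `r` as Dict[str, Any] within this branch, so it
        # can be subscripted without a cast or a `type: ignore`.
        print(r["userid"])


handle_result({"userid": "@alice:example.com"})
```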
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index f61396bb79..298809742a 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -28,11 +28,11 @@ from typing import ( Type, TypeVar, Union, - cast, ) from prometheus_client import Metric from prometheus_client.core import REGISTRY, Counter, Gauge +from typing_extensions import ParamSpec from twisted.internet import defer @@ -256,24 +256,48 @@ def run_as_background_process( return defer.ensureDeferred(run()) -F = TypeVar("F", bound=Callable[..., Awaitable[Optional[Any]]]) +P = ParamSpec("P") -def wrap_as_background_process(desc: str) -> Callable[[F], F]: - """Decorator that wraps a function that gets called as a background - process. +def wrap_as_background_process( + desc: str, +) -> Callable[ + [Callable[P, Awaitable[Optional[R]]]], + Callable[P, "defer.Deferred[Optional[R]]"], +]: + """Decorator that wraps an asynchronous function `func`, returning a synchronous + decorated function. Calling the decorated version runs `func` as a background + process, forwarding all arguments verbatim. - Equivalent to calling the function with `run_as_background_process` + That is, + + @wrap_as_background_process + def func(*args): ... + func(1, 2, third=3) + + is equivalent to: + + def func(*args): ... + run_as_background_process(func, 1, 2, third=3) + + The former can be convenient if `func` needs to be run as a background process in + multiple places. """ - def wrap_as_background_process_inner(func: F) -> F: + def wrap_as_background_process_inner( + func: Callable[P, Awaitable[Optional[R]]] + ) -> Callable[P, "defer.Deferred[Optional[R]]"]: @wraps(func) def wrap_as_background_process_inner_2( - *args: Any, **kwargs: Any + *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[Optional[R]]": - return run_as_background_process(desc, func, *args, **kwargs) + # type-ignore: mypy is confusing kwargs with the bg_start_span kwarg. 
+ # Argument 4 to "run_as_background_process" has incompatible type + # "**P.kwargs"; expected "bool" + # See https://github.com/python/mypy/issues/8862 + return run_as_background_process(desc, func, *args, **kwargs) # type: ignore[arg-type] - return cast(F, wrap_as_background_process_inner_2) + return wrap_as_background_process_inner_2 return wrap_as_background_process_inner diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 0fbf465670..4c29ad79b6 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -222,10 +222,11 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.store.user_add_threepid(user, "email", email, now, now) ) - d = self.store.db_pool.runInteraction( - "initialise", self.store._initialise_reserved_users, threepids + self.get_success( + self.store.db_pool.runInteraction( + "initialise", self.store._initialise_reserved_users, threepids + ) ) - self.get_success(d) count = self.get_success(self.store.get_monthly_active_count()) self.assertEqual(count, initial_users) @@ -233,8 +234,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): users = self.get_success(self.store.get_registered_reserved_users()) self.assertEqual(len(users), reserved_user_number) - d = self.store.reap_monthly_active_users() - self.get_success(d) + self.get_success(self.store.reap_monthly_active_users()) count = self.get_success(self.store.get_monthly_active_count()) self.assertEqual(count, self.hs.config.server.max_mau_value) From 4337d33a73988763e27eb9450307084f8eab6f16 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 May 2022 17:41:57 +0100 Subject: [PATCH 59/87] Prevent memory leak from reoccurring when presence is disabled. (#12656) --- changelog.d/12656.misc | 1 + synapse/handlers/presence.py | 46 +++++++++++++++++++++++------------- synapse/util/wheel_timer.py | 39 ++++++++++++++++++++---------- 3 files changed, 56 insertions(+), 30 deletions(-) create mode 100644 changelog.d/12656.misc diff --git a/changelog.d/12656.misc b/changelog.d/12656.misc new file mode 100644 index 0000000000..8a8743e614 --- /dev/null +++ b/changelog.d/12656.misc @@ -0,0 +1 @@ +Prevent memory leak from reoccurring when presence is disabled. 
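To illustrate the leak this patch addresses: presence entries were still being inserted into the handler's `WheelTimer` while presence was disabled, but nothing ever called `fetch()` to drain them. A sketch of that failure mode against the `WheelTimer` API shown below (default bucket size assumed):

```python
from synapse.util.wheel_timer import WheelTimer

timer: WheelTimer[str] = WheelTimer()
now_ms = 0
for i in range(100_000):
    timer.insert(now=now_ms, obj=f"@user{i}:example.com", then=now_ms + 30_000)

# With no fetch() calls, the buckets only ever grow:
assert len(timer) == 100_000
```

The hunks that follow fix this in two ways: the presence handler skips these inserts (and makes `_update_states` and `set_state` no-ops) when presence is disabled, and `WheelTimer.insert` now logs a warning once the oldest bucket is more than ten buckets overdue, so any recurrence is visible.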
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index d078162c29..268481ec19 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -659,27 +659,28 @@ class PresenceHandler(BasePresenceHandler): ) now = self.clock.time_msec() - for state in self.user_to_current_state.values(): - self.wheel_timer.insert( - now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER - ) - self.wheel_timer.insert( - now=now, - obj=state.user_id, - then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT, - ) - if self.is_mine_id(state.user_id): + if self._presence_enabled: + for state in self.user_to_current_state.values(): + self.wheel_timer.insert( + now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER + ) self.wheel_timer.insert( now=now, obj=state.user_id, - then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL, - ) - else: - self.wheel_timer.insert( - now=now, - obj=state.user_id, - then=state.last_federation_update_ts + FEDERATION_TIMEOUT, + then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT, ) + if self.is_mine_id(state.user_id): + self.wheel_timer.insert( + now=now, + obj=state.user_id, + then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL, + ) + else: + self.wheel_timer.insert( + now=now, + obj=state.user_id, + then=state.last_federation_update_ts + FEDERATION_TIMEOUT, + ) # Set of users who have presence in the `user_to_current_state` that # have not yet been persisted @@ -804,6 +805,13 @@ class PresenceHandler(BasePresenceHandler): This is currently used to bump the max presence stream ID without changing any user's presence (see PresenceHandler.add_users_to_send_full_presence_to). """ + if not self._presence_enabled: + # We shouldn't get here if presence is disabled, but we check anyway + # to ensure that we don't a) send out presence federation and b) + # don't add things to the wheel timer that will never be handled. + logger.warning("Tried to update presence states when presence is disabled") + return + now = self.clock.time_msec() with Measure(self.clock, "presence_update_states"): @@ -1229,6 +1237,10 @@ class PresenceHandler(BasePresenceHandler): ): raise SynapseError(400, "Invalid presence state") + # If presence is disabled, no-op + if not self.hs.config.server.use_presence: + return + user_id = target_user.to_string() prev_state = await self.current_state_for_user(user_id) diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index e108adc460..177e198e7e 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -11,17 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Generic, List, TypeVar +import logging +from typing import Generic, Hashable, List, Set, TypeVar -T = TypeVar("T") +import attr + +logger = logging.getLogger(__name__) + +T = TypeVar("T", bound=Hashable) +@attr.s(slots=True, frozen=True, auto_attribs=True) class _Entry(Generic[T]): - __slots__ = ["end_key", "queue"] - - def __init__(self, end_key: int) -> None: - self.end_key: int = end_key - self.queue: List[T] = [] + end_key: int + elements: Set[T] = attr.Factory(set) class WheelTimer(Generic[T]): @@ -48,17 +51,27 @@ class WheelTimer(Generic[T]): then: When to return the object strictly after. 
""" then_key = int(then / self.bucket_size) + 1 + now_key = int(now / self.bucket_size) if self.entries: min_key = self.entries[0].end_key max_key = self.entries[-1].end_key + if min_key < now_key - 10: + # If we have ten buckets that are due and still nothing has + # called `fetch()` then we likely have a bug that is causing a + # memory leak. + logger.warning( + "Inserting into a wheel timer that hasn't been read from recently. Item: %s", + obj, + ) + if then_key <= max_key: # The max here is to protect against inserts for times in the past - self.entries[max(min_key, then_key) - min_key].queue.append(obj) + self.entries[max(min_key, then_key) - min_key].elements.add(obj) return - next_key = int(now / self.bucket_size) + 1 + next_key = now_key + 1 if self.entries: last_key = self.entries[-1].end_key else: @@ -71,7 +84,7 @@ class WheelTimer(Generic[T]): # to insert. This ensures there are no gaps. self.entries.extend(_Entry(key) for key in range(last_key, then_key + 1)) - self.entries[-1].queue.append(obj) + self.entries[-1].elements.add(obj) def fetch(self, now: int) -> List[T]: """Fetch any objects that have timed out @@ -84,11 +97,11 @@ class WheelTimer(Generic[T]): """ now_key = int(now / self.bucket_size) - ret = [] + ret: List[T] = [] while self.entries and self.entries[0].end_key <= now_key: - ret.extend(self.entries.pop(0).queue) + ret.extend(self.entries.pop(0).elements) return ret def __len__(self) -> int: - return sum(len(entry.queue) for entry in self.entries) + return sum(len(entry.elements) for entry in self.entries) From 051a1c3f220938a0ea1a5b328c268bdb3d1ad592 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Sat, 7 May 2022 13:37:29 +0100 Subject: [PATCH 60/87] Convert stringy power levels to integers on room upgrade (#12657) --- changelog.d/12657.bugfix | 1 + synapse/events/utils.py | 61 +++++++++++++++++++------- synapse/handlers/room.py | 14 +++--- tests/events/test_utils.py | 41 ++++++++++++++++- tests/rest/client/test_upgrade_room.py | 44 +++++++++++++++++++ 5 files changed, 137 insertions(+), 24 deletions(-) create mode 100644 changelog.d/12657.bugfix diff --git a/changelog.d/12657.bugfix b/changelog.d/12657.bugfix new file mode 100644 index 0000000000..7547ca40a7 --- /dev/null +++ b/changelog.d/12657.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 026dcde8d8..ac91c5eb57 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -22,6 +22,7 @@ from typing import ( Iterable, List, Mapping, + MutableMapping, Optional, Union, ) @@ -580,10 +581,20 @@ class EventClientSerializer: ] -def copy_power_levels_contents( - old_power_levels: Mapping[str, Union[int, Mapping[str, int]]] +_PowerLevel = Union[str, int] + + +def copy_and_fixup_power_levels_contents( + old_power_levels: Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]] ) -> Dict[str, Union[int, Dict[str, int]]]: - """Copy the content of a power_levels event, unfreezing frozendicts along the way + """Copy the content of a power_levels event, unfreezing frozendicts along the way. + + We accept as input power level values which are strings, provided they represent an + integer, e.g. `"`100"` instead of 100. Such strings are converted to integers + in the returned dictionary (hence "fixup" in the function name). + + Note that future room versions will outlaw such stringy power levels (see + https://github.com/matrix-org/matrix-spec/issues/853). 
Raises: TypeError if the input does not look like a valid power levels event content @@ -592,29 +603,47 @@ def copy_power_levels_contents( raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,)) power_levels: Dict[str, Union[int, Dict[str, int]]] = {} + for k, v in old_power_levels.items(): - - if isinstance(v, int): - power_levels[k] = v - continue - if isinstance(v, collections.abc.Mapping): h: Dict[str, int] = {} power_levels[k] = h for k1, v1 in v.items(): - # we should only have one level of nesting - if not isinstance(v1, int): - raise TypeError( - "Invalid power_levels value for %s.%s: %r" % (k, k1, v1) - ) - h[k1] = v1 - continue + _copy_power_level_value_as_integer(v1, h, k1) - raise TypeError("Invalid power_levels value for %s: %r" % (k, v)) + else: + _copy_power_level_value_as_integer(v, power_levels, k) return power_levels +def _copy_power_level_value_as_integer( + old_value: object, + power_levels: MutableMapping[str, Any], + key: str, +) -> None: + """Set `power_levels[key]` to the integer represented by `old_value`. + + :raises TypeError: if `old_value` is not an integer, nor a base-10 string + representation of an integer. + """ + if isinstance(old_value, int): + power_levels[key] = old_value + return + + if isinstance(old_value, str): + try: + parsed_value = int(old_value, base=10) + except ValueError: + # Fall through to the final TypeError. + pass + else: + power_levels[key] = parsed_value + return + + raise TypeError(f"Invalid power_levels value for {key}: {old_value}") + + def validate_canonicaljson(value: Any) -> None: """ Ensure that the JSON object is valid according to the rules of canonical JSON. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b31f00b517..604eb6ec15 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -57,7 +57,7 @@ from synapse.api.filtering import Filter from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase -from synapse.events.utils import copy_power_levels_contents +from synapse.events.utils import copy_and_fixup_power_levels_contents from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.federation import get_domains_from_state from synapse.handlers.relations import BundledAggregations @@ -337,13 +337,13 @@ class RoomCreationHandler: # 50, but if the default PL in a room is 50 or more, then we set the # required PL above that. - pl_content = dict(old_room_pl_state.content) - users_default = int(pl_content.get("users_default", 0)) + pl_content = copy_and_fixup_power_levels_contents(old_room_pl_state.content) + users_default: int = pl_content.get("users_default", 0) # type: ignore[assignment] restricted_level = max(users_default + 1, 50) updated = False for v in ("invite", "events_default"): - current = int(pl_content.get(v, 0)) + current: int = pl_content.get(v, 0) # type: ignore[assignment] if current < restricted_level: logger.debug( "Setting level for %s in %s to %i (was %i)", @@ -380,7 +380,9 @@ class RoomCreationHandler: "state_key": "", "room_id": new_room_id, "sender": requester.user.to_string(), - "content": old_room_pl_state.content, + "content": copy_and_fixup_power_levels_contents( + old_room_pl_state.content + ), }, ratelimit=False, ) @@ -471,7 +473,7 @@ class RoomCreationHandler: # dict so we can't just copy.deepcopy it. 
initial_state[ (EventTypes.PowerLevels, "") - ] = power_levels = copy_power_levels_contents( + ] = power_levels = copy_and_fixup_power_levels_contents( initial_state[(EventTypes.PowerLevels, "")] ) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 00ad19e446..b1c47efac7 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -17,7 +17,7 @@ from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.utils import ( SerializeEventConfig, - copy_power_levels_contents, + copy_and_fixup_power_levels_contents, prune_event, serialize_event, ) @@ -529,7 +529,7 @@ class CopyPowerLevelsContentTestCase(unittest.TestCase): } def _test(self, input): - a = copy_power_levels_contents(input) + a = copy_and_fixup_power_levels_contents(input) self.assertEqual(a["ban"], 50) self.assertEqual(a["events"]["m.room.name"], 100) @@ -547,3 +547,40 @@ class CopyPowerLevelsContentTestCase(unittest.TestCase): def test_frozen(self): input = freeze(self.test_content) self._test(input) + + def test_stringy_integers(self): + """String representations of decimal integers are converted to integers.""" + input = { + "a": "100", + "b": { + "foo": 99, + "bar": "-98", + }, + "d": "0999", + } + output = copy_and_fixup_power_levels_contents(input) + expected_output = { + "a": 100, + "b": { + "foo": 99, + "bar": -98, + }, + "d": 999, + } + + self.assertEqual(output, expected_output) + + def test_strings_that_dont_represent_decimal_integers(self) -> None: + """Should raise when given inputs `s` for which `int(s, base=10)` raises.""" + for invalid_string in ["0x123", "123.0", "123.45", "hello", "0b1", "0o777"]: + with self.assertRaises(TypeError): + copy_and_fixup_power_levels_contents({"a": invalid_string}) + + def test_invalid_types_raise_type_error(self) -> None: + with self.assertRaises(TypeError): + copy_and_fixup_power_levels_contents({"a": ["hello", "grandma"]}) # type: ignore[arg-type] + copy_and_fixup_power_levels_contents({"a": None}) # type: ignore[arg-type] + + def test_invalid_nesting_raises_type_error(self) -> None: + with self.assertRaises(TypeError): + copy_and_fixup_power_levels_contents({"a": {"b": {"c": 1}}}) diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py index b7d0f42daf..c86fc5df0d 100644 --- a/tests/rest/client/test_upgrade_room.py +++ b/tests/rest/client/test_upgrade_room.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional +from unittest.mock import patch from twisted.test.proto_helpers import MemoryReactor @@ -167,6 +168,49 @@ class UpgradeRoomTest(unittest.HomeserverTestCase): ) self.assertNotIn(self.other, power_levels["users"]) + def test_stringy_power_levels(self) -> None: + """The room upgrade converts stringy power levels to proper integers.""" + # Retrieve the room's current power levels. + power_levels = self.helper.get_state( + self.room_id, + "m.room.power_levels", + tok=self.creator_token, + ) + + # Set creator's power level to the string "100" instead of the integer `100`. + power_levels["users"][self.creator] = "100" + + # Synapse refuses to accept new stringy power level events. Bypass this by + # neutering the validation. + with patch("synapse.events.validator.jsonschema.validate"): + # Note: https://github.com/matrix-org/matrix-spec/issues/853 plans to forbid + # string power levels in new rooms. 
For this test to have a clean + # conscience, we ought to ensure it's upgrading from a sufficiently old + # version of room. + self.helper.send_state( + self.room_id, + "m.room.power_levels", + body=power_levels, + tok=self.creator_token, + ) + + # Upgrade the room. Check the homeserver reports success. + channel = self._upgrade_room() + self.assertEqual(200, channel.code, channel.result) + + # Extract the new room ID. + new_room_id = channel.json_body["replacement_room"] + + # Fetch the new room's power level event. + new_power_levels = self.helper.get_state( + new_room_id, + "m.room.power_levels", + tok=self.creator_token, + ) + + # We should now have an integer power level. + self.assertEqual(new_power_levels["users"][self.creator], 100, new_power_levels) + def test_space(self) -> None: """Test upgrading a space.""" From 0ce2201932488e98cab8a7d81788e5bcf8f8dd5e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Sat, 7 May 2022 13:40:58 +0100 Subject: [PATCH 61/87] Move `pympler` back into the `all` extras (#12652) * Move `pympler` back into the `all` extras Undoes a change I made in #12381. I can't fully remember my reasoning, but this changed the contents of the debian packages in a backwards incompatible way. We're not aware of anyone who's been bitten by this, but we still want to fix it. To the reviewer: please be convinced that the debian packages will still contain pympler after this change. * Debian changelog entry to keep the linter happy --- changelog.d/12652.misc | 1 + debian/build_virtualenv | 1 - debian/changelog | 8 ++++++++ poetry.lock | 4 ++-- pyproject.toml | 7 ++++--- 5 files changed, 15 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12652.misc diff --git a/changelog.d/12652.misc b/changelog.d/12652.misc new file mode 100644 index 0000000000..7b7c1cf5ff --- /dev/null +++ b/changelog.d/12652.misc @@ -0,0 +1 @@ +Move `pympler` back in to the `all` extras. diff --git a/debian/build_virtualenv b/debian/build_virtualenv index d2955f7628..f1ec609163 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv @@ -41,7 +41,6 @@ poetry export \ --extras all \ --extras test \ --extras systemd \ - --extras cache_memory \ -o exported_requirements.txt deactivate rm -rf "$TEMP_VENV" diff --git a/debian/changelog b/debian/changelog index 5440f91bc0..5b21e0d369 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,11 @@ +matrix-synapse-py3 (1.58.2) UNRELEASED; urgency=medium + + * Adjust how the `exported-requirements.txt` file is generated as part of + the process of building these packages. This affects the package + maintainers only; end-users are unaffected. 
+ + -- Synapse Packaging team Fri, 06 May 2022 13:49:29 +0100 + matrix-synapse-py3 (1.58.1) stable; urgency=medium * Include python dependencies from the `systemd` and `cache_memory` extras package groups, which diff --git a/poetry.lock b/poetry.lock index 564ba7ec02..ddafaaeba0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1546,7 +1546,7 @@ docs = ["sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] -all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "pyjwt", "txredisapi", "hiredis"] +all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "pyjwt", "txredisapi", "hiredis", "Pympler"] cache_memory = ["Pympler"] jwt = ["pyjwt"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] @@ -1563,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "f24699464828ac1a63f1034b4a18c841ef585737b9a802fd8311836444f1d702" +content-hash = "eebc9e1d720e2e866f5fddda98ce83d858949a6fdbe30c7e5aef4cf9d17be498" [metadata.files] attrs = [ diff --git a/pyproject.toml b/pyproject.toml index 877f19708d..7348230fba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -231,10 +231,11 @@ all = [ "jaeger-client", "opentracing", # jwt "pyjwt", - #redis - "txredisapi", "hiredis" + # redis + "txredisapi", "hiredis", + # cache_memory + "pympler", # omitted: - # - cache_memory: this is an experimental option # - test: it's useful to have this separate from dev deps in the olddeps job # - systemd: this is a system-based requirement ] From 26c1ad71c555329dbef5f7989ecaf54448321db6 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 9 May 2022 10:28:38 +0100 Subject: [PATCH 62/87] Use `Concatenate` to annotate `do_execute` (#12666) --- changelog.d/12666.misc | 1 + pyproject.toml | 2 +- synapse/storage/database.py | 19 ++++++++++++++----- 3 files changed, 16 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12666.misc diff --git a/changelog.d/12666.misc b/changelog.d/12666.misc new file mode 100644 index 0000000000..96268e33f5 --- /dev/null +++ b/changelog.d/12666.misc @@ -0,0 +1 @@ +Use `Concatenate` to better annotate `_do_execute`. diff --git a/pyproject.toml b/pyproject.toml index 7348230fba..4c51b8c4a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -142,7 +142,7 @@ netaddr = ">=0.7.18" # add a lower bound to the Jinja2 dependency. Jinja2 = ">=3.0" bleach = ">=1.4.3" -# We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0. +# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0. typing-extensions = ">=3.10.0" # We enforce that we have a `cryptography` version that bundles an `openssl` # with the latest security patches. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index df1e9c1b83..2255e55f6f 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -38,7 +38,7 @@ from typing import ( import attr from prometheus_client import Histogram -from typing_extensions import Literal +from typing_extensions import Concatenate, Literal, ParamSpec from twisted.enterprise import adbapi @@ -194,7 +194,7 @@ class LoggingDatabaseConnection: # The type of entry which goes on our after_callbacks and exception_callbacks lists. 
_CallbackListEntry = Tuple[Callable[..., object], Iterable[Any], Dict[str, Any]]

-
+P = ParamSpec("P")
 R = TypeVar("R")


@@ -339,7 +339,13 @@ class LoggingTransaction:
 "Strip newlines out of SQL so that the loggers in the DB are on one line"
 return " ".join(line.strip() for line in sql.splitlines() if line.strip())

- def _do_execute(self, func: Callable[..., R], sql: str, *args: Any) -> R:
+ def _do_execute(
+ self,
+ func: Callable[Concatenate[str, P], R],
+ sql: str,
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> R:
 sql = self._make_sql_one_line(sql)

 # TODO(paul): Maybe use 'info' and 'debug' for values?
@@ -348,7 +354,10 @@ class LoggingTransaction:
 sql = self.database_engine.convert_param_style(sql)
 if args:
 try:
- sql_logger.debug("[SQL values] {%s} %r", self.name, args[0])
+ # The type-ignore should be redundant once mypy releases a version with
+ # https://github.com/python/mypy/pull/12668. (`args` might be empty,
+ # but we'll catch the index error if so.)
+ sql_logger.debug("[SQL values] {%s} %r", self.name, args[0]) # type: ignore[index]
 except Exception:
 # Don't let logging failures stop SQL from working
 pass
@@ -363,7 +372,7 @@ class LoggingTransaction:
 opentracing.tags.DATABASE_STATEMENT: sql,
 },
 ):
- return func(sql, *args)
+ return func(sql, *args, **kwargs)
 except Exception as e:
 sql_logger.debug("[SQL FAIL] {%s} %s", self.name, e)
 raise
From 18d6c18aa16ab9809656d1175adf4617da44e36c Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 9 May 2022 04:38:32 -0500
Subject: [PATCH 63/87] Fix docs on how to run specific Complement tests after recent `complement.sh` change (#12664)

---
 changelog.d/12664.doc | 1 +
 docs/development/contributing_guide.md | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/12664.doc

diff --git a/changelog.d/12664.doc b/changelog.d/12664.doc
new file mode 100644
index 0000000000..142d18037a
--- /dev/null
+++ b/changelog.d/12664.doc
@@ -0,0 +1 @@
+Fix docs on how to run specific Complement tests using the `complement.sh` test runner.
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 3b5c774018..d356c72bf7 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -270,13 +270,13 @@ COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh

 To run a specific test file, you can pass the test name at the end of the command. The name
 passed comes from the naming structure in your Complement tests.
If you're unsure of the name, you can do a full run and copy it from the test output: ```sh -COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory +COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages ``` To run a specific test, you can specify the whole name structure: ```sh -COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order +COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order ``` From 77258b67257983d67f90270d3d8e04594fd512ba Mon Sep 17 00:00:00 2001 From: Sheogorath Date: Mon, 9 May 2022 12:08:31 +0200 Subject: [PATCH 64/87] docs(contrib): Add link to documentation in dashboard (#12602) --- changelog.d/12602.misc | 1 + contrib/grafana/synapse.json | 14 +++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12602.misc diff --git a/changelog.d/12602.misc b/changelog.d/12602.misc new file mode 100644 index 0000000000..cdccc5c316 --- /dev/null +++ b/changelog.d/12602.misc @@ -0,0 +1 @@ +Add link to documentation in Grafana Dashboard. diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index 2c839c30d0..819426b8ea 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -66,6 +66,18 @@ ], "title": "Dashboards", "type": "dashboards" + }, + { + "asDropdown": false, + "icon": "external link", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "Synapse Documentation", + "tooltip": "Open Documentation", + "type": "link", + "url": "https://matrix-org.github.io/synapse/latest/" } ], "panels": [ @@ -10889,4 +10901,4 @@ "title": "Synapse", "uid": "000000012", "version": 100 -} \ No newline at end of file +} From c5969b346d2694fea32ddd063ce40fec41028b57 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 May 2022 11:09:19 +0100 Subject: [PATCH 65/87] Don't error on unknown receipt types (#12670) Fixes #12669 --- changelog.d/12670.feature | 1 + synapse/rest/client/read_marker.py | 25 ++++++++++++++----------- 2 files changed, 15 insertions(+), 11 deletions(-) create mode 100644 changelog.d/12670.feature diff --git a/changelog.d/12670.feature b/changelog.d/12670.feature new file mode 100644 index 0000000000..cd5c45029e --- /dev/null +++ b/changelog.d/12670.feature @@ -0,0 +1 @@ +Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. 
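The read-marker change below swaps a hard 400 error for log-and-ignore handling of unknown receipt types. A minimal sketch of the resulting behaviour, with a simplified stand-in for `ReceiptTypes` (the `filter_receipts` helper is illustrative and not part of Synapse):

```python
import logging

logger = logging.getLogger(__name__)

# Simplified stand-ins for the synapse.api.constants.ReceiptTypes values.
VALID_RECEIPT_TYPES = {"m.read", "m.fully_read", "org.matrix.msc2285.read.private"}


def filter_receipts(body: dict) -> dict:
    """Keep the known receipt types in `body`; log, but do not reject, the rest."""
    unrecognized_types = set(body.keys()) - VALID_RECEIPT_TYPES
    if unrecognized_types:
        # Erroring out here would (a) stop the valid receipts in the same
        # request from being processed and (b) block future receipt types,
        # so the unknown keys are only logged to help debug typoed clients.
        logger.info("Ignoring unrecognized receipt types: %s", unrecognized_types)
    return {k: v for k, v in body.items() if k in VALID_RECEIPT_TYPES}
```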
diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index 1583e903cd..3644705e6a 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -16,7 +16,6 @@ import logging from typing import TYPE_CHECKING, Tuple from synapse.api.constants import ReceiptTypes -from synapse.api.errors import SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -50,17 +49,21 @@ class ReadMarkerRestServlet(RestServlet): body = parse_json_object_from_request(request) - valid_receipt_types = {ReceiptTypes.READ, ReceiptTypes.FULLY_READ} - if self.config.experimental.msc2285_enabled: - valid_receipt_types.add(ReceiptTypes.READ_PRIVATE) + valid_receipt_types = { + ReceiptTypes.READ, + ReceiptTypes.FULLY_READ, + ReceiptTypes.READ_PRIVATE, + } - if set(body.keys()) > valid_receipt_types: - raise SynapseError( - 400, - "Receipt type must be 'm.read', 'org.matrix.msc2285.read.private' or 'm.fully_read'" - if self.config.experimental.msc2285_enabled - else "Receipt type must be 'm.read' or 'm.fully_read'", - ) + unrecognized_types = set(body.keys()) - valid_receipt_types + if unrecognized_types: + # It's fine if there are unrecognized receipt types, but let's log + # it to help debug clients that have typoed the receipt type. + # + # We specifically *don't* error here, as a) it stops us processing + # the valid receipts, and b) we need to be extensible on receipt + # types. + logger.info("Ignoring unrecognized receipt types: %s", unrecognized_types) read_event_id = body.get(ReceiptTypes.READ, None) if read_event_id: From fa0eab9c8e159b698a31fc7cfaafed643f47e284 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 9 May 2022 11:27:39 +0100 Subject: [PATCH 66/87] Use `ParamSpec` in a few places (#12667) --- changelog.d/12667.misc | 1 + poetry.lock | 2 +- pyproject.toml | 4 ++- synapse/app/_base.py | 14 ++++++++--- synapse/events/presence_router.py | 15 +++++++++--- synapse/module_api/__init__.py | 17 +++++++------ synapse/rest/client/knock.py | 4 +-- synapse/rest/client/transactions.py | 19 +++++++++------ synapse/storage/database.py | 31 +++++++++++++++--------- synapse/storage/databases/main/events.py | 8 ++++-- synapse/util/async_helpers.py | 26 ++++++++++++++------ synapse/util/distributor.py | 29 +++++++++++++++++----- synapse/util/metrics.py | 31 ++++++++++++++++-------- synapse/util/patch_inline_callbacks.py | 15 +++++++----- 14 files changed, 148 insertions(+), 68 deletions(-) create mode 100644 changelog.d/12667.misc diff --git a/changelog.d/12667.misc b/changelog.d/12667.misc new file mode 100644 index 0000000000..2b17502d6b --- /dev/null +++ b/changelog.d/12667.misc @@ -0,0 +1 @@ +Use `ParamSpec` to refine type hints. diff --git a/poetry.lock b/poetry.lock index ddafaaeba0..f649efdf2b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1563,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "eebc9e1d720e2e866f5fddda98ce83d858949a6fdbe30c7e5aef4cf9d17be498" +content-hash = "d39d5ac5d51c014581186b7691999b861058b569084c525523baf70b77f292b1" [metadata.files] attrs = [ diff --git a/pyproject.toml b/pyproject.toml index 4c51b8c4a1..2c4b7eb08e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -143,7 +143,9 @@ netaddr = ">=0.7.18" Jinja2 = ">=3.0" bleach = ">=1.4.3" # We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0. 
-typing-extensions = ">=3.10.0" +# Additionally we need https://github.com/python/typing/pull/817 to allow types to be +# generic over ParamSpecs. +typing-extensions = ">=3.10.0.1" # We enforce that we have a `cryptography` version that bundles an `openssl` # with the latest security patches. cryptography = ">=3.4.7" diff --git a/synapse/app/_base.py b/synapse/app/_base.py index d28b87a3f4..3623c1724d 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -38,6 +38,7 @@ from typing import ( from cryptography.utils import CryptographyDeprecationWarning from matrix_common.versionstring import get_distribution_version_string +from typing_extensions import ParamSpec import twisted from twisted.internet import defer, error, reactor as _reactor @@ -81,11 +82,12 @@ logger = logging.getLogger(__name__) # list of tuples of function, args list, kwargs dict _sighup_callbacks: List[ - Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]] + Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]] ] = [] +P = ParamSpec("P") -def register_sighup(func: Callable[..., None], *args: Any, **kwargs: Any) -> None: +def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None: """ Register a function to be called when a SIGHUP occurs. @@ -93,7 +95,9 @@ def register_sighup(func: Callable[..., None], *args: Any, **kwargs: Any) -> Non func: Function to be called when sent a SIGHUP signal. *args, **kwargs: args and kwargs to be passed to the target function. """ - _sighup_callbacks.append((func, args, kwargs)) + # This type-ignore should be redundant once we use a mypy release with + # https://github.com/python/mypy/pull/12668. + _sighup_callbacks.append((func, args, kwargs)) # type: ignore[arg-type] def start_worker_reactor( @@ -214,7 +218,9 @@ def redirect_stdio_to_logs() -> None: print("Redirected stdout/stderr to logs") -def register_start(cb: Callable[..., Awaitable], *args: Any, **kwargs: Any) -> None: +def register_start( + cb: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs +) -> None: """Register a callback with the reactor, to be called once it is running This can be used to initialise parts of the system which require an asynchronous diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py index 98555c8c0c..8437ce52dc 100644 --- a/synapse/events/presence_router.py +++ b/synapse/events/presence_router.py @@ -22,9 +22,12 @@ from typing import ( List, Optional, Set, + TypeVar, Union, ) +from typing_extensions import ParamSpec + from synapse.api.presence import UserPresenceState from synapse.util.async_helpers import maybe_awaitable @@ -40,6 +43,10 @@ GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]] logger = logging.getLogger(__name__) +P = ParamSpec("P") +R = TypeVar("R") + + def load_legacy_presence_router(hs: "HomeServer") -> None: """Wrapper that loads a presence router module configured using the old configuration, and registers the hooks they implement. @@ -63,13 +70,15 @@ def load_legacy_presence_router(hs: "HomeServer") -> None: # All methods that the module provides should be async, but this wasn't enforced # in the old module system, so we wrap them if needed - def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]: + def async_wrapper( + f: Optional[Callable[P, R]] + ) -> Optional[Callable[P, Awaitable[R]]]: # f might be None if the callback isn't implemented by the module. In this # case we don't want to register a callback at all so we return None. 
if f is None: return None - def run(*args: Any, **kwargs: Any) -> Awaitable: + def run(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]: # Assertion required because mypy can't prove we won't change `f` # back to `None`. See # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions @@ -80,7 +89,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None: return run # Register the hooks through the module API. - hooks = { + hooks: Dict[str, Optional[Callable[..., Any]]] = { hook: async_wrapper(getattr(presence_router, hook, None)) for hook in presence_router_methods } diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 834fe1b62c..73f92d2df8 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -30,6 +30,7 @@ from typing import ( import attr import jinja2 +from typing_extensions import ParamSpec from twisted.internet import defer from twisted.web.resource import Resource @@ -129,6 +130,7 @@ if TYPE_CHECKING: T = TypeVar("T") +P = ParamSpec("P") """ This package defines the 'stable' API which can be used by extension modules which @@ -799,9 +801,9 @@ class ModuleApi: def run_db_interaction( self, desc: str, - func: Callable[..., T], - *args: Any, - **kwargs: Any, + func: Callable[P, T], + *args: P.args, + **kwargs: P.kwargs, ) -> "defer.Deferred[T]": """Run a function with a database connection @@ -817,8 +819,9 @@ class ModuleApi: Returns: Deferred[object]: result of func """ + # type-ignore: See https://github.com/python/mypy/issues/8862 return defer.ensureDeferred( - self._store.db_pool.runInteraction(desc, func, *args, **kwargs) + self._store.db_pool.runInteraction(desc, func, *args, **kwargs) # type: ignore[arg-type] ) def complete_sso_login( @@ -1296,9 +1299,9 @@ class ModuleApi: async def defer_to_thread( self, - f: Callable[..., T], - *args: Any, - **kwargs: Any, + f: Callable[P, T], + *args: P.args, + **kwargs: P.kwargs, ) -> T: """Runs the given function in a separate thread from Synapse's thread pool. diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py index 0152a0c66a..ad025c8a45 100644 --- a/synapse/rest/client/knock.py +++ b/synapse/rest/client/knock.py @@ -15,8 +15,6 @@ import logging from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple -from twisted.web.server import Request - from synapse.api.constants import Membership from synapse.api.errors import SynapseError from synapse.http.server import HttpServer @@ -97,7 +95,7 @@ class KnockRoomAliasServlet(RestServlet): return 200, {"room_id": room_id} def on_PUT( - self, request: Request, room_identifier: str, txn_id: str + self, request: SynapseRequest, room_identifier: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: set_tag("txn_id", txn_id) diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 914fb3acf5..61375651bc 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -15,7 +15,9 @@ """This module contains logic for storing HTTP PUT transactions. 
This is used to ensure idempotency when performing PUTs using the REST API.""" import logging -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Tuple + +from typing_extensions import ParamSpec from twisted.python.failure import Failure from twisted.web.server import Request @@ -32,6 +34,9 @@ logger = logging.getLogger(__name__) CLEANUP_PERIOD_MS = 1000 * 60 * 30 # 30 mins +P = ParamSpec("P") + + class HttpTransactionCache: def __init__(self, hs: "HomeServer"): self.hs = hs @@ -65,9 +70,9 @@ class HttpTransactionCache: def fetch_or_execute_request( self, request: Request, - fn: Callable[..., Awaitable[Tuple[int, JsonDict]]], - *args: Any, - **kwargs: Any, + fn: Callable[P, Awaitable[Tuple[int, JsonDict]]], + *args: P.args, + **kwargs: P.kwargs, ) -> Awaitable[Tuple[int, JsonDict]]: """A helper function for fetch_or_execute which extracts a transaction key from the given request. @@ -82,9 +87,9 @@ class HttpTransactionCache: def fetch_or_execute( self, txn_key: str, - fn: Callable[..., Awaitable[Tuple[int, JsonDict]]], - *args: Any, - **kwargs: Any, + fn: Callable[P, Awaitable[Tuple[int, JsonDict]]], + *args: P.args, + **kwargs: P.kwargs, ) -> Awaitable[Tuple[int, JsonDict]]: """Fetches the response for this transaction, or executes the given function to produce a response for this transaction. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 2255e55f6f..41f566b648 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -192,7 +192,7 @@ class LoggingDatabaseConnection: # The type of entry which goes on our after_callbacks and exception_callbacks lists. -_CallbackListEntry = Tuple[Callable[..., object], Iterable[Any], Dict[str, Any]] +_CallbackListEntry = Tuple[Callable[..., object], Tuple[object, ...], Dict[str, object]] P = ParamSpec("P") R = TypeVar("R") @@ -239,7 +239,9 @@ class LoggingTransaction: self.after_callbacks = after_callbacks self.exception_callbacks = exception_callbacks - def call_after(self, callback: Callable[..., object], *args: Any, **kwargs: Any): + def call_after( + self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs + ) -> None: """Call the given callback on the main twisted thread after the transaction has finished. @@ -256,11 +258,12 @@ class LoggingTransaction: # LoggingTransaction isn't expecting there to be any callbacks; assert that # is not the case. assert self.after_callbacks is not None - self.after_callbacks.append((callback, args, kwargs)) + # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 + self.after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] def call_on_exception( - self, callback: Callable[..., object], *args: Any, **kwargs: Any - ): + self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs + ) -> None: """Call the given callback on the main twisted thread after the transaction has failed. @@ -274,7 +277,8 @@ class LoggingTransaction: # LoggingTransaction isn't expecting there to be any callbacks; assert that # is not the case. 
assert self.exception_callbacks is not None - self.exception_callbacks.append((callback, args, kwargs)) + # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 + self.exception_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] def fetchone(self) -> Optional[Tuple]: return self.txn.fetchone() @@ -549,9 +553,9 @@ class DatabasePool: desc: str, after_callbacks: List[_CallbackListEntry], exception_callbacks: List[_CallbackListEntry], - func: Callable[..., R], - *args: Any, - **kwargs: Any, + func: Callable[Concatenate[LoggingTransaction, P], R], + *args: P.args, + **kwargs: P.kwargs, ) -> R: """Start a new database transaction with the given connection. @@ -581,7 +585,10 @@ class DatabasePool: # will fail if we have to repeat the transaction. # For now, we just log an error, and hope that it works on the first attempt. # TODO: raise an exception. - for i, arg in enumerate(args): + + # Type-ignore Mypy doesn't yet consider ParamSpec.args to be iterable; see + # https://github.com/python/mypy/pull/12668 + for i, arg in enumerate(args): # type: ignore[arg-type, var-annotated] if inspect.isgenerator(arg): logger.error( "Programming error: generator passed to new_transaction as " @@ -589,7 +596,9 @@ class DatabasePool: i, func, ) - for name, val in kwargs.items(): + # Type-ignore Mypy doesn't yet consider ParamSpec.args to be a mapping; see + # https://github.com/python/mypy/pull/12668 + for name, val in kwargs.items(): # type: ignore[attr-defined] if inspect.isgenerator(val): logger.error( "Programming error: generator passed to new_transaction as " diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 9a6c2fd47a..ed29a0a5e2 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1648,8 +1648,12 @@ class PersistEventsStore: txn.call_after(prefill) def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None: - # Invalidate the caches for the redacted event, note that these caches - # are also cleared as part of event replication in _invalidate_caches_for_event. + """Invalidate the caches for the redacted event. + + Note that these caches are also cleared as part of event replication in + _invalidate_caches_for_event. + """ + assert event.redacts is not None txn.call_after(self.store._invalidate_get_event_cache, event.redacts) txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,)) txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,)) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index e27c5d298f..b91020117f 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -42,7 +42,7 @@ from typing import ( ) import attr -from typing_extensions import AsyncContextManager, Literal +from typing_extensions import AsyncContextManager, Concatenate, Literal, ParamSpec from twisted.internet import defer from twisted.internet.defer import CancelledError @@ -237,9 +237,16 @@ async def concurrently_execute( ) +P = ParamSpec("P") +R = TypeVar("R") + + async def yieldable_gather_results( - func: Callable[..., Awaitable[T]], iter: Iterable, *args: Any, **kwargs: Any -) -> List[T]: + func: Callable[Concatenate[T, P], Awaitable[R]], + iter: Iterable[T], + *args: P.args, + **kwargs: P.kwargs, +) -> List[R]: """Executes the function with each argument concurrently. 
Args: @@ -255,7 +262,15 @@ async def yieldable_gather_results( try: return await make_deferred_yieldable( defer.gatherResults( - [run_in_background(func, item, *args, **kwargs) for item in iter], + # type-ignore: mypy reports two errors: + # error: Argument 1 to "run_in_background" has incompatible type + # "Callable[[T, **P], Awaitable[R]]"; expected + # "Callable[[T, **P], Awaitable[R]]" [arg-type] + # error: Argument 2 to "run_in_background" has incompatible type + # "T"; expected "[T, **P.args]" [arg-type] + # The former looks like a mypy bug, and the latter looks like a + # false positive. + [run_in_background(func, item, *args, **kwargs) for item in iter], # type: ignore[arg-type] consumeErrors=True, ) ) @@ -577,9 +592,6 @@ class ReadWriteLock: return _ctx_manager() -R = TypeVar("R") - - def timeout_deferred( deferred: "defer.Deferred[_T]", timeout: float, reactor: IReactorTime ) -> "defer.Deferred[_T]": diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index 91837655f8..b580bdd0de 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -12,7 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Any, Callable, Dict, List +from typing import ( + Any, + Awaitable, + Callable, + Dict, + Generic, + List, + Optional, + TypeVar, + Union, +) + +from typing_extensions import ParamSpec from twisted.internet import defer @@ -75,7 +87,11 @@ class Distributor: run_as_background_process(name, self.signals[name].fire, *args, **kwargs) -class Signal: +P = ParamSpec("P") +R = TypeVar("R") + + +class Signal(Generic[P]): """A Signal is a dispatch point that stores a list of callables as observers of it. @@ -87,16 +103,16 @@ class Signal: def __init__(self, name: str): self.name: str = name - self.observers: List[Callable] = [] + self.observers: List[Callable[P, Any]] = [] - def observe(self, observer: Callable) -> None: + def observe(self, observer: Callable[P, Any]) -> None: """Adds a new callable to the observer list which will be invoked by the 'fire' method. Each observer callable may return a Deferred.""" self.observers.append(observer) - def fire(self, *args: Any, **kwargs: Any) -> "defer.Deferred[List[Any]]": + def fire(self, *args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[List[Any]]": """Invokes every callable in the observer list, passing in the args and kwargs. Exceptions thrown by observers are logged but ignored. It is not an error to fire a signal with no observers. 
@@ -104,7 +120,7 @@ class Signal: Returns a Deferred that will complete when all the observers have completed.""" - async def do(observer: Callable[..., Any]) -> Any: + async def do(observer: Callable[P, Union[R, Awaitable[R]]]) -> Optional[R]: try: return await maybe_awaitable(observer(*args, **kwargs)) except Exception as e: @@ -114,6 +130,7 @@ class Signal: observer, e, ) + return None deferreds = [run_in_background(do, o) for o in self.observers] diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 98ee49af6e..bc3b4938ea 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -15,10 +15,10 @@ import logging from functools import wraps from types import TracebackType -from typing import Any, Callable, Optional, Type, TypeVar, cast +from typing import Awaitable, Callable, Optional, Type, TypeVar from prometheus_client import Counter -from typing_extensions import Protocol +from typing_extensions import Concatenate, ParamSpec, Protocol from synapse.logging.context import ( ContextResourceUsage, @@ -72,16 +72,21 @@ in_flight: InFlightGauge[_InFlightMetric] = InFlightGauge( ) -T = TypeVar("T", bound=Callable[..., Any]) +P = ParamSpec("P") +R = TypeVar("R") class HasClock(Protocol): clock: Clock -def measure_func(name: Optional[str] = None) -> Callable[[T], T]: - """ - Used to decorate an async function with a `Measure` context manager. +def measure_func( + name: Optional[str] = None, +) -> Callable[[Callable[P, Awaitable[R]]], Callable[P, Awaitable[R]]]: + """Decorate an async method with a `Measure` context manager. + + The Measure is created using `self.clock`; it should only be used to decorate + methods in classes defining an instance-level `clock` attribute. Usage: @@ -97,18 +102,24 @@ def measure_func(name: Optional[str] = None) -> Callable[[T], T]: """ - def wrapper(func: T) -> T: + def wrapper( + func: Callable[Concatenate[HasClock, P], Awaitable[R]] + ) -> Callable[P, Awaitable[R]]: block_name = func.__name__ if name is None else name @wraps(func) - async def measured_func(self: HasClock, *args: Any, **kwargs: Any) -> Any: + async def measured_func(self: HasClock, *args: P.args, **kwargs: P.kwargs) -> R: with Measure(self.clock, block_name): r = await func(self, *args, **kwargs) return r - return cast(T, measured_func) + # There are some shenanigans here, because we're decorating a method but + # explicitly making use of the `self` parameter. The key thing here is that the + # return type within the return type for `measure_func` itself describes how the + # decorated function will be called. 
+ return measured_func # type: ignore[return-value] - return wrapper + return wrapper # type: ignore[return-value] class Measure: diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index dace68666c..f97f98a057 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -16,6 +16,8 @@ import functools import sys from typing import Any, Callable, Generator, List, TypeVar, cast +from typing_extensions import ParamSpec + from twisted.internet import defer from twisted.internet.defer import Deferred from twisted.python.failure import Failure @@ -25,6 +27,7 @@ _already_patched = False T = TypeVar("T") +P = ParamSpec("P") def do_patch() -> None: @@ -41,13 +44,13 @@ def do_patch() -> None: return def new_inline_callbacks( - f: Callable[..., Generator["Deferred[object]", object, T]] - ) -> Callable[..., "Deferred[T]"]: + f: Callable[P, Generator["Deferred[object]", object, T]] + ) -> Callable[P, "Deferred[T]"]: @functools.wraps(f) - def wrapped(*args: Any, **kwargs: Any) -> "Deferred[T]": + def wrapped(*args: P.args, **kwargs: P.kwargs) -> "Deferred[T]": start_context = current_context() changes: List[str] = [] - orig: Callable[..., "Deferred[T]"] = orig_inline_callbacks( + orig: Callable[P, "Deferred[T]"] = orig_inline_callbacks( _check_yield_points(f, changes) ) @@ -115,7 +118,7 @@ def do_patch() -> None: def _check_yield_points( - f: Callable[..., Generator["Deferred[object]", object, T]], + f: Callable[P, Generator["Deferred[object]", object, T]], changes: List[str], ) -> Callable: """Wraps a generator that is about to be passed to defer.inlineCallbacks @@ -138,7 +141,7 @@ def _check_yield_points( @functools.wraps(f) def check_yield_points_inner( - *args: Any, **kwargs: Any + *args: P.args, **kwargs: P.kwargs ) -> Generator["Deferred[object]", object, T]: gen = f(*args, **kwargs) From 41a882e62d6d17f7296e4c77f5bde9692aa8aa8b Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 9 May 2022 11:34:39 +0100 Subject: [PATCH 67/87] Update changelog for #12587 to be more accurate (#12663) #12587 has fallen on the wrong side of the release cutoff to the rest of the related PRs. Signed-off-by: Sean Quah --- changelog.d/12587.misc | 2 +- changelog.d/12663.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12663.misc diff --git a/changelog.d/12587.misc b/changelog.d/12587.misc index d26e332305..3b466f1ddf 100644 --- a/changelog.d/12587.misc +++ b/changelog.d/12587.misc @@ -1 +1 @@ -Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. +Log status code of cancelled requests as 499 and avoid logging stack traces for them. diff --git a/changelog.d/12663.misc b/changelog.d/12663.misc new file mode 100644 index 0000000000..3b466f1ddf --- /dev/null +++ b/changelog.d/12663.misc @@ -0,0 +1 @@ +Log status code of cancelled requests as 499 and avoid logging stack traces for them. 
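Taken together, the `ParamSpec` work in #12666 and #12667 above uses two patterns: typing a forwarding wrapper with `Concatenate`, and making a class generic over its observers' signatures. A self-contained sketch of both, assuming the same `typing-extensions >= 3.10.0.1` bound the patches pin (all names below are illustrative, not Synapse APIs):

```python
from typing import Any, Callable, Generic, List, TypeVar

from typing_extensions import Concatenate, ParamSpec

P = ParamSpec("P")
R = TypeVar("R")


def run_with_label(
    func: Callable[Concatenate[str, P], R],
    label: str,
    *args: P.args,
    **kwargs: P.kwargs,
) -> R:
    """Forwarding wrapper: `func` must take `label` first; the remaining
    arguments are checked against `func`'s own signature via `P`."""
    return func(label, *args, **kwargs)


class Emitter(Generic[P]):
    """Toy signal: every observer must share the parameter specification `P`."""

    def __init__(self) -> None:
        self.observers: List[Callable[P, Any]] = []

    def observe(self, observer: Callable[P, Any]) -> None:
        self.observers.append(observer)

    def fire(self, *args: P.args, **kwargs: P.kwargs) -> None:
        for observer in self.observers:
            observer(*args, **kwargs)


def greet(label: str, name: str, excited: bool = False) -> str:
    return f"{label}: hello {name}" + ("!" if excited else "")


# Mypy checks `name` and `excited` against `greet`'s real signature here.
assert run_with_label(greet, "demo", "alice", excited=True) == "demo: hello alice!"

on_login: "Emitter[[str, int]]" = Emitter()  # observers must accept (str, int)
on_login.observe(lambda user, count: print(user, "logged in", count, "times"))
on_login.fire("alice", 3)
```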
From 8de0facaae08c422cbf1acefe898820d3bf5c632 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 9 May 2022 11:48:14 +0100 Subject: [PATCH 68/87] Fix mypy against latest pillow stubs (#12671) --- changelog.d/12671.misc | 1 + poetry.lock | 6 +++--- synapse/rest/media/v1/thumbnailer.py | 6 +++--- 3 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12671.misc diff --git a/changelog.d/12671.misc b/changelog.d/12671.misc new file mode 100644 index 0000000000..56df4e3831 --- /dev/null +++ b/changelog.d/12671.misc @@ -0,0 +1 @@ +Fix mypy against latest pillow stubs. diff --git a/poetry.lock b/poetry.lock index f649efdf2b..49a912a589 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1371,7 +1371,7 @@ python-versions = "*" [[package]] name = "types-pillow" -version = "9.0.6" +version = "9.0.15" description = "Typing stubs for Pillow" category = "dev" optional = false @@ -2626,8 +2626,8 @@ types-opentracing = [ {file = "types_opentracing-2.4.7-py3-none-any.whl", hash = "sha256:861fb8103b07cf717f501dd400cb274ca9992552314d4d6c7a824b11a215e512"}, ] types-pillow = [ - {file = "types-Pillow-9.0.6.tar.gz", hash = "sha256:79b350b1188c080c27558429f1e119e69c9f020b877a82df761d9283070e0185"}, - {file = "types_Pillow-9.0.6-py3-none-any.whl", hash = "sha256:bd1e0a844fc718398aa265bf50fcad550fc520cc54f80e5ffeb7b3226b3cc507"}, + {file = "types-Pillow-9.0.15.tar.gz", hash = "sha256:d2e385fe5c192e75970f18accce69f5c2a9f186f3feb578a9b91cd6fdf64211d"}, + {file = "types_Pillow-9.0.15-py3-none-any.whl", hash = "sha256:c9646595dfafdf8b63d4b1443292ead17ee0fc7b18a143e497b68e0ea2dc1eb6"}, ] types-psycopg2 = [ {file = "types-psycopg2-2.9.9.tar.gz", hash = "sha256:4f9d4d52eeb343dc00fd5ed4f1513a8a5c18efba0a072eb82706d15cf4f20a2e"}, diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 5e17664b5b..390491eb83 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -121,10 +121,10 @@ class Thumbnailer: # # If the image has transparency, use RGBA instead. if self.image.mode in ["1", "L", "P"]: - mode = "RGB" if self.image.info.get("transparency", None) is not None: - mode = "RGBA" - self.image = self.image.convert(mode) + self.image = self.image.convert("RGBA") + else: + self.image = self.image.convert("RGB") return self.image.resize((width, height), Image.ANTIALIAS) def scale(self, width: int, height: int, output_type: str) -> BytesIO: From a00462dd9927558532b030593f8914ade53b7214 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 9 May 2022 12:31:14 +0100 Subject: [PATCH 69/87] Implement cancellation support/protection for module callbacks (#12568) There's no guarantee that module callbacks will handle cancellation appropriately. Protect module callbacks with read semantics from cancellation and avoid swallowing `CancelledError`s that arise. Other module callbacks, such as the `on_*` callbacks, are presumed to live on code paths that involve writes and aren't cancellation-friendly. These module callbacks have been left alone. 
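Roughly, the protection has the following shape (a sketch in asyncio terms for illustration only; the real helper is `delay_cancellation` from `synapse.util.async_helpers`, which operates on Twisted Deferreds rather than asyncio tasks):

```python
import asyncio
from typing import Awaitable, Callable, TypeVar

T = TypeVar("T")


async def run_protected(callback: Callable[[], Awaitable[T]]) -> T:
    """Run `callback` so that cancelling the caller neither aborts the
    callback mid-flight nor lets the cancellation be swallowed."""
    task = asyncio.ensure_future(callback())
    try:
        # shield() means cancelling *this* coroutine does not cancel `task`.
        return await asyncio.shield(task)
    except asyncio.CancelledError:
        # Let the callback run to completion, then re-raise the cancellation
        # instead of logging it like an ordinary module failure.
        await asyncio.wait([task])
        raise
```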
Signed-off-by: Sean Quah --- changelog.d/12568.misc | 1 + synapse/events/presence_router.py | 12 +++++++--- synapse/events/spamcheck.py | 36 +++++++++++++++++++--------- synapse/events/third_party_rules.py | 36 +++++++++++++++++++++++----- synapse/handlers/account_validity.py | 3 ++- synapse/handlers/auth.py | 25 ++++++++++++++----- 6 files changed, 86 insertions(+), 27 deletions(-) create mode 100644 changelog.d/12568.misc diff --git a/changelog.d/12568.misc b/changelog.d/12568.misc new file mode 100644 index 0000000000..f64dc67c4f --- /dev/null +++ b/changelog.d/12568.misc @@ -0,0 +1 @@ +Protect module callbacks with read semantics against cancellation. diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py index 8437ce52dc..bb4a6bd957 100644 --- a/synapse/events/presence_router.py +++ b/synapse/events/presence_router.py @@ -28,8 +28,10 @@ from typing import ( from typing_extensions import ParamSpec +from twisted.internet.defer import CancelledError + from synapse.api.presence import UserPresenceState -from synapse.util.async_helpers import maybe_awaitable +from synapse.util.async_helpers import delay_cancellation, maybe_awaitable if TYPE_CHECKING: from synapse.server import HomeServer @@ -158,7 +160,9 @@ class PresenceRouter: try: # Note: result is an object here, because we don't trust modules to # return the types they're supposed to. - result: object = await callback(state_updates) + result: object = await delay_cancellation(callback(state_updates)) + except CancelledError: + raise except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) continue @@ -210,7 +214,9 @@ class PresenceRouter: # run all the callbacks for get_interested_users and combine the results for callback in self._get_interested_users_callbacks: try: - result = await callback(user_id) + result = await delay_cancellation(callback(user_id)) + except CancelledError: + raise except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) continue diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index cd80fcf9d1..3b6795d40f 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -31,7 +31,7 @@ from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import RoomAlias, UserProfile -from synapse.util.async_helpers import maybe_awaitable +from synapse.util.async_helpers import delay_cancellation, maybe_awaitable if TYPE_CHECKING: import synapse.events @@ -255,7 +255,7 @@ class SpamChecker: will be used as the error message returned to the user. 
""" for callback in self._check_event_for_spam_callbacks: - res: Union[bool, str] = await callback(event) + res: Union[bool, str] = await delay_cancellation(callback(event)) if res: return res @@ -276,7 +276,10 @@ class SpamChecker: Whether the user may join the room """ for callback in self._user_may_join_room_callbacks: - if await callback(user_id, room_id, is_invited) is False: + may_join_room = await delay_cancellation( + callback(user_id, room_id, is_invited) + ) + if may_join_room is False: return False return True @@ -297,7 +300,10 @@ class SpamChecker: True if the user may send an invite, otherwise False """ for callback in self._user_may_invite_callbacks: - if await callback(inviter_userid, invitee_userid, room_id) is False: + may_invite = await delay_cancellation( + callback(inviter_userid, invitee_userid, room_id) + ) + if may_invite is False: return False return True @@ -322,7 +328,10 @@ class SpamChecker: True if the user may send the invite, otherwise False """ for callback in self._user_may_send_3pid_invite_callbacks: - if await callback(inviter_userid, medium, address, room_id) is False: + may_send_3pid_invite = await delay_cancellation( + callback(inviter_userid, medium, address, room_id) + ) + if may_send_3pid_invite is False: return False return True @@ -339,7 +348,8 @@ class SpamChecker: True if the user may create a room, otherwise False """ for callback in self._user_may_create_room_callbacks: - if await callback(userid) is False: + may_create_room = await delay_cancellation(callback(userid)) + if may_create_room is False: return False return True @@ -359,7 +369,10 @@ class SpamChecker: True if the user may create a room alias, otherwise False """ for callback in self._user_may_create_room_alias_callbacks: - if await callback(userid, room_alias) is False: + may_create_room_alias = await delay_cancellation( + callback(userid, room_alias) + ) + if may_create_room_alias is False: return False return True @@ -377,7 +390,8 @@ class SpamChecker: True if the user may publish the room, otherwise False """ for callback in self._user_may_publish_room_callbacks: - if await callback(userid, room_id) is False: + may_publish_room = await delay_cancellation(callback(userid, room_id)) + if may_publish_room is False: return False return True @@ -400,7 +414,7 @@ class SpamChecker: for callback in self._check_username_for_spam_callbacks: # Make a copy of the user profile object to ensure the spam checker cannot # modify it. 
- if await callback(user_profile.copy()): + if await delay_cancellation(callback(user_profile.copy())): return True return False @@ -428,7 +442,7 @@ class SpamChecker: """ for callback in self._check_registration_for_spam_callbacks: - behaviour = await ( + behaviour = await delay_cancellation( callback(email_threepid, username, request_info, auth_provider_id) ) assert isinstance(behaviour, RegistrationBehaviour) @@ -472,7 +486,7 @@ class SpamChecker: """ for callback in self._check_media_file_for_spam_callbacks: - spam = await callback(file_wrapper, file_info) + spam = await delay_cancellation(callback(file_wrapper, file_info)) if spam: return True diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index ef68e20282..9f4ff9799c 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -14,12 +14,14 @@ import logging from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple +from twisted.internet.defer import CancelledError + from synapse.api.errors import ModuleFailedException, SynapseError from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.storage.roommember import ProfileInfo from synapse.types import Requester, StateMap -from synapse.util.async_helpers import maybe_awaitable +from synapse.util.async_helpers import delay_cancellation, maybe_awaitable if TYPE_CHECKING: from synapse.server import HomeServer @@ -263,7 +265,11 @@ class ThirdPartyEventRules: for callback in self._check_event_allowed_callbacks: try: - res, replacement_data = await callback(event, state_events) + res, replacement_data = await delay_cancellation( + callback(event, state_events) + ) + except CancelledError: + raise except SynapseError as e: # FIXME: Being able to throw SynapseErrors is relied upon by # some modules. PR #10386 accidentally broke this ability. 
@@ -333,8 +339,13 @@ class ThirdPartyEventRules: for callback in self._check_threepid_can_be_invited_callbacks: try: - if await callback(medium, address, state_events) is False: + threepid_can_be_invited = await delay_cancellation( + callback(medium, address, state_events) + ) + if threepid_can_be_invited is False: return False + except CancelledError: + raise except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) @@ -361,8 +372,13 @@ class ThirdPartyEventRules: for callback in self._check_visibility_can_be_modified_callbacks: try: - if await callback(room_id, state_events, new_visibility) is False: + visibility_can_be_modified = await delay_cancellation( + callback(room_id, state_events, new_visibility) + ) + if visibility_can_be_modified is False: return False + except CancelledError: + raise except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) @@ -400,8 +416,11 @@ class ThirdPartyEventRules: """ for callback in self._check_can_shutdown_room_callbacks: try: - if await callback(user_id, room_id) is False: + can_shutdown_room = await delay_cancellation(callback(user_id, room_id)) + if can_shutdown_room is False: return False + except CancelledError: + raise except Exception as e: logger.exception( "Failed to run module API callback %s: %s", callback, e @@ -422,8 +441,13 @@ class ThirdPartyEventRules: """ for callback in self._check_can_deactivate_user_callbacks: try: - if await callback(user_id, by_admin) is False: + can_deactivate_user = await delay_cancellation( + callback(user_id, by_admin) + ) + if can_deactivate_user is False: return False + except CancelledError: + raise except Exception as e: logger.exception( "Failed to run module API callback %s: %s", callback, e diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 05a138410e..33e45e3a11 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -23,6 +23,7 @@ from synapse.api.errors import AuthError, StoreError, SynapseError from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.types import UserID from synapse.util import stringutils +from synapse.util.async_helpers import delay_cancellation if TYPE_CHECKING: from synapse.server import HomeServer @@ -150,7 +151,7 @@ class AccountValidityHandler: Whether the user has expired. 
""" for callback in self._is_user_expired_callbacks: - expired = await callback(user_id) + expired = await delay_cancellation(callback(user_id)) if expired is not None: return expired diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index ad41337b28..1b9050ea96 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -41,6 +41,7 @@ import pymacaroons import unpaddedbase64 from pymacaroons.exceptions import MacaroonVerificationFailedException +from twisted.internet.defer import CancelledError from twisted.web.server import Request from synapse.api.constants import LoginType @@ -67,7 +68,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.roommember import ProfileInfo from synapse.types import JsonDict, Requester, UserID from synapse.util import stringutils as stringutils -from synapse.util.async_helpers import maybe_awaitable +from synapse.util.async_helpers import delay_cancellation, maybe_awaitable from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import base62_encode @@ -2202,7 +2203,11 @@ class PasswordAuthProvider: # other than None (i.e. until a callback returns a success) for callback in self.auth_checker_callbacks[login_type]: try: - result = await callback(username, login_type, login_dict) + result = await delay_cancellation( + callback(username, login_type, login_dict) + ) + except CancelledError: + raise except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) continue @@ -2263,7 +2268,9 @@ class PasswordAuthProvider: for callback in self.check_3pid_auth_callbacks: try: - result = await callback(medium, address, password) + result = await delay_cancellation(callback(medium, address, password)) + except CancelledError: + raise except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) continue @@ -2345,7 +2352,7 @@ class PasswordAuthProvider: """ for callback in self.get_username_for_registration_callbacks: try: - res = await callback(uia_results, params) + res = await delay_cancellation(callback(uia_results, params)) if isinstance(res, str): return res @@ -2359,6 +2366,8 @@ class PasswordAuthProvider: callback, res, ) + except CancelledError: + raise except Exception as e: logger.error( "Module raised an exception in get_username_for_registration: %s", @@ -2388,7 +2397,7 @@ class PasswordAuthProvider: """ for callback in self.get_displayname_for_registration_callbacks: try: - res = await callback(uia_results, params) + res = await delay_cancellation(callback(uia_results, params)) if isinstance(res, str): return res @@ -2402,6 +2411,8 @@ class PasswordAuthProvider: callback, res, ) + except CancelledError: + raise except Exception as e: logger.error( "Module raised an exception in get_displayname_for_registration: %s", @@ -2429,7 +2440,7 @@ class PasswordAuthProvider: """ for callback in self.is_3pid_allowed_callbacks: try: - res = await callback(medium, address, registration) + res = await delay_cancellation(callback(medium, address, registration)) if res is False: return res @@ -2443,6 +2454,8 @@ class PasswordAuthProvider: callback, res, ) + except CancelledError: + raise except Exception as e: logger.error("Module raised an exception in is_3pid_allowed: %s", e) raise SynapseError(code=500, msg="Internal Server Error") From bf0c3ca20a79136df962645d952ac75ce9d3615f Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: 
Mon, 9 May 2022 22:29:07 +0200 Subject: [PATCH 70/87] Fix inconsistent spelling of 'M_UNRECOGNIZED'. (#12665) --- changelog.d/12665.misc | 1 + synapse/federation/federation_client.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12665.misc diff --git a/changelog.d/12665.misc b/changelog.d/12665.misc new file mode 100644 index 0000000000..37b96fea37 --- /dev/null +++ b/changelog.d/12665.misc @@ -0,0 +1 @@ +Fix spelling of `M_UNRECOGNIZED` in comments. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b5e0b84cbc..17eff60909 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -618,7 +618,7 @@ class FederationClient(FederationBase): # # Dendrite returns a 404 (with a body of "404 page not found"); # Conduit returns a 404 (with no body); and Synapse returns a 400 - # with M_UNRECOGNISED. + # with M_UNRECOGNIZED. # # This needs to be rather specific as some endpoints truly do return 404 # errors. From 34e84fee681e8c4503e17b28d6dc50dccfb6d84f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 9 May 2022 22:41:06 +0100 Subject: [PATCH 71/87] Tweaks to workers-under-complement (#12637) * Bump the HS startup timeout * Log prefixes for more processes * Bump the overall timeout --- changelog.d/12637.misc | 1 + docker/conf-workers/supervisord.conf.j2 | 6 +++--- scripts-dev/complement.sh | 13 +++++++++++-- 3 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 changelog.d/12637.misc diff --git a/changelog.d/12637.misc b/changelog.d/12637.misc new file mode 100644 index 0000000000..735257787f --- /dev/null +++ b/changelog.d/12637.misc @@ -0,0 +1 @@ +Minor improvements to the scripts for running Synapse in worker mode under Complement. diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2 index 408ef72787..ca1f7aef8e 100644 --- a/docker/conf-workers/supervisord.conf.j2 +++ b/docker/conf-workers/supervisord.conf.j2 @@ -9,7 +9,7 @@ user=root files = /etc/supervisor/conf.d/*.conf [program:nginx] -command=/usr/sbin/nginx -g "daemon off;" +command=/usr/local/bin/prefix-log /usr/sbin/nginx -g "daemon off;" priority=500 stdout_logfile=/dev/stdout stdout_logfile_maxbytes=0 @@ -19,7 +19,7 @@ username=www-data autorestart=true [program:redis] -command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no +command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no priority=1 stdout_logfile=/dev/stdout stdout_logfile_maxbytes=0 @@ -29,7 +29,7 @@ username=redis autorestart=true [program:synapse_main] -command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml +command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml priority=10 # Log startup failures to supervisord's stdout/err # Regular synapse logs will still go in the configured data directory diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index e0feba05fa..190df6909a 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -43,6 +43,8 @@ fi # Build the base Synapse image from the local checkout docker build -t matrixdotorg/synapse -f "docker/Dockerfile" . +extra_test_args=() + # If we're using workers, modify the docker files slightly. 
if [[ -n "$WORKERS" ]]; then # Build the workers docker image (from the base Synapse image). @@ -52,7 +54,14 @@ if [[ -n "$WORKERS" ]]; then COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile # And provide some more configuration to complement. - export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=60 + + # It can take quite a while to spin up a worker-mode Synapse for the first + # time (the main problem is that we start 14 python processes for each test, + # and complement likes to do two of them in parallel). + export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120 + + # ... and it takes longer than 10m to run the whole suite. + extra_test_args+=("-timeout=60m") else export COMPLEMENT_BASE_IMAGE=complement-synapse COMPLEMENT_DOCKERFILE=Dockerfile @@ -64,4 +73,4 @@ docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERF # Run the tests! echo "Images built; running complement" cd "$COMPLEMENT_DIR" -go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "$@" ./tests/... +go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "${extra_test_args[@]}" "$@" ./tests/... From 615d96ad6e5bba6f260cb03f4ec119bef51a3309 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 9 May 2022 23:43:02 +0200 Subject: [PATCH 72/87] Update SQL statements in docs for Synapse Admins (#12536) --- changelog.d/12536.doc | 1 + .../administration/useful_sql_for_admins.md | 191 +++++++++++------- 2 files changed, 122 insertions(+), 70 deletions(-) create mode 100644 changelog.d/12536.doc diff --git a/changelog.d/12536.doc b/changelog.d/12536.doc new file mode 100644 index 0000000000..4034c42076 --- /dev/null +++ b/changelog.d/12536.doc @@ -0,0 +1 @@ +Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. diff --git a/docs/usage/administration/useful_sql_for_admins.md b/docs/usage/administration/useful_sql_for_admins.md index d4aada3272..f3b97f9576 100644 --- a/docs/usage/administration/useful_sql_for_admins.md +++ b/docs/usage/administration/useful_sql_for_admins.md @@ -1,7 +1,10 @@ ## Some useful SQL queries for Synapse Admins ## Size of full matrix db -`SELECT pg_size_pretty( pg_database_size( 'matrix' ) );` +```sql +SELECT pg_size_pretty( pg_database_size( 'matrix' ) ); +``` + ### Result example: ``` pg_size_pretty @@ -9,39 +12,19 @@ pg_size_pretty 6420 MB (1 row) ``` -## Show top 20 larger rooms by state events count -```sql -SELECT r.name, s.room_id, s.current_state_events - FROM room_stats_current s - LEFT JOIN room_stats_state r USING (room_id) - ORDER BY current_state_events DESC - LIMIT 20; -``` - -and by state_group_events count: -```sql -SELECT rss.name, s.room_id, count(s.room_id) FROM state_groups_state s -LEFT JOIN room_stats_state rss USING (room_id) -GROUP BY s.room_id, rss.name -ORDER BY count(s.room_id) DESC -LIMIT 20; -``` -plus same, but with join removed for performance reasons: -```sql -SELECT s.room_id, count(s.room_id) FROM state_groups_state s -GROUP BY s.room_id -ORDER BY count(s.room_id) DESC -LIMIT 20; -``` ## Show top 20 larger tables by row count ```sql -SELECT relname, n_live_tup as rows - FROM pg_stat_user_tables +SELECT relname, n_live_tup AS "rows" + FROM pg_stat_user_tables ORDER BY n_live_tup DESC LIMIT 20; ``` -This query is quick, but may be very approximate, for exact number of rows use `SELECT COUNT(*) FROM `. 
+This query is quick, but may be very approximate; for the exact number of rows use:
+```sql
+SELECT COUNT(*) FROM <table_name>;
+```
+
 ### Result example:
 ```
 state_groups_state - 161687170
@@ -66,46 +49,19 @@ device_lists_stream - 326903
 user_directory_search - 316433
 ```
 
-## Show top 20 rooms by new events count in last 1 day:
-```sql
-SELECT e.room_id, r.name, COUNT(e.event_id) cnt FROM events e
-LEFT JOIN room_stats_state r USING (room_id)
-WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000 GROUP BY e.room_id, r.name ORDER BY cnt DESC LIMIT 20;
-```
-
-## Show top 20 users on homeserver by sent events (messages) at last month:
-```sql
-SELECT user_id, SUM(total_events)
-    FROM user_stats_historical
-    WHERE TO_TIMESTAMP(end_ts/1000) AT TIME ZONE 'UTC' > date_trunc('day', now() - interval '1 month')
-    GROUP BY user_id
-    ORDER BY SUM(total_events) DESC
-    LIMIT 20;
-```
-
-## Show last 100 messages from needed user, with room names:
-```sql
-SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json FROM events e
-    LEFT JOIN event_json j USING (room_id)
-    LEFT JOIN room_stats_state r USING (room_id)
-    WHERE sender = '@LOGIN:example.com'
-    AND e.type = 'm.room.message'
-    ORDER BY stream_ordering DESC
-    LIMIT 100;
-```
-
 ## Show top 20 larger tables by storage size
 ```sql
 SELECT nspname || '.' || relname AS "relation",
-    pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
-  FROM pg_class C
-  LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+    pg_size_pretty(pg_total_relation_size(c.oid)) AS "total_size"
+  FROM pg_class c
+  LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
   WHERE nspname NOT IN ('pg_catalog', 'information_schema')
-    AND C.relkind <> 'i'
+    AND c.relkind <> 'i'
     AND nspname !~ '^pg_toast'
-  ORDER BY pg_total_relation_size(C.oid) DESC
+  ORDER BY pg_total_relation_size(c.oid) DESC
   LIMIT 20;
 ```
+
 ### Result example:
 ```
 public.state_groups_state - 27 GB
@@ -130,8 +86,93 @@ public.device_lists_remote_cache - 124 MB
 public.state_group_edges - 122 MB
 ```
 
+## Show top 20 larger rooms by state events count
+You get the same information when you use the
+[admin API](../../admin_api/rooms.md#list-room-api)
+and set parameter `order_by=state_events`.
+
+```sql
+SELECT r.name, s.room_id, s.current_state_events
+  FROM room_stats_current s
+  LEFT JOIN room_stats_state r USING (room_id)
+  ORDER BY current_state_events DESC
+  LIMIT 20;
+```
+
+and by state_group_events count:
+```sql
+SELECT rss.name, s.room_id, COUNT(s.room_id)
+  FROM state_groups_state s
+  LEFT JOIN room_stats_state rss USING (room_id)
+  GROUP BY s.room_id, rss.name
+  ORDER BY COUNT(s.room_id) DESC
+  LIMIT 20;
+```
+
+The same query, but with the join removed for performance reasons:
+```sql
+SELECT s.room_id, COUNT(s.room_id)
+  FROM state_groups_state s
+  GROUP BY s.room_id
+  ORDER BY COUNT(s.room_id) DESC
+  LIMIT 20;
+```
+
+## Show top 20 rooms by new events count in the last day:
+```sql
+SELECT e.room_id, r.name, COUNT(e.event_id) cnt
+  FROM events e
+  LEFT JOIN room_stats_state r USING (room_id)
+  WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000
+  GROUP BY e.room_id, r.name
+  ORDER BY cnt DESC
+  LIMIT 20;
+```
+
+## Show top 20 users on homeserver by sent events (messages) in the last month:
+Caution: this query does not use any indexes, can be slow, and will create load on the database.
+```sql
+SELECT COUNT(*), sender
+  FROM events
+  WHERE (type = 'm.room.encrypted' OR type = 'm.room.message')
+    AND origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 month') * 1000
+  GROUP BY sender
+  ORDER BY COUNT(*) DESC
+  LIMIT 20;
+```
+
+## Show last 100 messages from a given user, with room names:
+```sql
+SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json
+  FROM events e
+  LEFT JOIN event_json j USING (room_id)
+  LEFT JOIN room_stats_state r USING (room_id)
+  WHERE sender = '@LOGIN:example.com'
+    AND e.type = 'm.room.message'
+  ORDER BY stream_ordering DESC
+  LIMIT 100;
+```
+
 ## Show rooms with names, sorted by events in these rooms
-`echo "select event_json.room_id,room_stats_state.name from event_json,room_stats_state where room_stats_state.room_id=event_json.room_id" | psql synapse | sort | uniq -c | sort -n`
+
+**Sort and order with bash**
+```bash
+echo "SELECT event_json.room_id, room_stats_state.name FROM event_json, room_stats_state \
+WHERE room_stats_state.room_id = event_json.room_id" | psql -d synapse -h localhost -U synapse_user -t \
+| sort | uniq -c | sort -n
+```
+Documentation for `psql` command-line parameters: https://www.postgresql.org/docs/current/app-psql.html
+
+**Sort and order with SQL**
+```sql
+SELECT COUNT(*), event_json.room_id, room_stats_state.name
+  FROM event_json, room_stats_state
+  WHERE room_stats_state.room_id = event_json.room_id
+  GROUP BY event_json.room_id, room_stats_state.name
+  ORDER BY COUNT(*) DESC
+  LIMIT 50;
+```
+
 ### Result example:
 ```
 9459 !FPUfgzXYWTKgIrwKxW:matrix.org | This Week in Matrix
 ```
 
 ## Lookup room state info by list of room_id
+You get the same information when you use the
+[admin API](../../admin_api/rooms.md#room-details-api).
 ```sql
-SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption, rsc.joined_members, rsc.local_users_in_room, rss.join_rules
-FROM room_stats_state rss
-LEFT JOIN room_stats_current rsc USING (room_id)
-WHERE room_id IN (WHERE room_id IN (
-    '!OGEhHVWSdvArJzumhm:matrix.org',
-    '!YTvKGNlinIzlkMTVRl:matrix.org'
-)
-```
\ No newline at end of file
+SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption,
+    rsc.joined_members, rsc.local_users_in_room, rss.join_rules
+  FROM room_stats_state rss
+  LEFT JOIN room_stats_current rsc USING (room_id)
+  WHERE room_id IN (
+    '!OGEhHVWSdvArJzumhm:matrix.org',
+    '!YTvKGNlinIzlkMTVRl:matrix.org'
+  );
+```
+
+## Show users and devices that have not been online for a while
+```sql
+SELECT user_id, device_id, user_agent, TO_TIMESTAMP(last_seen / 1000) AS "last_seen"
+  FROM devices
+  WHERE last_seen < DATE_PART('epoch', NOW() - INTERVAL '3 month') * 1000;
+```
From d80a7ab151afd6919b4e14a258105ab59146d528 Mon Sep 17 00:00:00 2001
From: Shay
Date: Mon, 9 May 2022 14:46:43 -0700
Subject: [PATCH 73/87] Update `replication.md` with info on TCP module
 structure (#12621)

---
 changelog.d/12621.doc               | 1 +
 docs/replication.md                 | 5 +++++
 synapse/replication/tcp/__init__.py | 2 +-
 3 files changed, 7 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/12621.doc

diff --git a/changelog.d/12621.doc b/changelog.d/12621.doc
new file mode 100644
index 0000000000..d29fb9cb99
--- /dev/null
+++ b/changelog.d/12621.doc
@@ -0,0 +1 @@
+Add information about the TCP replication module to docs.
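As an aside on the SQL cookbook updated in PATCH 72/87 above: the queries are written for an interactive `psql` session, but they can equally be driven from a small script. A minimal sketch, assuming a local Postgres reachable via the `psycopg2` driver; the DSN values are placeholders and are not part of the patch:

```python
# Illustrative sketch only (not part of this patch series): run one of the
# documented admin queries with psycopg2. The DSN below is an assumption --
# substitute the credentials of your own Synapse database.
import psycopg2

DSN = "dbname=synapse user=synapse_user host=localhost"

# "Show top 20 larger tables by row count", verbatim from the docs above.
TOP_TABLES_BY_ROWS = """
    SELECT relname, n_live_tup AS "rows"
      FROM pg_stat_user_tables
      ORDER BY n_live_tup DESC
      LIMIT 20;
"""


def main() -> None:
    with psycopg2.connect(DSN) as conn:
        with conn.cursor() as cur:
            cur.execute(TOP_TABLES_BY_ROWS)
            for relname, n_rows in cur.fetchall():
                # n_live_tup is an estimate, as the docs note.
                print(f"{relname} - {n_rows}")


if __name__ == "__main__":
    main()
```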
diff --git a/docs/replication.md b/docs/replication.md
index e82df0de8a..108da9a065 100644
--- a/docs/replication.md
+++ b/docs/replication.md
@@ -35,3 +35,8 @@ See [the TCP replication documentation](tcp_replication.md).
 There are read-only versions of the synapse storage layer in
 `synapse/replication/slave/storage` that use the response of the replication
 API to invalidate their caches.
+
+### The TCP Replication Module
+Information about how the TCP replication module is structured, including how
+the classes interact, can be found in
+`synapse/replication/tcp/__init__.py`.
diff --git a/synapse/replication/tcp/__init__.py b/synapse/replication/tcp/__init__.py
index 1fa60af8e6..2c5f5f0bf8 100644
--- a/synapse/replication/tcp/__init__.py
+++ b/synapse/replication/tcp/__init__.py
@@ -15,7 +15,7 @@
 """This module implements the TCP replication protocol used by synapse to
 communicate between the master process and its workers (when they're enabled).
 
-Further details can be found in docs/tcp_replication.rst
+Further details can be found in docs/tcp_replication.md
 
 Structure of the module:

From ade30088212091284d066fc31a755a8a21050677 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=A0imon=20Brandner?=
Date: Tue, 10 May 2022 09:57:36 +0200
Subject: [PATCH 74/87] Implement MSC3786: Add a default push rule to ignore
 m.room.server_acl events (#12601)

Fixes vector-im/element-web#20788
Implements matrix-org/matrix-spec-proposals#3786
---
 changelog.d/12601.feature                   |  1 +
 synapse/config/experimental.py              |  3 ++
 synapse/push/baserules.py                   | 15 +++++++
 synapse/storage/databases/main/push_rule.py | 48 ++++++++++++++++-----
 4 files changed, 56 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/12601.feature

diff --git a/changelog.d/12601.feature b/changelog.d/12601.feature
new file mode 100644
index 0000000000..c13360ff35
--- /dev/null
+++ b/changelog.d/12601.feature
@@ -0,0 +1 @@
+Implement MSC3786: Add a default push rule to ignore m.room.server_acl events.
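Before reading the implementation that follows, it may help to see what the new default rule amounts to when matched against an event. This is a deliberately simplified sketch, not Synapse's push-rule evaluator: the real `event_match` condition supports glob patterns, nested keys such as `content.body`, and the shipped rule also carries a `_cache_key`, all omitted here.

```python
# Simplified illustration of the MSC3786 default override rule added below.
# Not Synapse's evaluator: only exact-string, top-level `event_match` is handled.
from typing import Any, Dict

SERVER_ACL_RULE: Dict[str, Any] = {
    "rule_id": "global/override/.org.matrix.msc3786.rule.room.server_acl",
    "conditions": [
        {"kind": "event_match", "key": "type", "pattern": "m.room.server_acl"}
    ],
    "actions": ["dont_notify"],
}


def rule_matches(rule: Dict[str, Any], event: Dict[str, Any]) -> bool:
    # Every condition must hold for the rule's actions to apply.
    return all(
        cond["kind"] == "event_match" and event.get(cond["key"]) == cond["pattern"]
        for cond in rule["conditions"]
    )


acl_event = {"type": "m.room.server_acl", "room_id": "!room:example.org"}
assert rule_matches(SERVER_ACL_RULE, acl_event)
print(SERVER_ACL_RULE["actions"])  # ["dont_notify"]: the event is ignored
```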
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index abed5e7edb..b20d949689 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -81,3 +81,6 @@ class ExperimentalConfig(Config): # MSC2815 (allow room moderators to view redacted event content) self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False) + + # MSC3786 (Add a default push rule to ignore m.room.server_acl events) + self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False) diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index f42f605f23..a17b35a605 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -277,6 +277,21 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ ], "actions": ["dont_notify"], }, + # XXX: This is an experimental rule that is only enabled if msc3786_enabled + # is enabled, if it is not the rule gets filtered out in _load_rules() in + # PushRulesWorkerStore + { + "rule_id": "global/override/.org.matrix.msc3786.rule.room.server_acl", + "conditions": [ + { + "kind": "event_match", + "key": "type", + "pattern": "m.room.server_acl", + "_cache_key": "_room_server_acl", + } + ], + "actions": ["dont_notify"], + }, ] diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index eb85bbd392..4ed913e248 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -17,6 +17,7 @@ import logging from typing import TYPE_CHECKING, Dict, List, Tuple, Union from synapse.api.errors import StoreError +from synapse.config.homeserver import ExperimentalConfig from synapse.push.baserules import list_with_base_rules from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.storage._base import SQLBaseStore, db_to_json @@ -42,7 +43,21 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -def _load_rules(rawrules, enabled_map): +def _is_experimental_rule_enabled( + rule_id: str, experimental_config: ExperimentalConfig +) -> bool: + """Used by `_load_rules` to filter out experimental rules when they + have not been enabled. + """ + if ( + rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl" + and not experimental_config.msc3786_enabled + ): + return False + return True + + +def _load_rules(rawrules, enabled_map, experimental_config: ExperimentalConfig): ruleslist = [] for rawrule in rawrules: rule = dict(rawrule) @@ -51,17 +66,26 @@ def _load_rules(rawrules, enabled_map): rule["default"] = False ruleslist.append(rule) - # We're going to be mutating this a lot, so do a deep copy - rules = list(list_with_base_rules(ruleslist)) + # We're going to be mutating this a lot, so copy it. We also filter out + # any experimental default push rules that aren't enabled. + rules = [ + rule + for rule in list_with_base_rules(ruleslist) + if _is_experimental_rule_enabled(rule["rule_id"], experimental_config) + ] for i, rule in enumerate(rules): rule_id = rule["rule_id"] - if rule_id in enabled_map: - if rule.get("enabled", True) != bool(enabled_map[rule_id]): - # Rules are cached across users. - rule = dict(rule) - rule["enabled"] = bool(enabled_map[rule_id]) - rules[i] = rule + + if rule_id not in enabled_map: + continue + if rule.get("enabled", True) == bool(enabled_map[rule_id]): + continue + + # Rules are cached across users. 
+ rule = dict(rule) + rule["enabled"] = bool(enabled_map[rule_id]) + rules[i] = rule return rules @@ -141,7 +165,7 @@ class PushRulesWorkerStore( enabled_map = await self.get_push_rules_enabled_for_user(user_id) - return _load_rules(rows, enabled_map) + return _load_rules(rows, enabled_map, self.hs.config.experimental) @cached(max_entries=5000) async def get_push_rules_enabled_for_user(self, user_id) -> Dict[str, bool]: @@ -200,7 +224,9 @@ class PushRulesWorkerStore( enabled_map_by_user = await self.bulk_get_push_rules_enabled(user_ids) for user_id, rules in results.items(): - results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {})) + results[user_id] = _load_rules( + rules, enabled_map_by_user.get(user_id, {}), self.hs.config.experimental + ) return results From 8dd3e0e084304dfc02ff072a1beaed5266cf4e33 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 May 2022 10:39:54 +0100 Subject: [PATCH 75/87] Immediately retry any requests that have backed off when a server comes back online. (#12500) Otherwise it can take up to a minute for any in-flight `/send` requests to be retried. --- changelog.d/12500.misc | 1 + synapse/http/matrixfederationclient.py | 15 ++++- synapse/notifier.py | 8 ++- synapse/util/async_helpers.py | 57 ++++++++++++++++++ synapse/util/retryutils.py | 24 +++++++- tests/util/test_async_helpers.py | 80 ++++++++++++++++++++++++++ 6 files changed, 179 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12500.misc diff --git a/changelog.d/12500.misc b/changelog.d/12500.misc new file mode 100644 index 0000000000..dbe3f7f5d1 --- /dev/null +++ b/changelog.d/12500.misc @@ -0,0 +1 @@ +Immediately retry any requests that have backed off when a server comes back online. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index e686445955..c2ec3caa0e 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -73,7 +73,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.types import JsonDict from synapse.util import json_decoder -from synapse.util.async_helpers import timeout_deferred +from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -353,6 +353,13 @@ class MatrixFederationHttpClient: self._cooperator = Cooperator(scheduler=schedule) + self._sleeper = AwakenableSleeper(self.reactor) + + def wake_destination(self, destination: str) -> None: + """Called when the remote server may have come back online.""" + + self._sleeper.wake(destination) + async def _send_request_with_optional_trailing_slash( self, request: MatrixFederationRequest, @@ -474,6 +481,8 @@ class MatrixFederationHttpClient: self._store, backoff_on_404=backoff_on_404, ignore_backoff=ignore_backoff, + notifier=self.hs.get_notifier(), + replication_client=self.hs.get_replication_command_handler(), ) method_bytes = request.method.encode("ascii") @@ -664,7 +673,9 @@ class MatrixFederationHttpClient: delay, ) - await self.clock.sleep(delay) + # Sleep for the calculated delay, or wake up immediately + # if we get notified that the server is back up. 
+ await self._sleeper.sleep(request.destination, delay * 1000) retries_left -= 1 else: raise diff --git a/synapse/notifier.py b/synapse/notifier.py index 16d15a1f33..01a50b9d62 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -228,9 +228,7 @@ class Notifier: # Called when there are new things to stream over replication self.replication_callbacks: List[Callable[[], None]] = [] - # Called when remote servers have come back online after having been - # down. - self.remote_server_up_callbacks: List[Callable[[str], None]] = [] + self._federation_client = hs.get_federation_http_client() self._third_party_rules = hs.get_third_party_event_rules() @@ -731,3 +729,7 @@ class Notifier: # circular dependencies. if self.federation_sender: self.federation_sender.wake_destination(server) + + # Tell the federation client about the fact the server is back up, so + # that any in flight requests can be immediately retried. + self._federation_client.wake_destination(server) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index b91020117f..7f1d41eb3c 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -778,3 +778,60 @@ def delay_cancellation(awaitable: Awaitable[T]) -> Awaitable[T]: new_deferred: "defer.Deferred[T]" = defer.Deferred(handle_cancel) deferred.chainDeferred(new_deferred) return new_deferred + + +class AwakenableSleeper: + """Allows explicitly waking up deferreds related to an entity that are + currently sleeping. + """ + + def __init__(self, reactor: IReactorTime) -> None: + self._streams: Dict[str, Set[defer.Deferred[None]]] = {} + self._reactor = reactor + + def wake(self, name: str) -> None: + """Wake everything related to `name` that is currently sleeping.""" + stream_set = self._streams.pop(name, set()) + for deferred in stream_set: + try: + with PreserveLoggingContext(): + deferred.callback(None) + except Exception: + pass + + async def sleep(self, name: str, delay_ms: int) -> None: + """Sleep for the given number of milliseconds, or return if the given + `name` is explicitly woken up. + """ + + # Create a deferred that gets called in N seconds + sleep_deferred: "defer.Deferred[None]" = defer.Deferred() + call = self._reactor.callLater(delay_ms / 1000, sleep_deferred.callback, None) + + # Create a deferred that will get called if `wake` is called with + # the same `name`. + stream_set = self._streams.setdefault(name, set()) + notify_deferred: "defer.Deferred[None]" = defer.Deferred() + stream_set.add(notify_deferred) + + try: + # Wait for either the delay or for `wake` to be called. 
+ await make_deferred_yieldable( + defer.DeferredList( + [sleep_deferred, notify_deferred], + fireOnOneCallback=True, + fireOnOneErrback=True, + consumeErrors=True, + ) + ) + finally: + # Clean up the state + curr_stream_set = self._streams.get(name) + if curr_stream_set is not None: + curr_stream_set.discard(notify_deferred) + if len(curr_stream_set) == 0: + self._streams.pop(name) + + # Cancel the sleep if we were woken up + if call.active(): + call.cancel() diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index d81f2527d7..81bfed268e 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -14,13 +14,17 @@ import logging import random from types import TracebackType -from typing import Any, Optional, Type +from typing import TYPE_CHECKING, Any, Optional, Type import synapse.logging.context from synapse.api.errors import CodeMessageException from synapse.storage import DataStore from synapse.util import Clock +if TYPE_CHECKING: + from synapse.notifier import Notifier + from synapse.replication.tcp.handler import ReplicationCommandHandler + logger = logging.getLogger(__name__) # the initial backoff, after the first transaction fails @@ -131,6 +135,8 @@ class RetryDestinationLimiter: retry_interval: int, backoff_on_404: bool = False, backoff_on_failure: bool = True, + notifier: Optional["Notifier"] = None, + replication_client: Optional["ReplicationCommandHandler"] = None, ): """Marks the destination as "down" if an exception is thrown in the context, except for CodeMessageException with code < 500. @@ -160,6 +166,9 @@ class RetryDestinationLimiter: self.backoff_on_404 = backoff_on_404 self.backoff_on_failure = backoff_on_failure + self.notifier = notifier + self.replication_client = replication_client + def __enter__(self) -> None: pass @@ -239,6 +248,19 @@ class RetryDestinationLimiter: retry_last_ts, self.retry_interval, ) + + if self.notifier: + # Inform the relevant places that the remote server is back up. + self.notifier.notify_remote_server_up(self.destination) + + if self.replication_client: + # If we're on a worker we try and inform master about this. The + # replication client doesn't hook into the notifier to avoid + # infinite loops where we send a `REMOTE_SERVER_UP` command to + # master, which then echoes it back to us which in turn pokes + # the notifier. + self.replication_client.send_remote_server_up(self.destination) + except Exception: logger.exception("Failed to store destination_retry_timings") diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index daacc54c72..9d5010bf92 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -28,6 +28,7 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.util.async_helpers import ( + AwakenableSleeper, ObservableDeferred, concurrently_execute, delay_cancellation, @@ -35,6 +36,7 @@ from synapse.util.async_helpers import ( timeout_deferred, ) +from tests.server import get_clock from tests.unittest import TestCase @@ -496,3 +498,81 @@ class DelayCancellationTests(TestCase): # logging context. 
blocking_d.callback(None) self.successResultOf(d) + + +class AwakenableSleeperTests(TestCase): + "Tests AwakenableSleeper" + + def test_sleep(self): + reactor, _ = get_clock() + sleeper = AwakenableSleeper(reactor) + + d = defer.ensureDeferred(sleeper.sleep("name", 1000)) + + reactor.pump([0.0]) + self.assertFalse(d.called) + + reactor.advance(0.5) + self.assertFalse(d.called) + + reactor.advance(0.6) + self.assertTrue(d.called) + + def test_explicit_wake(self): + reactor, _ = get_clock() + sleeper = AwakenableSleeper(reactor) + + d = defer.ensureDeferred(sleeper.sleep("name", 1000)) + + reactor.pump([0.0]) + self.assertFalse(d.called) + + reactor.advance(0.5) + self.assertFalse(d.called) + + sleeper.wake("name") + self.assertTrue(d.called) + + reactor.advance(0.6) + + def test_multiple_sleepers_timeout(self): + reactor, _ = get_clock() + sleeper = AwakenableSleeper(reactor) + + d1 = defer.ensureDeferred(sleeper.sleep("name", 1000)) + + reactor.advance(0.6) + self.assertFalse(d1.called) + + # Add another sleeper + d2 = defer.ensureDeferred(sleeper.sleep("name", 1000)) + + # Only the first sleep should time out now. + reactor.advance(0.6) + self.assertTrue(d1.called) + self.assertFalse(d2.called) + + reactor.advance(0.6) + self.assertTrue(d2.called) + + def test_multiple_sleepers_wake(self): + reactor, _ = get_clock() + sleeper = AwakenableSleeper(reactor) + + d1 = defer.ensureDeferred(sleeper.sleep("name", 1000)) + + reactor.advance(0.5) + self.assertFalse(d1.called) + + # Add another sleeper + d2 = defer.ensureDeferred(sleeper.sleep("name", 1000)) + + # Neither should fire yet + reactor.advance(0.3) + self.assertFalse(d1.called) + self.assertFalse(d2.called) + + # Explicitly waking both up works + sleeper.wake("name") + self.assertTrue(d1.called) + self.assertTrue(d2.called) From e5fd23fb6f02f655aa91b5a7be9e5bb839438e25 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 10:45:13 +0100 Subject: [PATCH 76/87] 1.59.0rc1 --- CHANGES.md | 92 +++++++++++++++++++++++++++++++++++++++ changelog.d/11507.feature | 1 - changelog.d/12168.feature | 1 - changelog.d/12273.bugfix | 1 - changelog.d/12356.misc | 1 - changelog.d/12406.feature | 1 - changelog.d/12452.feature | 1 - changelog.d/12480.misc | 1 - changelog.d/12485.misc | 1 - changelog.d/12500.misc | 1 - changelog.d/12505.misc | 1 - changelog.d/12526.feature | 1 - changelog.d/12531.misc | 1 - changelog.d/12536.doc | 1 - changelog.d/12541.docker | 1 - changelog.d/12544.bugfix | 1 - changelog.d/12556.misc | 1 - changelog.d/12564.misc | 1 - changelog.d/12568.misc | 1 - changelog.d/12570.bugfix | 1 - changelog.d/12573.docker | 1 - changelog.d/12576.misc | 1 - changelog.d/12577.misc | 1 - changelog.d/12579.doc | 1 - changelog.d/12580.bugfix | 1 - changelog.d/12581.misc | 1 - changelog.d/12582.misc | 1 - changelog.d/12587.misc | 1 - changelog.d/12589.misc | 1 - changelog.d/12594.bugfix | 1 - changelog.d/12596.removal | 1 - changelog.d/12597.removal | 2 - changelog.d/12599.misc | 1 - changelog.d/12601.feature | 1 - changelog.d/12602.misc | 1 - changelog.d/12608.misc | 1 - changelog.d/12610.misc | 1 - changelog.d/12612.bugfix | 1 - changelog.d/12613.removal | 1 - changelog.d/12614.misc | 1 - changelog.d/12616.misc | 1 - changelog.d/12619.feature | 1 - changelog.d/12620.misc | 1 - changelog.d/12621.doc | 1 - changelog.d/12624.misc | 1 - changelog.d/12627.doc | 1 - changelog.d/12632.misc | 1 - changelog.d/12633.bugfix | 1 - changelog.d/12635.feature | 1 - changelog.d/12636.feature | 1 - changelog.d/12637.misc | 1 - 
changelog.d/12639.bugfix | 1 - changelog.d/12650.misc | 1 - changelog.d/12652.misc | 1 - changelog.d/12656.misc | 1 - changelog.d/12657.bugfix | 1 - changelog.d/12663.misc | 1 - changelog.d/12664.doc | 1 - changelog.d/12665.misc | 1 - changelog.d/12666.misc | 1 - changelog.d/12667.misc | 1 - changelog.d/12670.feature | 1 - changelog.d/12671.misc | 1 - debian/changelog | 5 ++- pyproject.toml | 2 +- 65 files changed, 96 insertions(+), 66 deletions(-) delete mode 100644 changelog.d/11507.feature delete mode 100644 changelog.d/12168.feature delete mode 100644 changelog.d/12273.bugfix delete mode 100644 changelog.d/12356.misc delete mode 100644 changelog.d/12406.feature delete mode 100644 changelog.d/12452.feature delete mode 100644 changelog.d/12480.misc delete mode 100644 changelog.d/12485.misc delete mode 100644 changelog.d/12500.misc delete mode 100644 changelog.d/12505.misc delete mode 100644 changelog.d/12526.feature delete mode 100644 changelog.d/12531.misc delete mode 100644 changelog.d/12536.doc delete mode 100644 changelog.d/12541.docker delete mode 100644 changelog.d/12544.bugfix delete mode 100644 changelog.d/12556.misc delete mode 100644 changelog.d/12564.misc delete mode 100644 changelog.d/12568.misc delete mode 100644 changelog.d/12570.bugfix delete mode 100644 changelog.d/12573.docker delete mode 100644 changelog.d/12576.misc delete mode 100644 changelog.d/12577.misc delete mode 100644 changelog.d/12579.doc delete mode 100644 changelog.d/12580.bugfix delete mode 100644 changelog.d/12581.misc delete mode 100644 changelog.d/12582.misc delete mode 100644 changelog.d/12587.misc delete mode 100644 changelog.d/12589.misc delete mode 100644 changelog.d/12594.bugfix delete mode 100644 changelog.d/12596.removal delete mode 100644 changelog.d/12597.removal delete mode 100644 changelog.d/12599.misc delete mode 100644 changelog.d/12601.feature delete mode 100644 changelog.d/12602.misc delete mode 100644 changelog.d/12608.misc delete mode 100644 changelog.d/12610.misc delete mode 100644 changelog.d/12612.bugfix delete mode 100644 changelog.d/12613.removal delete mode 100644 changelog.d/12614.misc delete mode 100644 changelog.d/12616.misc delete mode 100644 changelog.d/12619.feature delete mode 100644 changelog.d/12620.misc delete mode 100644 changelog.d/12621.doc delete mode 100644 changelog.d/12624.misc delete mode 100644 changelog.d/12627.doc delete mode 100644 changelog.d/12632.misc delete mode 100644 changelog.d/12633.bugfix delete mode 100644 changelog.d/12635.feature delete mode 100644 changelog.d/12636.feature delete mode 100644 changelog.d/12637.misc delete mode 100644 changelog.d/12639.bugfix delete mode 100644 changelog.d/12650.misc delete mode 100644 changelog.d/12652.misc delete mode 100644 changelog.d/12656.misc delete mode 100644 changelog.d/12657.bugfix delete mode 100644 changelog.d/12663.misc delete mode 100644 changelog.d/12664.doc delete mode 100644 changelog.d/12665.misc delete mode 100644 changelog.d/12666.misc delete mode 100644 changelog.d/12667.misc delete mode 100644 changelog.d/12670.feature delete mode 100644 changelog.d/12671.misc diff --git a/CHANGES.md b/CHANGES.md index 88b053897e..9376e83870 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,95 @@ +Synapse 1.59.0rc1 (2022-05-10) +============================== + +Features +-------- + +- Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. 
([\#11507](https://github.com/matrix-org/synapse/issues/11507)) +- Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670)) +- Add a module API to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406)) +- Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452)) +- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12526](https://github.com/matrix-org/synapse/issues/12526)) +- Implement MSC3786: Add a default push rule to ignore m.room.server_acl events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601)) +- Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse v1.48.0 where latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273)) +- Fix a bug where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544)) +- Fix a bug introduced in Synapse 1.57 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570)) +- Fix a long standing bug where status codes would almost always get logged as 200!, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580)) +- Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594)) +- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612)) +- Fix a bug introduced in Synapse v1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633)) +- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12639](https://github.com/matrix-org/synapse/issues/12639)) +- Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. 
([\#12657](https://github.com/matrix-org/synapse/issues/12657)) + + +Updates to the Docker image +--------------------------- + +- Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. ([\#12541](https://github.com/matrix-org/synapse/issues/12541)) +- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573)) + + +Improved Documentation +---------------------- + +- Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536)) +- Add missing linebreak to pipx install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579)) +- Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621)) +- Fixes to the formatting of README.rst. ([\#12627](https://github.com/matrix-org/synapse/issues/12627)) +- Fix docs on how to run specific Complement tests using the `complement.sh` test runner. ([\#12664](https://github.com/matrix-org/synapse/issues/12664)) + + +Deprecations and Removals +------------------------- + +- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596)) +- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from + [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597)) +- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613)) + + +Internal Changes +---------------- + +- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356)) +- Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480)) +- Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485)) +- Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500)) +- Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505)) +- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531)) +- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556)) +- Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564)) +- Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568)) +- Allow unused `#type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576)) +- Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577)) +- Improve docstrings for the receipts store. 
([\#12581](https://github.com/matrix-org/synapse/issues/12581)) +- Use constants for read-receipts in tests. ([\#12582](https://github.com/matrix-org/synapse/issues/12582)) +- Log status code of cancelled requests as 499 and avoid logging stack traces for them. ([\#12587](https://github.com/matrix-org/synapse/issues/12587), [\#12663](https://github.com/matrix-org/synapse/issues/12663)) +- Remove special-case for `twisted` logger from default log config. ([\#12589](https://github.com/matrix-org/synapse/issues/12589)) +- Use `getClientAddress` instead of the deprecated `getClientIP`. ([\#12599](https://github.com/matrix-org/synapse/issues/12599)) +- Add link to documentation in Grafana Dashboard. ([\#12602](https://github.com/matrix-org/synapse/issues/12602)) +- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608)) +- Reduce log spam when running multiple event persisters. ([\#12610](https://github.com/matrix-org/synapse/issues/12610)) +- Add extra debug logging to federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614)) +- Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616)) +- Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620)) +- Remove use of constantly library and switch to enums for EventRedactBehaviour. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624)) +- Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632)) +- Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637)) +- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650)) +- Move `pympler` back in to the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652)) +- Prevent memory leak from reoccurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656)) +- Fix spelling of `M_UNRECOGNIZED` in comments. ([\#12665](https://github.com/matrix-org/synapse/issues/12665)) +- Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666)) +- Use `ParamSpec` to refine type hints. ([\#12667](https://github.com/matrix-org/synapse/issues/12667)) +- Fix mypy against latest pillow stubs. ([\#12671](https://github.com/matrix-org/synapse/issues/12671)) + + Synapse 1.59.0 ============== diff --git a/changelog.d/11507.feature b/changelog.d/11507.feature deleted file mode 100644 index 72c5690cca..0000000000 --- a/changelog.d/11507.feature +++ /dev/null @@ -1 +0,0 @@ -Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. diff --git a/changelog.d/12168.feature b/changelog.d/12168.feature deleted file mode 100644 index cd5c45029e..0000000000 --- a/changelog.d/12168.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. 
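Stepping back to the `AwakenableSleeper` introduced in PATCH 75/87 above: the new unit tests drive it with a fake clock, and the same pattern works as a compact standalone illustration. A minimal sketch, assuming Twisted's `task.Clock` stands in for the reactor:

```python
# Illustrative sketch only (not part of this patch series), mirroring the
# AwakenableSleeperTests above: a back-off sleep that ends early on wake().
from twisted.internet import defer
from twisted.internet.task import Clock

from synapse.util.async_helpers import AwakenableSleeper

reactor = Clock()
sleeper = AwakenableSleeper(reactor)

# Back off for 60s against a remote destination, as the federation
# client now does between retries.
d = defer.ensureDeferred(sleeper.sleep("remote.example.com", 60_000))

reactor.advance(1.0)
assert not d.called  # still backing off

# A REMOTE_SERVER_UP notification wakes every sleeper for that destination...
sleeper.wake("remote.example.com")
assert d.called  # ...so the in-flight request can retry immediately
```

This is the behaviour the rest of that patch wires up: `Notifier.notify_remote_server_up` calls `MatrixFederationHttpClient.wake_destination`, which wakes any request currently sleeping in its back-off loop.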
diff --git a/changelog.d/12273.bugfix b/changelog.d/12273.bugfix deleted file mode 100644 index f8d7b6c889..0000000000 --- a/changelog.d/12273.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.48.0 where latest thread reply provided failed to include the proper bundled aggregations. diff --git a/changelog.d/12356.misc b/changelog.d/12356.misc deleted file mode 100644 index 43e1929106..0000000000 --- a/changelog.d/12356.misc +++ /dev/null @@ -1 +0,0 @@ -Fix scripts-dev to pass typechecking. \ No newline at end of file diff --git a/changelog.d/12406.feature b/changelog.d/12406.feature deleted file mode 100644 index e345afdee7..0000000000 --- a/changelog.d/12406.feature +++ /dev/null @@ -1 +0,0 @@ -Add a module API to allow modules to change actions for existing push rules of local users. diff --git a/changelog.d/12452.feature b/changelog.d/12452.feature deleted file mode 100644 index 22f054d344..0000000000 --- a/changelog.d/12452.feature +++ /dev/null @@ -1 +0,0 @@ -Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. \ No newline at end of file diff --git a/changelog.d/12480.misc b/changelog.d/12480.misc deleted file mode 100644 index 18a85e7b15..0000000000 --- a/changelog.d/12480.misc +++ /dev/null @@ -1 +0,0 @@ -Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. \ No newline at end of file diff --git a/changelog.d/12485.misc b/changelog.d/12485.misc deleted file mode 100644 index e793d08e5e..0000000000 --- a/changelog.d/12485.misc +++ /dev/null @@ -1 +0,0 @@ -Add some type hints to datastore. \ No newline at end of file diff --git a/changelog.d/12500.misc b/changelog.d/12500.misc deleted file mode 100644 index dbe3f7f5d1..0000000000 --- a/changelog.d/12500.misc +++ /dev/null @@ -1 +0,0 @@ -Immediately retry any requests that have backed off when a server comes back online. diff --git a/changelog.d/12505.misc b/changelog.d/12505.misc deleted file mode 100644 index a691d7962f..0000000000 --- a/changelog.d/12505.misc +++ /dev/null @@ -1 +0,0 @@ -Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. diff --git a/changelog.d/12526.feature b/changelog.d/12526.feature deleted file mode 100644 index c01596282c..0000000000 --- a/changelog.d/12526.feature +++ /dev/null @@ -1 +0,0 @@ -Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. \ No newline at end of file diff --git a/changelog.d/12531.misc b/changelog.d/12531.misc deleted file mode 100644 index 412fc9b6dc..0000000000 --- a/changelog.d/12531.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused `# type: ignore`s. diff --git a/changelog.d/12536.doc b/changelog.d/12536.doc deleted file mode 100644 index 4034c42076..0000000000 --- a/changelog.d/12536.doc +++ /dev/null @@ -1 +0,0 @@ -Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. diff --git a/changelog.d/12541.docker b/changelog.d/12541.docker deleted file mode 100644 index c3b9c31657..0000000000 --- a/changelog.d/12541.docker +++ /dev/null @@ -1 +0,0 @@ -Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. 
diff --git a/changelog.d/12544.bugfix b/changelog.d/12544.bugfix deleted file mode 100644 index b5169cd831..0000000000 --- a/changelog.d/12544.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. diff --git a/changelog.d/12556.misc b/changelog.d/12556.misc deleted file mode 100644 index dc245397fb..0000000000 --- a/changelog.d/12556.misc +++ /dev/null @@ -1 +0,0 @@ -Release script: confirm the commit to be tagged before tagging. diff --git a/changelog.d/12564.misc b/changelog.d/12564.misc deleted file mode 100644 index 207c322464..0000000000 --- a/changelog.d/12564.misc +++ /dev/null @@ -1 +0,0 @@ -Consistently check if an object is a `frozendict`. diff --git a/changelog.d/12568.misc b/changelog.d/12568.misc deleted file mode 100644 index f64dc67c4f..0000000000 --- a/changelog.d/12568.misc +++ /dev/null @@ -1 +0,0 @@ -Protect module callbacks with read semantics against cancellation. diff --git a/changelog.d/12570.bugfix b/changelog.d/12570.bugfix deleted file mode 100644 index 1038646f35..0000000000 --- a/changelog.d/12570.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.57 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. diff --git a/changelog.d/12573.docker b/changelog.d/12573.docker deleted file mode 100644 index 5cc8de50ac..0000000000 --- a/changelog.d/12573.docker +++ /dev/null @@ -1 +0,0 @@ -Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by henryclw. \ No newline at end of file diff --git a/changelog.d/12576.misc b/changelog.d/12576.misc deleted file mode 100644 index 71022c8633..0000000000 --- a/changelog.d/12576.misc +++ /dev/null @@ -1 +0,0 @@ -Allow unused `#type: ignore` comments in bleeding edge CI jobs. diff --git a/changelog.d/12577.misc b/changelog.d/12577.misc deleted file mode 100644 index 8c4c47ad52..0000000000 --- a/changelog.d/12577.misc +++ /dev/null @@ -1 +0,0 @@ -Improve comments and error messages around access tokens. \ No newline at end of file diff --git a/changelog.d/12579.doc b/changelog.d/12579.doc deleted file mode 100644 index bcec5fe1af..0000000000 --- a/changelog.d/12579.doc +++ /dev/null @@ -1 +0,0 @@ -Add missing linebreak to pipx install instructions. diff --git a/changelog.d/12580.bugfix b/changelog.d/12580.bugfix deleted file mode 100644 index bedce405e2..0000000000 --- a/changelog.d/12580.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long standing bug where status codes would almost always get logged as 200!, irrespective of the actual status code, when clients disconnect before a request has finished processing. diff --git a/changelog.d/12581.misc b/changelog.d/12581.misc deleted file mode 100644 index 38d40b262b..0000000000 --- a/changelog.d/12581.misc +++ /dev/null @@ -1 +0,0 @@ -Improve docstrings for the receipts store. diff --git a/changelog.d/12582.misc b/changelog.d/12582.misc deleted file mode 100644 index 5fa9c9afe8..0000000000 --- a/changelog.d/12582.misc +++ /dev/null @@ -1 +0,0 @@ -Use constants for read-receipts in tests. 
diff --git a/changelog.d/12587.misc b/changelog.d/12587.misc deleted file mode 100644 index 3b466f1ddf..0000000000 --- a/changelog.d/12587.misc +++ /dev/null @@ -1 +0,0 @@ -Log status code of cancelled requests as 499 and avoid logging stack traces for them. diff --git a/changelog.d/12589.misc b/changelog.d/12589.misc deleted file mode 100644 index d362828d2e..0000000000 --- a/changelog.d/12589.misc +++ /dev/null @@ -1 +0,0 @@ -Remove special-case for `twisted` logger from default log config. diff --git a/changelog.d/12594.bugfix b/changelog.d/12594.bugfix deleted file mode 100644 index 7411d9c079..0000000000 --- a/changelog.d/12594.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. diff --git a/changelog.d/12596.removal b/changelog.d/12596.removal deleted file mode 100644 index 14fbfb3954..0000000000 --- a/changelog.d/12596.removal +++ /dev/null @@ -1 +0,0 @@ -Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). diff --git a/changelog.d/12597.removal b/changelog.d/12597.removal deleted file mode 100644 index 7927f1d68d..0000000000 --- a/changelog.d/12597.removal +++ /dev/null @@ -1,2 +0,0 @@ -Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from -[MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). diff --git a/changelog.d/12599.misc b/changelog.d/12599.misc deleted file mode 100644 index d01278bbce..0000000000 --- a/changelog.d/12599.misc +++ /dev/null @@ -1 +0,0 @@ -Use `getClientAddress` instead of the deprecated `getClientIP`. diff --git a/changelog.d/12601.feature b/changelog.d/12601.feature deleted file mode 100644 index c13360ff35..0000000000 --- a/changelog.d/12601.feature +++ /dev/null @@ -1 +0,0 @@ -Implement MSC3786: Add a default push rule to ignore m.room.server_acl events. diff --git a/changelog.d/12602.misc b/changelog.d/12602.misc deleted file mode 100644 index cdccc5c316..0000000000 --- a/changelog.d/12602.misc +++ /dev/null @@ -1 +0,0 @@ -Add link to documentation in Grafana Dashboard. diff --git a/changelog.d/12608.misc b/changelog.d/12608.misc deleted file mode 100644 index 38272118fb..0000000000 --- a/changelog.d/12608.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant lines of config from `mypy.ini`. \ No newline at end of file diff --git a/changelog.d/12610.misc b/changelog.d/12610.misc deleted file mode 100644 index 02efe0c72f..0000000000 --- a/changelog.d/12610.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce log spam when running multiple event persisters. diff --git a/changelog.d/12612.bugfix b/changelog.d/12612.bugfix deleted file mode 100644 index c39e97f0cb..0000000000 --- a/changelog.d/12612.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a typo in the announcement text generated by the Synapse release development script. \ No newline at end of file diff --git a/changelog.d/12613.removal b/changelog.d/12613.removal deleted file mode 100644 index b1a9e207b0..0000000000 --- a/changelog.d/12613.removal +++ /dev/null @@ -1 +0,0 @@ -Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. diff --git a/changelog.d/12614.misc b/changelog.d/12614.misc deleted file mode 100644 index 79022df127..0000000000 --- a/changelog.d/12614.misc +++ /dev/null @@ -1 +0,0 @@ -Add extra debug logging to federation sender. 
diff --git a/changelog.d/12616.misc b/changelog.d/12616.misc deleted file mode 100644 index d17ce24cdf..0000000000 --- a/changelog.d/12616.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent remote homeservers from requesting local user device names by default. \ No newline at end of file diff --git a/changelog.d/12619.feature b/changelog.d/12619.feature deleted file mode 100644 index b0fc0f5fed..0000000000 --- a/changelog.d/12619.feature +++ /dev/null @@ -1 +0,0 @@ -Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. diff --git a/changelog.d/12620.misc b/changelog.d/12620.misc deleted file mode 100644 index 63f8e540c3..0000000000 --- a/changelog.d/12620.misc +++ /dev/null @@ -1 +0,0 @@ -Add a consistency check on events which we read from the database. diff --git a/changelog.d/12621.doc b/changelog.d/12621.doc deleted file mode 100644 index d29fb9cb99..0000000000 --- a/changelog.d/12621.doc +++ /dev/null @@ -1 +0,0 @@ -Add information about the TCP replication module to docs. diff --git a/changelog.d/12624.misc b/changelog.d/12624.misc deleted file mode 100644 index 8772d40fa7..0000000000 --- a/changelog.d/12624.misc +++ /dev/null @@ -1 +0,0 @@ -Remove use of constantly library and switch to enums for EventRedactBehaviour. Contributed by @andrewdoh. diff --git a/changelog.d/12627.doc b/changelog.d/12627.doc deleted file mode 100644 index 3a787dfef2..0000000000 --- a/changelog.d/12627.doc +++ /dev/null @@ -1 +0,0 @@ -Fixes to the formatting of README.rst. diff --git a/changelog.d/12632.misc b/changelog.d/12632.misc deleted file mode 100644 index 9e4ba79c79..0000000000 --- a/changelog.d/12632.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused code related to receipts. diff --git a/changelog.d/12633.bugfix b/changelog.d/12633.bugfix deleted file mode 100644 index 32332acd9a..0000000000 --- a/changelog.d/12633.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. diff --git a/changelog.d/12635.feature b/changelog.d/12635.feature deleted file mode 100644 index cd5c45029e..0000000000 --- a/changelog.d/12635.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. diff --git a/changelog.d/12636.feature b/changelog.d/12636.feature deleted file mode 100644 index cd5c45029e..0000000000 --- a/changelog.d/12636.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. diff --git a/changelog.d/12637.misc b/changelog.d/12637.misc deleted file mode 100644 index 735257787f..0000000000 --- a/changelog.d/12637.misc +++ /dev/null @@ -1 +0,0 @@ -Minor improvements to the scripts for running Synapse in worker mode under Complement. diff --git a/changelog.d/12639.bugfix b/changelog.d/12639.bugfix deleted file mode 100644 index c01596282c..0000000000 --- a/changelog.d/12639.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. 
\ No newline at end of file diff --git a/changelog.d/12650.misc b/changelog.d/12650.misc deleted file mode 100644 index 07bb4ce5a9..0000000000 --- a/changelog.d/12650.misc +++ /dev/null @@ -1 +0,0 @@ -Update to mypy 0.950. \ No newline at end of file diff --git a/changelog.d/12652.misc b/changelog.d/12652.misc deleted file mode 100644 index 7b7c1cf5ff..0000000000 --- a/changelog.d/12652.misc +++ /dev/null @@ -1 +0,0 @@ -Move `pympler` back in to the `all` extras. diff --git a/changelog.d/12656.misc b/changelog.d/12656.misc deleted file mode 100644 index 8a8743e614..0000000000 --- a/changelog.d/12656.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent memory leak from reoccurring when presence is disabled. diff --git a/changelog.d/12657.bugfix b/changelog.d/12657.bugfix deleted file mode 100644 index 7547ca40a7..0000000000 --- a/changelog.d/12657.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. diff --git a/changelog.d/12663.misc b/changelog.d/12663.misc deleted file mode 100644 index 3b466f1ddf..0000000000 --- a/changelog.d/12663.misc +++ /dev/null @@ -1 +0,0 @@ -Log status code of cancelled requests as 499 and avoid logging stack traces for them. diff --git a/changelog.d/12664.doc b/changelog.d/12664.doc deleted file mode 100644 index 142d18037a..0000000000 --- a/changelog.d/12664.doc +++ /dev/null @@ -1 +0,0 @@ -Fix docs on how to run specific Complement tests using the `complement.sh` test runner. diff --git a/changelog.d/12665.misc b/changelog.d/12665.misc deleted file mode 100644 index 37b96fea37..0000000000 --- a/changelog.d/12665.misc +++ /dev/null @@ -1 +0,0 @@ -Fix spelling of `M_UNRECOGNIZED` in comments. diff --git a/changelog.d/12666.misc b/changelog.d/12666.misc deleted file mode 100644 index 96268e33f5..0000000000 --- a/changelog.d/12666.misc +++ /dev/null @@ -1 +0,0 @@ -Use `Concatenate` to better annotate `_do_execute`. diff --git a/changelog.d/12667.misc b/changelog.d/12667.misc deleted file mode 100644 index 2b17502d6b..0000000000 --- a/changelog.d/12667.misc +++ /dev/null @@ -1 +0,0 @@ -Use `ParamSpec` to refine type hints. diff --git a/changelog.d/12670.feature b/changelog.d/12670.feature deleted file mode 100644 index cd5c45029e..0000000000 --- a/changelog.d/12670.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. diff --git a/changelog.d/12671.misc b/changelog.d/12671.misc deleted file mode 100644 index 56df4e3831..0000000000 --- a/changelog.d/12671.misc +++ /dev/null @@ -1 +0,0 @@ -Fix mypy against latest pillow stubs. diff --git a/debian/changelog b/debian/changelog index 5b21e0d369..fabc690bae 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,10 +1,11 @@ -matrix-synapse-py3 (1.58.2) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.59.0~rc1) stable; urgency=medium * Adjust how the `exported-requirements.txt` file is generated as part of the process of building these packages. This affects the package maintainers only; end-users are unaffected. + * New Synapse release 1.59.0rc1. 
- -- Synapse Packaging team  Fri, 06 May 2022 13:49:29 +0100
+ -- Synapse Packaging team  Tue, 10 May 2022 10:45:08 +0100
 
 matrix-synapse-py3 (1.58.1) stable; urgency=medium
 
diff --git a/pyproject.toml b/pyproject.toml
index 2c4b7eb08e..e3d81ae5f1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,7 +54,7 @@ skip_gitignore = true
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.58.1"
+version = "1.59.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors "]
 license = "Apache-2.0"

From 2cdac6f5854fa8cf2ace6db335a3459db5cde329 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 10 May 2022 11:06:58 +0100
Subject: [PATCH 77/87] Adjust changelog

---
 CHANGES.md | 54 +++++++++++++++++++++++++++++-------------------------
 1 file changed, 29 insertions(+), 25 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index 9376e83870..034045a586 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,44 +1,52 @@
 Synapse 1.59.0rc1 (2022-05-10)
 ==============================
 
+This release makes several changes that server administrators should be aware of:
+
+- Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
+- The `synapse.app.appservice` is now deprecated. ([\#12452](https://github.com/matrix-org/synapse/pull/12452))
+
+See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details.
+
+Additionally, this release removes the non-standard `m.login.jwt` login type from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration. ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
+
 Features
 --------
 
 - Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. ([\#11507](https://github.com/matrix-org/synapse/issues/11507))
 - Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670))
-- Add a module API to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406))
+- Extend the [module API](https://github.com/matrix-org/synapse/blob/release-v1.59/synapse/module_api/__init__.py) to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406))
 - Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452))
 - Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12526](https://github.com/matrix-org/synapse/issues/12526))
-- Implement MSC3786: Add a default push rule to ignore m.room.server_acl events.
([\#12601](https://github.com/matrix-org/synapse/issues/12601)) +- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore m.room.server_acl events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601)) - Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619)) Bugfixes -------- -- Fix a bug introduced in Synapse v1.48.0 where latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273)) -- Fix a bug where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544)) -- Fix a bug introduced in Synapse 1.57 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570)) -- Fix a long standing bug where status codes would almost always get logged as 200!, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580)) +- Fix a bug introduced in Synapse v1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273)) +- Fix a bug introduced in Synapse v1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544)) +- Fix a bug introduced in Synapse v1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570)) +- Fix a long-standing bug where status codes would almost always get logged as `200!`, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580)) - Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594)) -- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612)) - Fix a bug introduced in Synapse v1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633)) -- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12639](https://github.com/matrix-org/synapse/issues/12639)) - Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. ([\#12657](https://github.com/matrix-org/synapse/issues/12657)) +- Prevent memory leak from reoccurring when presence is disabled. 
([\#12656](https://github.com/matrix-org/synapse/issues/12656)) Updates to the Docker image --------------------------- - Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. ([\#12541](https://github.com/matrix-org/synapse/issues/12541)) -- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573)) +- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by @henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573)) Improved Documentation ---------------------- - Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536)) -- Add missing linebreak to pipx install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579)) +- Add missing linebreak to `pipx` install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579)) - Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621)) - Fixes to the formatting of README.rst. ([\#12627](https://github.com/matrix-org/synapse/issues/12627)) - Fix docs on how to run specific Complement tests using the `complement.sh` test runner. ([\#12664](https://github.com/matrix-org/synapse/issues/12664)) @@ -56,16 +64,12 @@ Deprecations and Removals Internal Changes ---------------- -- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356)) - Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480)) -- Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485)) - Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500)) - Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505)) -- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531)) - Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556)) - Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564)) - Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568)) -- Allow unused `#type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576)) - Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577)) - Improve docstrings for the receipts store. ([\#12581](https://github.com/matrix-org/synapse/issues/12581)) - Use constants for read-receipts in tests. ([\#12582](https://github.com/matrix-org/synapse/issues/12582)) @@ -73,29 +77,29 @@ Internal Changes - Remove special-case for `twisted` logger from default log config. 
([\#12589](https://github.com/matrix-org/synapse/issues/12589)) - Use `getClientAddress` instead of the deprecated `getClientIP`. ([\#12599](https://github.com/matrix-org/synapse/issues/12599)) - Add link to documentation in Grafana Dashboard. ([\#12602](https://github.com/matrix-org/synapse/issues/12602)) -- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608)) - Reduce log spam when running multiple event persisters. ([\#12610](https://github.com/matrix-org/synapse/issues/12610)) - Add extra debug logging to federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614)) - Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616)) - Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620)) -- Remove use of constantly library and switch to enums for EventRedactBehaviour. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624)) +- Remove use of `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624)) - Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632)) - Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637)) -- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650)) - Move `pympler` back in to the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652)) -- Prevent memory leak from reoccurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656)) - Fix spelling of `M_UNRECOGNIZED` in comments. ([\#12665](https://github.com/matrix-org/synapse/issues/12665)) +- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612)) + +### Typechecking + +- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356)) +- Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485)) +- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531)) +- Allow unused `#type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576)) +- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608)) +- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650)) - Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666)) - Use `ParamSpec` to refine type hints. ([\#12667](https://github.com/matrix-org/synapse/issues/12667)) - Fix mypy against latest pillow stubs. ([\#12671](https://github.com/matrix-org/synapse/issues/12671)) - -Synapse 1.59.0 -============== - -The non-standard `m.login.jwt` login type has been removed from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration. 
- - Synapse 1.58.1 (2022-05-05) =========================== From 8ef0d85acd460a1e5f64feef89968acda9a9d140 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 11:07:44 +0100 Subject: [PATCH 78/87] Changelog typo --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 034045a586..cec3e29c49 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -93,7 +93,7 @@ Internal Changes - Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356)) - Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485)) - Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531)) -- Allow unused `#type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576)) +- Allow unused `# type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576)) - Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608)) - Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650)) - Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666)) From 699192fc1a1055a4bec2345bc80f120f28470c73 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 10 May 2022 11:08:45 +0100 Subject: [PATCH 79/87] Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. (#12654) Co-authored-by: Shay --- changelog.d/12654.feature | 1 + docs/upgrade.md | 25 ++++++++++++++++--------- docs/workers.md | 26 +++++++++++++++++++++++--- synapse/app/admin_cmd.py | 2 +- synapse/app/generic_worker.py | 16 ---------------- synapse/config/server.py | 4 ---- synapse/config/workers.py | 7 +++++++ synapse/handlers/user_directory.py | 2 +- tests/config/test_workers.py | 27 +++++++++++++++++++++++++++ 9 files changed, 76 insertions(+), 34 deletions(-) create mode 100644 changelog.d/12654.feature diff --git a/changelog.d/12654.feature b/changelog.d/12654.feature new file mode 100644 index 0000000000..fd0a4bb0da --- /dev/null +++ b/changelog.d/12654.feature @@ -0,0 +1 @@ +Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. \ No newline at end of file diff --git a/docs/upgrade.md b/docs/upgrade.md index 18c33a4198..fa4b3ef590 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -101,29 +101,36 @@ To re-enable this functionality, set the homeserver config option to `true`. -## Deprecation of the `synapse.app.appservice` worker application type +## Deprecation of the `synapse.app.appservice` and `synapse.app.user_dir` worker application types The `synapse.app.appservice` worker application type allowed you to configure a single worker to use to notify application services of new events, as long as this functionality was disabled on the main process with `notify_appservices: False`. +Further, the `synapse.app.user_dir` worker application type allowed you to configure +a single worker to be responsible for updating the user directory, as long as this +was disabled on the main process with `update_user_directory: False`. 
To unify Synapse's worker types, the `synapse.app.appservice` worker application type and the `notify_appservices` configuration option have been deprecated. +The `synapse.app.user_dir` worker application type and `update_user_directory` +configuration option have also been deprecated. -To get the same functionality, it's now recommended that the `synapse.app.generic_worker` -worker application type is used and that the `notify_appservices_from_worker` option -is set to the name of a worker. +To get the same functionality as was provided by the deprecated options, it's now recommended that the `synapse.app.generic_worker` +worker application type is used and that the `notify_appservices_from_worker` and/or +`update_user_directory_from_worker` options are set to the name of a worker. -For the time being, `notify_appservices_from_worker` can be used alongside -`synapse.app.appservice` and `notify_appservices` to make it easier to transition -between the two configurations, however please note that: +For the time being, the old options can be used alongside the new options to make +it easier to transition between the two configurations, however please note that: - the options must not contradict each other (otherwise Synapse won't start); and -- the `notify_appservices` option will be removed in a future release of Synapse. +- the `notify_appservices` and `update_user_directory` options will be removed in a future release of Synapse. -Please see [the relevant section of the worker documentation][v1_59_notify_ases_from] for more information. +Please see the [*Notifying Application Services*][v1_59_notify_ases_from] and +[*Updating the User Directory*][v1_59_update_user_dir] sections of the worker +documentation for more information. [v1_59_notify_ases_from]: workers.md#notifying-application-services +[v1_59_update_user_dir]: workers.md#updating-the-user-directory # Upgrading to v1.58.0 diff --git a/docs/workers.md b/docs/workers.md index 1d049b6c4f..553792d238 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -426,7 +426,7 @@ the shared configuration would include: run_background_tasks_on: background_worker ``` -You might also wish to investigate the `update_user_directory` and +You might also wish to investigate the `update_user_directory_from_worker` and `media_instance_running_background_jobs` settings. An example for a dedicated background worker instance: @@ -435,9 +435,26 @@ An example for a dedicated background worker instance: {{#include systemd-with-workers/workers/background_worker.yaml}} ``` +#### Updating the User Directory + +You can designate one generic worker to update the user directory. + +Specify its name in the shared configuration as follows: + +```yaml +update_user_directory_from_worker: worker_name +``` + +This work cannot be load-balanced; please ensure the main process is restarted +after setting this option in the shared configuration! + +This style of configuration supersedes the legacy `synapse.app.user_dir` +worker application type. + + #### Notifying Application Services -You can designate one worker to send output traffic to Application Services. +You can designate one generic worker to send output traffic to Application Services. 
Specify its name in the shared configuration as follows: @@ -470,7 +487,7 @@ pusher_instances: ### `synapse.app.appservice` -**Deprecated as of Synapse v1.58.** [Use `synapse.app.generic_worker` with the +**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the `notify_appservices_from_worker` option instead.](#notifying-application-services) Handles sending output traffic to Application Services. Doesn't handle any @@ -540,6 +557,9 @@ Note that if a reverse proxy is used , then `/_matrix/media/` must be routed for ### `synapse.app.user_dir` +**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the +`update_user_directory_from_worker` option instead.](#updating-the-user-directory) + Handles searches in the user directory. It can handle REST endpoints matching the following regular expressions: diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 2b0d92cbae..2a4c2e59cd 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -210,7 +210,7 @@ def start(config_options: List[str]) -> None: config.logging.no_redirect_stdio = True # Explicitly disable background processes - config.server.update_user_directory = False + config.worker.should_update_user_directory = False config.worker.run_background_tasks = False config.worker.start_pushers = False config.worker.pusher_shard_config.instances = [] diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 07dddc0b13..2a9480a5c1 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -441,22 +441,6 @@ def start(config_options: List[str]) -> None: "synapse.app.user_dir", ) - if config.worker.worker_app == "synapse.app.user_dir": - if config.server.update_user_directory: - sys.stderr.write( - "\nThe update_user_directory must be disabled in the main synapse process" - "\nbefore they can be run in a separate worker." - "\nPlease add ``update_user_directory: false`` to the main config" - "\n" - ) - sys.exit(1) - - # Force the pushers to start since they will be disabled in the main config - config.server.update_user_directory = True - else: - # For other worker types we force this to off. - config.server.update_user_directory = False - synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage diff --git a/synapse/config/server.py b/synapse/config/server.py index 1e709c7cf5..005a3ee48c 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -319,10 +319,6 @@ class ServerConfig(Config): self.presence_router_config, ) = load_module(presence_router_config, ("presence", "presence_router")) - # Whether to update the user directory or not. This should be set to - # false only if we are updating the user directory in a worker - self.update_user_directory = config.get("update_user_directory", True) - # whether to enable the media repository endpoints. 
This should be set # to false if the media repository is running as a separate endpoint; # doing so ensures that we will not run cache cleanup jobs on the diff --git a/synapse/config/workers.py b/synapse/config/workers.py index a9dbcc6d3d..e1569b3c14 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -311,6 +311,13 @@ class WorkerConfig(Config): new_option_name="notify_appservices_from_worker", ) + self.should_update_user_directory = self._should_this_worker_perform_duty( + config, + legacy_master_option_name="update_user_directory", + legacy_worker_app_name="synapse.app.user_dir", + new_option_name="update_user_directory_from_worker", + ) + def _should_this_worker_perform_duty( self, config: Dict[str, Any], diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 048fd4bb82..74f7fdfe6c 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -60,7 +60,7 @@ class UserDirectoryHandler(StateDeltasHandler): self.clock = hs.get_clock() self.notifier = hs.get_notifier() self.is_mine_id = hs.is_mine_id - self.update_user_directory = hs.config.server.update_user_directory + self.update_user_directory = hs.config.worker.should_update_user_directory self.search_all_users = hs.config.userdirectory.user_directory_search_all_users self.spam_checker = hs.get_spam_checker() # The current position in the current_state_delta stream diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py index da81bb9655..ef6294ecb2 100644 --- a/tests/config/test_workers.py +++ b/tests/config/test_workers.py @@ -286,3 +286,30 @@ class WorkerDutyConfigTestCase(TestCase): "notify_appservices_from_worker", ) ) + + def test_worker_duty_configs(self) -> None: + """ + Additional tests for the worker duties + """ + + worker1_config = self._make_worker_config( + worker_app="synapse.app.generic_worker", + worker_name="worker1", + extras={ + "notify_appservices_from_worker": "worker2", + "update_user_directory_from_worker": "worker1", + }, + ) + self.assertFalse(worker1_config.should_notify_appservices) + self.assertTrue(worker1_config.should_update_user_directory) + + worker2_config = self._make_worker_config( + worker_app="synapse.app.generic_worker", + worker_name="worker2", + extras={ + "notify_appservices_from_worker": "worker2", + "update_user_directory_from_worker": "worker1", + }, + ) + self.assertTrue(worker2_config.should_notify_appservices) + self.assertFalse(worker2_config.should_update_user_directory) From 464fe99f52e6384fe8a0a422977087e70d300db2 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 11:12:13 +0100 Subject: [PATCH 80/87] Fix changelog link --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index cec3e29c49..eb2cfb18f1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.59.0rc1 (2022-05-10) This release makes several changes that server administrators should be aware of: - Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616)) -- The `synapse.app.appservice` is now deprecated. ([\#12452])(https://github.com/matrix-org/synapse/pull/12452)) +- The `synapse.app.appservice` is now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452)) See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details. 
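The upgrade notes modified in the patch above describe a transitional period during which the legacy `update_user_directory` / `notify_appservices` options may coexist with their new per-worker replacements, provided the two do not contradict each other. A minimal shared-configuration sketch of that transitional state follows; it is not taken from the patch itself, and the worker name `background_worker1` is illustrative only:

```yaml
# Hypothetical homeserver.yaml fragment for the transition period described
# in the upgrade notes. The legacy main-process flags and their new-style
# replacements are both set, and they agree with each other, so Synapse
# should start cleanly.
update_user_directory: false                           # legacy main-process flag
update_user_directory_from_worker: background_worker1  # new-style designation

notify_appservices: false                              # legacy main-process flag
notify_appservices_from_worker: background_worker1     # new-style designation
```

If a legacy flag were instead left at its default of `true` on the main process while a worker was also designated, the options would contradict each other and, per the upgrade notes, Synapse won't start; once every process runs a release that understands the new options, the legacy flags can be dropped entirely.
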
From 946b8437cfcf6c434b04eaa25d7899d3d738322f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 11:12:20 +0100 Subject: [PATCH 81/87] Group release script changes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index eb2cfb18f1..3b761c14e4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -67,7 +67,6 @@ Internal Changes - Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480)) - Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500)) - Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505)) -- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556)) - Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564)) - Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568)) - Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577)) @@ -86,6 +85,7 @@ Internal Changes - Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637)) - Move `pympler` back in to the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652)) - Fix spelling of `M_UNRECOGNIZED` in comments. ([\#12665](https://github.com/matrix-org/synapse/issues/12665)) +- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556)) - Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612)) ### Typechecking From 239da21c1ae02f310da863dbb4a3da1a5404fbfd Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 11:12:31 +0100 Subject: [PATCH 82/87] Add Olivier's last-minute merge --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 3b761c14e4..8f3a390921 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -17,6 +17,7 @@ Features - Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670)) - Extend the [module API](https://github.com/matrix-org/synapse/blob/release-v1.59/synapse/module_api/__init__.py) to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406)) - Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. 
([\#12452](https://github.com/matrix-org/synapse/issues/12452)) +- Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. ([\#12654](https://github.com/matrix-org/synapse/issues/12654)) - Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12526](https://github.com/matrix-org/synapse/issues/12526)) - Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore m.room.server_acl events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601)) - Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619)) From 2bae6d93c94e47e7ce9459bf17ee3ae18b2a0923 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 11:17:42 +0100 Subject: [PATCH 83/87] I manually added O's change, remove newsfile --- changelog.d/12654.feature | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/12654.feature diff --git a/changelog.d/12654.feature b/changelog.d/12654.feature deleted file mode 100644 index fd0a4bb0da..0000000000 --- a/changelog.d/12654.feature +++ /dev/null @@ -1 +0,0 @@ -Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. \ No newline at end of file From 80b3246528d23329b7d3dcd230bd4728ba26068c Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 11:29:40 +0100 Subject: [PATCH 84/87] Fix deprecation notice --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 8f3a390921..82cdf2b593 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.59.0rc1 (2022-05-10) This release makes several changes that server administrators should be aware of: - Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616)) -- The `synapse.app.appservice` is now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452)) +- The `synapse.app.appservice` and `synapse.app.user_dir` worker application types are now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452), [\#12654](https://github.com/matrix-org/synapse/issues/12654)) See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details. From c707ea736a2401ad898b1e4d43c4d75fbc93846d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 11:29:49 +0100 Subject: [PATCH 85/87] v1 -> 1 --- CHANGES.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 82cdf2b593..ea6005cc37 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -26,12 +26,12 @@ Features Bugfixes -------- -- Fix a bug introduced in Synapse v1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273)) -- Fix a bug introduced in Synapse v1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. 
Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544)) -- Fix a bug introduced in Synapse v1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570)) +- Fix a bug introduced in Synapse 1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273)) +- Fix a bug introduced in Synapse 1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544)) +- Fix a bug introduced in Synapse 1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570)) - Fix a long-standing bug where status codes would almost always get logged as `200!`, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580)) - Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594)) -- Fix a bug introduced in Synapse v1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633)) +- Fix a bug introduced in Synapse 1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633)) - Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. ([\#12657](https://github.com/matrix-org/synapse/issues/12657)) - Prevent memory leak from reoccurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656)) From 735faab2b899f230bafd8c321ebb1e2486d22e6e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 11:30:20 +0100 Subject: [PATCH 86/87] backquote `m.room.server_acl` --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ea6005cc37..dc4a30f8bc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -19,7 +19,7 @@ Features - Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452)) - Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. ([\#12654](https://github.com/matrix-org/synapse/issues/12654)) - Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12526](https://github.com/matrix-org/synapse/issues/12526)) -- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore m.room.server_acl events. 
([\#12601](https://github.com/matrix-org/synapse/issues/12601)) +- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore `m.room.server_acl` events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601)) - Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619)) From efcd899f697a02c80e6ab5ef8e6985ce67e08e79 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 May 2022 11:31:10 +0100 Subject: [PATCH 87/87] other fixes --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index dc4a30f8bc..c625e4d561 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -49,7 +49,7 @@ Improved Documentation - Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536)) - Add missing linebreak to `pipx` install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579)) - Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621)) -- Fixes to the formatting of README.rst. ([\#12627](https://github.com/matrix-org/synapse/issues/12627)) +- Fixes to the formatting of `README.rst`. ([\#12627](https://github.com/matrix-org/synapse/issues/12627)) - Fix docs on how to run specific Complement tests using the `complement.sh` test runner. ([\#12664](https://github.com/matrix-org/synapse/issues/12664)) @@ -81,7 +81,7 @@ Internal Changes - Add extra debug logging to federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614)) - Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616)) - Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620)) -- Remove use of `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624)) +- Remove use of the `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624)) - Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632)) - Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637)) - Move `pympler` back in to the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652))