From 8839b6c2f8b07d5d122a15e79b1ebdbdd5f3e26b Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 24 May 2023 13:23:26 -0700 Subject: [PATCH 01/21] Add requesting user id parameter to key claim methods in `TransportLayerClient` (#15663) --- changelog.d/15663.misc | 1 + synapse/federation/federation_client.py | 6 ++++-- synapse/federation/transport/client.py | 16 +++++++++++++--- synapse/handlers/e2e_keys.py | 3 ++- synapse/rest/client/keys.py | 8 ++++---- tests/handlers/test_e2e_keys.py | 16 +++++++++++++++- 6 files changed, 39 insertions(+), 11 deletions(-) create mode 100644 changelog.d/15663.misc diff --git a/changelog.d/15663.misc b/changelog.d/15663.misc new file mode 100644 index 0000000000..cc5f801543 --- /dev/null +++ b/changelog.d/15663.misc @@ -0,0 +1 @@ +Add requesting user id parameter to key claim methods in `TransportLayerClient`. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 076b9287c6..a2cf3a96c6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -236,6 +236,7 @@ class FederationClient(FederationBase): async def claim_client_keys( self, + user: UserID, destination: str, query: Dict[str, Dict[str, Dict[str, int]]], timeout: Optional[int], @@ -243,6 +244,7 @@ class FederationClient(FederationBase): """Claims one-time keys for a device hosted on a remote server. Args: + user: The user id of the requesting user destination: Domain name of the remote homeserver content: The query content. @@ -279,7 +281,7 @@ class FederationClient(FederationBase): if use_unstable: try: return await self.transport_layer.claim_client_keys_unstable( - destination, unstable_content, timeout + user, destination, unstable_content, timeout ) except HttpResponseException as e: # If an error is received that is due to an unrecognised endpoint, @@ -295,7 +297,7 @@ class FederationClient(FederationBase): logger.debug("Skipping unstable claim client keys API") return await self.transport_layer.claim_client_keys( - destination, content, timeout + user, destination, content, timeout ) @trace diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1cfc4446c4..0b17f713ea 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -45,7 +45,7 @@ from synapse.events import EventBase, make_event_from_dict from synapse.federation.units import Transaction from synapse.http.matrixfederationclient import ByteParser, LegacyJsonSendParser from synapse.http.types import QueryParams -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID from synapse.util import ExceptionBundle if TYPE_CHECKING: @@ -630,7 +630,11 @@ class TransportLayerClient: ) async def claim_client_keys( - self, destination: str, query_content: JsonDict, timeout: Optional[int] + self, + user: UserID, + destination: str, + query_content: JsonDict, + timeout: Optional[int], ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. @@ -655,6 +659,7 @@ class TransportLayerClient: } Args: + user: the user_id of the requesting user destination: The server to query. query_content: The user ids to query. 
Returns: @@ -671,7 +676,11 @@ class TransportLayerClient: ) async def claim_client_keys_unstable( - self, destination: str, query_content: JsonDict, timeout: Optional[int] + self, + user: UserID, + destination: str, + query_content: JsonDict, + timeout: Optional[int], ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. @@ -696,6 +705,7 @@ class TransportLayerClient: } Args: + user: the user_id of the requesting user destination: The server to query. query_content: The user ids to query. Returns: diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 24741b667b..ad075497c8 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -661,6 +661,7 @@ class E2eKeysHandler: async def claim_one_time_keys( self, query: Dict[str, Dict[str, Dict[str, int]]], + user: UserID, timeout: Optional[int], always_include_fallback_keys: bool, ) -> JsonDict: @@ -703,7 +704,7 @@ class E2eKeysHandler: device_keys = remote_queries[destination] try: remote_result = await self.federation.claim_client_keys( - destination, device_keys, timeout=timeout + user, destination, device_keys, timeout=timeout ) for user_id, keys in remote_result["one_time_keys"].items(): if user_id in device_keys: diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 9bbab5e624..413edd8a4d 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -287,7 +287,7 @@ class OneTimeKeyServlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - await self.auth.get_user_by_req(request, allow_guest=True) + requester = await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) @@ -298,7 +298,7 @@ class OneTimeKeyServlet(RestServlet): query.setdefault(user_id, {})[device_id] = {algorithm: 1} result = await self.e2e_keys_handler.claim_one_time_keys( - query, timeout, always_include_fallback_keys=False + query, requester.user, timeout, always_include_fallback_keys=False ) return 200, result @@ -335,7 +335,7 @@ class UnstableOneTimeKeyServlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - await self.auth.get_user_by_req(request, allow_guest=True) + requester = await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) @@ -346,7 +346,7 @@ class UnstableOneTimeKeyServlet(RestServlet): query.setdefault(user_id, {})[device_id] = Counter(algorithms) result = await self.e2e_keys_handler.claim_one_time_keys( - query, timeout, always_include_fallback_keys=True + query, requester.user, timeout, always_include_fallback_keys=True ) return 200, result diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 72d0584061..2eaffe511e 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -27,7 +27,7 @@ from synapse.appservice import ApplicationService from synapse.handlers.device import DeviceHandler from synapse.server import HomeServer from synapse.storage.databases.main.appservice import _make_exclusive_regex -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID from synapse.util import Clock from tests import unittest @@ -45,6 +45,7 @@ class 
E2eKeysHandlerTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_e2e_keys_handler() self.store = self.hs.get_datastores().main + self.requester = UserID.from_string(f"@test_requester:{self.hs.hostname}") def test_query_local_devices_no_devices(self) -> None: """If the user has no devices, we expect an empty list.""" @@ -161,6 +162,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): res2 = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -206,6 +208,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -225,6 +228,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -274,6 +278,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -286,6 +291,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -307,6 +313,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -348,6 +355,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -370,6 +378,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1080,6 +1089,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}, device_id_2: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -1125,6 +1135,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1169,6 +1180,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1202,6 +1214,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1229,6 +1242,7 @@ class 
E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
         claim_res = self.get_success(
             self.handler.claim_one_time_keys(
                 {local_user: {device_id_1: {"alg1": 1}}},
+                self.requester,
                 timeout=None,
                 always_include_fallback_keys=True,
             )

From 77156a4bc1f87e98754e3f7f86e52a84a4253a10 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Wed, 24 May 2023 23:22:24 -0500
Subject: [PATCH 02/21] Process previously failed backfill events in the background (#15585)

Process previously failed backfill events in the background: they are bound to
fail again, so there is no need to hold up the request waiting on them.

Fix https://github.com/matrix-org/synapse/issues/13623

Follow-up to https://github.com/matrix-org/synapse/issues/13621 and https://github.com/matrix-org/synapse/issues/13622

Part of making `/messages` faster: https://github.com/matrix-org/synapse/issues/13356
---
 changelog.d/15585.feature               |  1 +
 synapse/handlers/federation_event.py    | 70 ++++++++++++--
 .../databases/main/event_federation.py  | 31 +++++-
 synapse/util/iterutils.py               | 27 ++++++
 tests/handlers/test_federation_event.py | 95 +++++++++++++++++++
 tests/storage/test_event_federation.py  | 37 ++++++++
 6 files changed, 252 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/15585.feature

diff --git a/changelog.d/15585.feature b/changelog.d/15585.feature
new file mode 100644
index 0000000000..1adcfb69ee
--- /dev/null
+++ b/changelog.d/15585.feature
@@ -0,0 +1 @@
+Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 9a08618da5..42141d3670 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -88,7 +88,7 @@ from synapse.types import (
 )
 from synapse.types.state import StateFilter
 from synapse.util.async_helpers import Linearizer, concurrently_execute
-from synapse.util.iterutils import batch_iter
+from synapse.util.iterutils import batch_iter, partition
 from synapse.util.retryutils import NotRetryingDestination
 from synapse.util.stringutils import shortstr

@@ -865,7 +865,7 @@ class FederationEventHandler:
             [event.event_id for event in events]
         )

-        new_events = []
+        new_events: List[EventBase] = []
         for event in events:
             event_id = event.event_id

@@ -895,12 +895,66 @@ class FederationEventHandler:
             str(len(new_events)),
         )

-        # We want to sort these by depth so we process them and
-        # tell clients about them in order.
-        sorted_events = sorted(new_events, key=lambda x: x.depth)
-        for ev in sorted_events:
-            with nested_logging_context(ev.event_id):
-                await self._process_pulled_event(origin, ev, backfilled=backfilled)
+        @trace
+        async def _process_new_pulled_events(new_events: Collection[EventBase]) -> None:
+            # We want to sort these by depth so we process them and tell clients about
+            # them in order. It's also more efficient to backfill this way (`depth`
+            # ascending) because one backfill event is likely to be the `prev_event` of
+            # the next event we're going to process.
+            sorted_events = sorted(new_events, key=lambda x: x.depth)
+            for ev in sorted_events:
+                with nested_logging_context(ev.event_id):
+                    await self._process_pulled_event(origin, ev, backfilled=backfilled)
+
+        # Check if we've already tried to process these events at some point in the
+        # past. We aren't concerned with the exponential backoff here, just whether it
+        # has failed to be processed before.
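+        # For example (hypothetical event IDs): if `new_events` contains "$a" and
+        # "$b" and only "$a" has failed to be pulled before, the lookup below
+        # returns {"$a"}; `partition` (added in this patch) then routes "$a" to
+        # background processing and "$b" to normal, awaited processing.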
+        event_ids_with_failed_pull_attempts = (
+            await self._store.get_event_ids_with_failed_pull_attempts(
+                [event.event_id for event in new_events]
+            )
+        )
+
+        # We construct the event lists in source order from the `/backfill` response because
+        # it's a) easiest, but also b) the order in which we process things matters for
+        # MSC2716 historical batches because many historical events are all at the same
+        # `depth` and we rely on the tenuous sort that the other server gave us and hope
+        # they're doing their best. The brittle nature of this ordering for historical
+        # messages over federation is one of the reasons why we don't want to continue
+        # on MSC2716 until we have online topological ordering.
+        events_with_failed_pull_attempts, fresh_events = partition(
+            new_events, lambda e: e.event_id in event_ids_with_failed_pull_attempts
+        )
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "events_with_failed_pull_attempts",
+            str(event_ids_with_failed_pull_attempts),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "events_with_failed_pull_attempts.length",
+            str(len(events_with_failed_pull_attempts)),
+        )
+        set_tag(
+            SynapseTags.FUNC_ARG_PREFIX + "fresh_events",
+            str([event.event_id for event in fresh_events]),
+        )
+        set_tag(
+            SynapseTags.RESULT_PREFIX + "fresh_events.length",
+            str(len(fresh_events)),
+        )
+
+        # Process previously failed backfill events in the background so we don't
+        # waste time on something that is likely to fail again.
+        if len(events_with_failed_pull_attempts) > 0:
+            run_as_background_process(
+                "_process_new_pulled_events_with_failed_pull_attempts",
+                _process_new_pulled_events,
+                events_with_failed_pull_attempts,
+            )
+
+        # We can optimistically try to process and wait for the event to be fully
+        # persisted if we've never tried before.
+        if len(fresh_events) > 0:
+            await _process_new_pulled_events(fresh_events)

     @trace
     @tag_args
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index ac19de183c..2681917d0b 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -46,7 +46,7 @@ from synapse.storage.database import (
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.databases.main.signatures import SignatureWorkerStore
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
-from synapse.types import JsonDict
+from synapse.types import JsonDict, StrCollection
 from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cached
 from synapse.util.caches.lrucache import LruCache
@@ -1583,6 +1583,35 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas

         txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause))

+    @trace
+    async def get_event_ids_with_failed_pull_attempts(
+        self, event_ids: StrCollection
+    ) -> Set[str]:
+        """
+        Filter the given list of `event_ids` and return events which have any failed
+        pull attempts.
+
+        Args:
+            event_ids: A list of events to filter down.
+
+        Returns:
+            A filtered-down list of `event_ids` that have previously failed pull attempts.
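+
+        Example (hypothetical event IDs; only "$a" has rows in
+        `event_failed_pull_attempts`):
+
+            ids = await store.get_event_ids_with_failed_pull_attempts(["$a", "$b"])
+            # ids == {"$a"}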
+ """ + + rows = await self.db_pool.simple_select_many_batch( + table="event_failed_pull_attempts", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=("event_id",), + desc="get_event_ids_with_failed_pull_attempts", + ) + event_ids_with_failed_pull_attempts: Set[str] = { + row["event_id"] for row in rows + } + + return event_ids_with_failed_pull_attempts + @trace async def get_event_ids_to_not_pull_from_backoff( self, diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index 4938ddf703..a0efb96d3b 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -15,11 +15,13 @@ import heapq from itertools import islice from typing import ( + Callable, Collection, Dict, Generator, Iterable, Iterator, + List, Mapping, Set, Sized, @@ -71,6 +73,31 @@ def chunk_seq(iseq: S, maxlen: int) -> Iterator[S]: return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen)) +def partition( + iterable: Iterable[T], predicate: Callable[[T], bool] +) -> Tuple[List[T], List[T]]: + """ + Separate a given iterable into two lists based on the result of a predicate function. + + Args: + iterable: the iterable to partition (separate) + predicate: a function that takes an item from the iterable and returns a boolean + + Returns: + A tuple of two lists, the first containing all items for which the predicate + returned True, the second containing all items for which the predicate returned + False + """ + true_results = [] + false_results = [] + for item in iterable: + if predicate(item): + true_results.append(item) + else: + false_results.append(item) + return true_results, false_results + + def sorted_topologically( nodes: Iterable[T], graph: Mapping[T, Collection[T]], diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index c067e5bfe3..23f1b33b2f 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -664,6 +664,101 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): StoreError, ) + def test_backfill_process_previously_failed_pull_attempt_event_in_the_background( + self, + ) -> None: + """ + Sanity check that events are still processed even if it is in the background + for events that already have failed pull attempts. 
+ """ + OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}" + main_store = self.hs.get_datastores().main + + # Create the room + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + room_version = self.get_success(main_store.get_room_version(room_id)) + + # Allow the remote user to send state events + self.helper.send_state( + room_id, + "m.room.power_levels", + {"events_default": 0, "state_default": 0}, + tok=tok, + ) + + # Add the remote user to the room + member_event = self.get_success( + event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join") + ) + + initial_state_map = self.get_success( + main_store.get_partial_current_state_ids(room_id) + ) + + auth_event_ids = [ + initial_state_map[("m.room.create", "")], + initial_state_map[("m.room.power_levels", "")], + member_event.event_id, + ] + + # Create a regular event that should process + pulled_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "type": "test_regular_type", + "room_id": room_id, + "sender": OTHER_USER, + "prev_events": [ + member_event.event_id, + ], + "auth_events": auth_event_ids, + "origin_server_ts": 1, + "depth": 12, + "content": {"body": "pulled_event"}, + } + ), + room_version, + ) + + # Record a failed pull attempt for this event which will cause us to backfill it + # in the background from here on out. + self.get_success( + main_store.record_event_failed_pull_attempt( + room_id, pulled_event.event_id, "fake cause" + ) + ) + + # We expect an outbound request to /backfill, so stub that out + self.mock_federation_transport_client.backfill.return_value = make_awaitable( + { + "origin": self.OTHER_SERVER_NAME, + "origin_server_ts": 123, + "pdus": [ + pulled_event.get_pdu_json(), + ], + } + ) + + # The function under test: try to backfill and process the pulled event + with LoggingContext("test"): + self.get_success( + self.hs.get_federation_event_handler().backfill( + self.OTHER_SERVER_NAME, + room_id, + limit=1, + extremities=["$some_extremity"], + ) + ) + + # Ensure `run_as_background_process(...)` has a chance to run (essentially + # `wait_for_background_processes()`) + self.reactor.pump((0.1,)) + + # Make sure we processed and persisted the pulled event + self.get_success(main_store.get_event(pulled_event.event_id, allow_none=False)) + def test_process_pulled_event_with_rejected_missing_state(self) -> None: """Ensure that we correctly handle pulled events with missing state containing a rejected state event diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 81e50bdd55..4b8d8328d7 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -1134,6 +1134,43 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertEqual(backfill_event_ids, ["insertion_eventA"]) + def test_get_event_ids_with_failed_pull_attempts(self) -> None: + """ + Test to make sure we properly get event_ids based on whether they have any + failed pull attempts. 
+ """ + # Create the room + user_id = self.register_user("alice", "test") + tok = self.login("alice", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + + self.get_success( + self.store.record_event_failed_pull_attempt( + room_id, "$failed_event_id1", "fake cause" + ) + ) + self.get_success( + self.store.record_event_failed_pull_attempt( + room_id, "$failed_event_id2", "fake cause" + ) + ) + + event_ids_with_failed_pull_attempts = self.get_success( + self.store.get_event_ids_with_failed_pull_attempts( + event_ids=[ + "$failed_event_id1", + "$fresh_event_id1", + "$failed_event_id2", + "$fresh_event_id2", + ] + ) + ) + + self.assertEqual( + event_ids_with_failed_pull_attempts, + {"$failed_event_id1", "$failed_event_id2"}, + ) + def test_get_event_ids_to_not_pull_from_backoff(self) -> None: """ Test to make sure only event IDs we should backoff from are returned. From 2d8a2ca374916e8a24ff43355c0ad24d456fab25 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 26 May 2023 10:53:10 +0000 Subject: [PATCH 03/21] Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. (#15673) * Add dch and notify-send to the Nix dev flake * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15673.misc | 1 + flake.nix | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/15673.misc diff --git a/changelog.d/15673.misc b/changelog.d/15673.misc new file mode 100644 index 0000000000..52148fc63f --- /dev/null +++ b/changelog.d/15673.misc @@ -0,0 +1 @@ +Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. \ No newline at end of file diff --git a/flake.nix b/flake.nix index 7351571e61..8c7a4f8769 100644 --- a/flake.nix +++ b/flake.nix @@ -100,6 +100,10 @@ # For building the Synapse documentation website. mdbook + + # For releasing Synapse + debian-devscripts # (`dch` for manipulating the Debian changelog) + libnotify # (the release script uses `notify-send` to tell you when CI jobs are done) ]; # Install Python and manage a virtualenv with Poetry. From 4e013093a87094c711eb047a41e2de3807c7873e Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 26 May 2023 05:46:13 -0600 Subject: [PATCH 04/21] Add MSC3820 (room version 11) option 2 unstable room version. (#15666) --- changelog.d/15666.misc | 1 + synapse/api/room_versions.py | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 changelog.d/15666.misc diff --git a/changelog.d/15666.misc b/changelog.d/15666.misc new file mode 100644 index 0000000000..92eae49952 --- /dev/null +++ b/changelog.d/15666.misc @@ -0,0 +1 @@ +Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. 
\ No newline at end of file diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 7030b133d3..035a14171b 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -485,6 +485,30 @@ class RoomVersions: msc3931_push_features=(), msc3989_redaction_rules=True, ) + MSC3820opt2 = RoomVersion( + # Based upon v10 + "org.matrix.msc3820.opt2", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2175_implicit_room_creator=True, # Used by MSC3820 + msc2176_redaction_rules=True, # Used by MSC3820 + msc3083_join_rules=True, + msc3375_redaction_rules=True, + msc2403_knocking=True, + msc2716_historical=False, + msc2716_redactions=False, + msc3389_relation_redactions=False, + msc3787_knock_restricted_join_rule=True, + msc3667_int_only_power_levels=True, + msc3821_redaction_rules=True, # Used by MSC3820 + msc3931_push_features=(), + msc3989_redaction_rules=True, # Used by MSC3820 + ) KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { From c775d80b73b7930b9541e353fc24dcef66579e48 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 26 May 2023 14:28:55 +0000 Subject: [PATCH 05/21] Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. (#15672) * Fix #15669: always populate instance map even if it was empty * Fix some tests * Fix more tests * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * CI fix: don't forget to update apt repository sources before installing olddeps deps * Add test testing the backwards compatibility --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- .github/workflows/tests.yml | 1 + changelog.d/15672.bugfix | 1 + synapse/config/workers.py | 2 +- tests/app/test_homeserver_start.py | 2 ++ tests/app/test_openid_listener.py | 1 + tests/config/test_workers.py | 43 +++++++++++++++++++++--- tests/replication/test_federation_ack.py | 1 + tests/storage/test_rollback_worker.py | 1 + 8 files changed, 47 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15672.bugfix diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 51cbeb3298..ce3a57fb01 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -314,6 +314,7 @@ jobs: # There aren't wheels for some of the older deps, so we need to install # their build dependencies - run: | + sudo apt-get -qq update sudo apt-get -qq install build-essential libffi-dev python-dev \ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev diff --git a/changelog.d/15672.bugfix b/changelog.d/15672.bugfix new file mode 100644 index 0000000000..c81d7332b7 --- /dev/null +++ b/changelog.d/15672.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. \ No newline at end of file diff --git a/synapse/config/workers.py b/synapse/config/workers.py index d2311cc857..38e13dd7b5 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -222,7 +222,7 @@ class WorkerConfig(Config): # itself doesn't need this data as it would never have to talk to itself. 
instance_map: Dict[str, Any] = config.get("instance_map", {}) - if instance_map and self.instance_name is not MAIN_PROCESS_INSTANCE_NAME: + if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME: # The host used to connect to the main synapse main_host = config.get("worker_replication_host", None) diff --git a/tests/app/test_homeserver_start.py b/tests/app/test_homeserver_start.py index 788c935537..cd117b7394 100644 --- a/tests/app/test_homeserver_start.py +++ b/tests/app/test_homeserver_start.py @@ -25,6 +25,8 @@ class HomeserverAppStartTestCase(ConfigFileTestCase): # Add a blank line as otherwise the next addition ends up on a line with a comment self.add_lines_to_config([" "]) self.add_lines_to_config(["worker_app: test_worker_app"]) + self.add_lines_to_config(["worker_replication_host: 127.0.0.1"]) + self.add_lines_to_config(["worker_replication_http_port: 0"]) # Ensure that starting master process with worker config raises an exception with self.assertRaises(ConfigError): diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 2ee343d8a4..056d9402a4 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -42,6 +42,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): # have to tell the FederationHandler not to try to access stuff that is only # in the primary store. conf["worker_app"] = "yes" + conf["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}} return conf diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py index 49a6bdf408..086359fd71 100644 --- a/tests/config/test_workers.py +++ b/tests/config/test_workers.py @@ -17,7 +17,7 @@ from unittest.mock import Mock from immutabledict import immutabledict from synapse.config import ConfigError -from synapse.config.workers import WorkerConfig +from synapse.config.workers import InstanceLocationConfig, WorkerConfig from tests.unittest import TestCase @@ -94,6 +94,7 @@ class WorkerDutyConfigTestCase(TestCase): # so that it doesn't raise an exception here. # (This is not read by `_should_this_worker_perform_duty`.) "notify_appservices": False, + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) @@ -138,7 +139,9 @@ class WorkerDutyConfigTestCase(TestCase): """ main_process_config = self._make_worker_config( - worker_app="synapse.app.homeserver", worker_name=None + worker_app="synapse.app.homeserver", + worker_name=None, + extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}}, ) self.assertTrue( @@ -203,6 +206,7 @@ class WorkerDutyConfigTestCase(TestCase): # so that it doesn't raise an exception here. # (This is not read by `_should_this_worker_perform_duty`.) "notify_appservices": False, + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) @@ -236,7 +240,9 @@ class WorkerDutyConfigTestCase(TestCase): Tests new config options. This is for the master's config. """ main_process_config = self._make_worker_config( - worker_app="synapse.app.homeserver", worker_name=None + worker_app="synapse.app.homeserver", + worker_name=None, + extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}}, ) self.assertTrue( @@ -262,7 +268,9 @@ class WorkerDutyConfigTestCase(TestCase): Tests new config options. This is for the worker's config. 
""" appservice_worker_config = self._make_worker_config( - worker_app="synapse.app.generic_worker", worker_name="worker1" + worker_app="synapse.app.generic_worker", + worker_name="worker1", + extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}}, ) self.assertTrue( @@ -298,6 +306,7 @@ class WorkerDutyConfigTestCase(TestCase): extras={ "notify_appservices_from_worker": "worker2", "update_user_directory_from_worker": "worker1", + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) self.assertFalse(worker1_config.should_notify_appservices) @@ -309,7 +318,33 @@ class WorkerDutyConfigTestCase(TestCase): extras={ "notify_appservices_from_worker": "worker2", "update_user_directory_from_worker": "worker1", + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) self.assertTrue(worker2_config.should_notify_appservices) self.assertFalse(worker2_config.should_update_user_directory) + + def test_worker_instance_map_compat(self) -> None: + """ + Test that `worker_replication_*` settings are compatibly handled by + adding them to the instance map as a `main` entry. + """ + + worker1_config = self._make_worker_config( + worker_app="synapse.app.generic_worker", + worker_name="worker1", + extras={ + "notify_appservices_from_worker": "worker2", + "update_user_directory_from_worker": "worker1", + "worker_replication_host": "127.0.0.42", + "worker_replication_http_port": 1979, + }, + ) + self.assertEqual( + worker1_config.instance_map, + { + "master": InstanceLocationConfig( + host="127.0.0.42", port=1979, tls=False + ), + }, + ) diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 12668b34c5..cf59b1a204 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -32,6 +32,7 @@ class FederationAckTestCase(HomeserverTestCase): config["worker_app"] = "synapse.app.generic_worker" config["worker_name"] = "federation_sender1" config["federation_sender_instances"] = ["federation_sender1"] + config["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}} return config def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py index 966aafea6f..6861d3a6c9 100644 --- a/tests/storage/test_rollback_worker.py +++ b/tests/storage/test_rollback_worker.py @@ -55,6 +55,7 @@ class WorkerSchemaTests(HomeserverTestCase): # Mark this as a worker app. conf["worker_app"] = "yes" + conf["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}} return conf From 65bf5f3649fd108d91fe64795186d27940e80426 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 26 May 2023 16:17:50 +0100 Subject: [PATCH 06/21] 1.84.1 --- CHANGES.md | 19 +++++++++++++++++++ changelog.d/15672.bugfix | 1 - changelog.d/15673.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 26 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/15672.bugfix delete mode 100644 changelog.d/15673.misc diff --git a/CHANGES.md b/CHANGES.md index e9397158f1..1fe1d013c6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,22 @@ +Synapse 1.84.1 (2023-05-26) +=========================== + +This patch release fixes a major issue with homeservers that does not have an `instance_map` defined but which do use workers. +If you have already upgraded to Synapse 1.84.0 and your homeserver is working normally, then there is no need to update to this patch release. 
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. ([\#15672](https://github.com/matrix-org/synapse/issues/15672))
+
+
+Internal Changes
+----------------
+
+- Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. ([\#15673](https://github.com/matrix-org/synapse/issues/15673))
+
+
 Synapse 1.84.0 (2023-05-23)
 ===========================

diff --git a/changelog.d/15672.bugfix b/changelog.d/15672.bugfix
deleted file mode 100644
index c81d7332b7..0000000000
--- a/changelog.d/15672.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided.
\ No newline at end of file
diff --git a/changelog.d/15673.misc b/changelog.d/15673.misc
deleted file mode 100644
index 52148fc63f..0000000000
--- a/changelog.d/15673.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add `dch` and `notify-send` to the development Nix flake so that the release script can be used.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index 51935e03b6..fbdc9c177e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.84.1) stable; urgency=medium
+
+  * New Synapse release 1.84.1.
+
+ -- Synapse Packaging team  Fri, 26 May 2023 16:15:30 +0100
+
 matrix-synapse-py3 (1.84.0) stable; urgency=medium

   * New Synapse release 1.84.0.
diff --git a/pyproject.toml b/pyproject.toml
index 9c77f9294a..6e9bce65b6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.84.0"
+version = "1.84.1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors "]
 license = "Apache-2.0"

From cb6f4a84a6a8f2b79b80851f37eb5fa4c7c5264a Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Fri, 26 May 2023 16:18:35 +0100
Subject: [PATCH 07/21] Fix a typographical error in changelog

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 1fe1d013c6..85c9af8ce4 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,7 +1,7 @@
 Synapse 1.84.1 (2023-05-26)
 ===========================

-This patch release fixes a major issue with homeservers that does not have an `instance_map` defined but which do use workers.
+This patch release fixes a major issue with homeservers that do not have an `instance_map` defined but which do use workers.
 If you have already upgraded to Synapse 1.84.0 and your homeserver is working normally, then there is no need to update to this patch release.

From 2ad91ec628126753590c1a90c432270d6c8fa8fd Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 26 May 2023 13:16:08 -0400
Subject: [PATCH 08/21] Set thread_id column to non-null for
 event_push_{actions,actions_staging,summary} (#15597)

Updates the database schema to require a thread_id (by adding a constraint
that the column is non-null) for event_push_actions, event_push_actions_staging,
and event_push_summary.

For PostgreSQL we add the constraint as NOT VALID, then VALIDATE the constraint
in a background job to avoid locking the table during an upgrade. Each table is
updated as a separate schema delta to avoid deadlocks between them.

For SQLite we simply rebuild the table & copy the data.
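
As a rough sketch of the PostgreSQL pattern used here (shown for
`event_push_summary`; the other two tables follow the same shape, and the
VALIDATE statement is what `register_background_validate_constraint` runs
from the background job):

    -- Added in the schema delta; guards new rows without locking the table.
    ALTER TABLE event_push_summary
        ADD CONSTRAINT event_push_summary_thread_id
        CHECK (thread_id IS NOT NULL) NOT VALID;

    -- Run later by the background update; checks the existing rows.
    ALTER TABLE event_push_summary VALIDATE CONSTRAINT event_push_summary_thread_id;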
--- changelog.d/15597.misc | 1 + synapse/storage/background_updates.py | 44 +++ .../databases/main/event_push_actions.py | 254 +++--------------- synapse/storage/schema/__init__.py | 3 + .../77/05thread_notifications_backfill.sql | 28 ++ ...06thread_notifications_not_null.sql.sqlite | 102 +++++++ ...s_not_null_event_push_actions.sql.postgres | 27 ++ ...ll_event_push_actions_staging.sql.postgres | 27 ++ ...s_not_null_event_push_summary.sql.postgres | 29 ++ 9 files changed, 292 insertions(+), 223 deletions(-) create mode 100644 changelog.d/15597.misc create mode 100644 synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres diff --git a/changelog.d/15597.misc b/changelog.d/15597.misc new file mode 100644 index 0000000000..2dea23784f --- /dev/null +++ b/changelog.d/15597.misc @@ -0,0 +1 @@ +Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index a99aea8926..ca085ef800 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -561,6 +561,50 @@ class BackgroundUpdater: updater, oneshot=True ) + def register_background_validate_constraint( + self, update_name: str, constraint_name: str, table: str + ) -> None: + """Helper for store classes to do a background validate constraint. + + This only applies on PostgreSQL. + + To use: + + 1. use a schema delta file to add a background update. Example: + INSERT INTO background_updates (update_name, progress_json) VALUES + ('validate_my_constraint', '{}'); + + 2. 
In the Store constructor, call this method
+
+        Args:
+            update_name: the name of the background update to register
+            constraint_name: name of constraint to validate
+            table: table the constraint is applied to
+        """
+
+        def runner(conn: Connection) -> None:
+            c = conn.cursor()
+
+            sql = f"""
+            ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name};
+            """
+            logger.debug("[SQL] %s", sql)
+            c.execute(sql)
+
+        async def updater(progress: JsonDict, batch_size: int) -> int:
+            assert isinstance(
+                self.db_pool.engine, engines.PostgresEngine
+            ), "validate constraint background update registered for non-Postgres database"
+
+            logger.info("Validating constraint %s on %s", constraint_name, table)
+            await self.db_pool.runWithConnection(runner)
+            await self._end_background_update(update_name)
+            return 1
+
+        self._background_update_handlers[update_name] = _BackgroundUpdateHandler(
+            updater, oneshot=True
+        )
+
     async def create_index_in_background(
         self,
         index_name: str,
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 6fdb1e292e..07bda7d6be 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -289,179 +289,52 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             unique=True,
         )

-        self.db_pool.updates.register_background_update_handler(
-            "event_push_backfill_thread_id",
-            self._background_backfill_thread_id,
+        self.db_pool.updates.register_background_validate_constraint(
+            "event_push_actions_staging_thread_id",
+            constraint_name="event_push_actions_staging_thread_id",
+            table="event_push_actions_staging",
         )
-
-        # Indexes which will be used to quickly make the thread_id column non-null.
-        self.db_pool.updates.register_background_index_update(
-            "event_push_actions_thread_id_null",
-            index_name="event_push_actions_thread_id_null",
+        self.db_pool.updates.register_background_validate_constraint(
+            "event_push_actions_thread_id",
+            constraint_name="event_push_actions_thread_id",
             table="event_push_actions",
-            columns=["thread_id"],
-            where_clause="thread_id IS NULL",
         )
-        self.db_pool.updates.register_background_index_update(
-            "event_push_summary_thread_id_null",
-            index_name="event_push_summary_thread_id_null",
+        self.db_pool.updates.register_background_validate_constraint(
+            "event_push_summary_thread_id",
+            constraint_name="event_push_summary_thread_id",
             table="event_push_summary",
-            columns=["thread_id"],
-            where_clause="thread_id IS NULL",
         )

-        # Check ASAP (and then later, every 1s) to see if we have finished
-        # background updates the event_push_actions and event_push_summary tables.
-        self._clock.call_later(0.0, self._check_event_push_backfill_thread_id)
-        self._event_push_backfill_thread_id_done = False
-
-    @wrap_as_background_process("check_event_push_backfill_thread_id")
-    async def _check_event_push_backfill_thread_id(self) -> None:
-        """
-        Has thread_id finished backfilling?
-
-        If not, we need to just-in-time update it so the queries work.
-        """
-        done = await self.db_pool.updates.has_completed_background_update(
-            "event_push_backfill_thread_id"
+        self.db_pool.updates.register_background_update_handler(
+            "event_push_drop_null_thread_id_indexes",
+            self._background_drop_null_thread_id_indexes,
         )
-        if done:
-            self._event_push_backfill_thread_id_done = True
-        else:
-            # Reschedule to run.
- self._clock.call_later(15.0, self._check_event_push_backfill_thread_id) - - async def _background_backfill_thread_id( + async def _background_drop_null_thread_id_indexes( self, progress: JsonDict, batch_size: int ) -> int: """ - Fill in the thread_id field for event_push_actions and event_push_summary. - - This is preparatory so that it can be made non-nullable in the future. - - Because all current (null) data is done in an unthreaded manner this - simply assumes it is on the "main" timeline. Since event_push_actions - are periodically cleared it is not possible to correctly re-calculate - the thread_id. + Drop the indexes used to find null thread_ids for event_push_actions and + event_push_summary. """ - event_push_actions_done = progress.get("event_push_actions_done", False) - def add_thread_id_txn( - txn: LoggingTransaction, start_stream_ordering: int - ) -> int: - sql = """ - SELECT stream_ordering - FROM event_push_actions - WHERE - thread_id IS NULL - AND stream_ordering > ? - ORDER BY stream_ordering - LIMIT ? - """ - txn.execute(sql, (start_stream_ordering, batch_size)) + def drop_null_thread_id_indexes_txn(txn: LoggingTransaction) -> None: + sql = "DROP INDEX IF EXISTS event_push_actions_thread_id_null" + logger.debug("[SQL] %s", sql) + txn.execute(sql) - # No more rows to process. - rows = txn.fetchall() - if not rows: - progress["event_push_actions_done"] = True - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - return 0 + sql = "DROP INDEX IF EXISTS event_push_summary_thread_id_null" + logger.debug("[SQL] %s", sql) + txn.execute(sql) - # Update the thread ID for any of those rows. - max_stream_ordering = rows[-1][0] - - sql = """ - UPDATE event_push_actions - SET thread_id = 'main' - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """ - txn.execute( - sql, - ( - start_stream_ordering, - max_stream_ordering, - ), - ) - - # Update progress. - processed_rows = txn.rowcount - progress["max_event_push_actions_stream_ordering"] = max_stream_ordering - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - def add_thread_id_summary_txn(txn: LoggingTransaction) -> int: - min_user_id = progress.get("max_summary_user_id", "") - min_room_id = progress.get("max_summary_room_id", "") - - # Slightly overcomplicated query for getting the Nth user ID / room - # ID tuple, or the last if there are less than N remaining. - sql = """ - SELECT user_id, room_id FROM ( - SELECT user_id, room_id FROM event_push_summary - WHERE (user_id, room_id) > (?, ?) - AND thread_id IS NULL - ORDER BY user_id, room_id - LIMIT ? - ) AS e - ORDER BY user_id DESC, room_id DESC - LIMIT 1 - """ - - txn.execute(sql, (min_user_id, min_room_id, batch_size)) - row = txn.fetchone() - if not row: - return 0 - - max_user_id, max_room_id = row - - sql = """ - UPDATE event_push_summary - SET thread_id = 'main' - WHERE - (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?) - AND thread_id IS NULL - """ - txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id)) - processed_rows = txn.rowcount - - progress["max_summary_user_id"] = max_user_id - progress["max_summary_room_id"] = max_room_id - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - # First update the event_push_actions table, then the event_push_summary table. 
- # - # Note that the event_push_actions_staging table is ignored since it is - # assumed that items in that table will only exist for a short period of - # time. - if not event_push_actions_done: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_txn, - progress.get("max_event_push_actions_stream_ordering", 0), - ) - else: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_summary_txn, - ) - - # Only done after the event_push_summary table is done. - if not result: - await self.db_pool.updates._end_background_update( - "event_push_backfill_thread_id" - ) - - return result + await self.db_pool.runInteraction( + "drop_null_thread_id_indexes_txn", + drop_null_thread_id_indexes_txn, + ) + await self.db_pool.updates._end_background_update( + "event_push_drop_null_thread_id_indexes" + ) + return 0 async def get_unread_counts_by_room_for_user(self, user_id: str) -> Dict[str, int]: """Get the notification count by room for a user. Only considers notifications, @@ -711,25 +584,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # First we pull the counts from the summary table. # # We check that `last_receipt_stream_ordering` matches the stream ordering of the @@ -1545,25 +1399,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (room_id, user_id, stream_ordering, *thread_args), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # Fetch the notification counts between the stream ordering of the # latest receipt and what was previously summarised. unread_counts = self._get_notif_unread_count_for_user_room( @@ -1698,19 +1533,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas rotate_to_stream_ordering: The new maximum event stream ordering to summarise. """ - # Ensure that any new actions have an updated thread_id. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """, - (MAIN_TIMELINE, old_rotate_stream_ordering, rotate_to_stream_ordering), - ) - - # XXX Do we need to update summaries here too? 
-
         # Calculate the new counts that should be upserted into event_push_summary
         sql = """
             SELECT user_id, room_id, thread_id,
@@ -1773,20 +1595,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas

         logger.info("Rotating notifications, handling %d rows", len(summaries))

-        # Ensure that any updated threads have the proper thread_id.
-        if not self._event_push_backfill_thread_id_done:
-            txn.execute_batch(
-                """
-                UPDATE event_push_summary
-                SET thread_id = ?
-                WHERE room_id = ? AND user_id = ? AND thread_id is NULL
-                """,
-                [
-                    (MAIN_TIMELINE, room_id, user_id)
-                    for user_id, room_id, _ in summaries
-                ],
-            )
-
         self.db_pool.simple_upsert_many_txn(
             txn,
             table="event_push_summary",
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index df2cc31ca6..5cc786f030 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -110,6 +110,9 @@ SCHEMA_COMPAT_VERSION = (
     # Queries against `event_stream_ordering` columns in membership tables must
     # be disambiguated.
     #
+    # The thread_id column must be written to with non-null values for the
+    # event_push_actions, event_push_actions_staging, and event_push_summary tables.
+    #
     # insertions to the column `full_user_id` of tables profiles and user_filters can no
     # longer be null
     76
diff --git a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
new file mode 100644
index 0000000000..ce6f9ff937
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
@@ -0,0 +1,28 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Force the background updates from 06thread_notifications.sql to run in the
+-- foreground as code will now require those to be "done".
+
+DELETE FROM background_updates WHERE update_name = 'event_push_backfill_thread_id';
+
+-- Overwrite any null thread_id values.
+UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL;
+UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL;
+UPDATE event_push_summary SET thread_id = 'main' WHERE thread_id IS NULL;
+
+-- Drop the background updates to calculate the indexes used to find null thread_ids.
+DELETE FROM background_updates WHERE update_name = 'event_push_actions_thread_id_null'; +DELETE FROM background_updates WHERE update_name = 'event_push_summary_thread_id_null'; diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite new file mode 100644 index 0000000000..d19b9648b5 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite @@ -0,0 +1,102 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + -- The thread_id columns can now be made non-nullable. +-- +-- SQLite doesn't support modifying columns to an existing table, so it must +-- be recreated. + +-- Create the new tables. +CREATE TABLE event_push_actions_staging_new ( + event_id TEXT NOT NULL, + user_id TEXT NOT NULL, + actions TEXT NOT NULL, + notif SMALLINT NOT NULL, + highlight SMALLINT NOT NULL, + unread SMALLINT, + thread_id TEXT, + inserted_ts BIGINT, + CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id is NOT NULL) +); + +CREATE TABLE event_push_actions_new ( + room_id TEXT NOT NULL, + event_id TEXT NOT NULL, + user_id TEXT NOT NULL, + profile_tag VARCHAR(32), + actions TEXT NOT NULL, + topological_ordering BIGINT, + stream_ordering BIGINT, + notif SMALLINT, + highlight SMALLINT, + unread SMALLINT, + thread_id TEXT, + CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag), + CONSTRAINT event_push_actions_thread_id CHECK (thread_id is NOT NULL) +); + +CREATE TABLE event_push_summary_new ( + user_id TEXT NOT NULL, + room_id TEXT NOT NULL, + notif_count BIGINT NOT NULL, + stream_ordering BIGINT NOT NULL, + unread_count BIGINT, + last_receipt_stream_ordering BIGINT, + thread_id TEXT, + CONSTRAINT event_push_summary_thread_id CHECK (thread_id is NOT NULL) +); + +-- Copy the data. +INSERT INTO event_push_actions_staging_new (event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts) + SELECT event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts + FROM event_push_actions_staging; + +INSERT INTO event_push_actions_new (room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id) + SELECT room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id + FROM event_push_actions; + +INSERT INTO event_push_summary_new (user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id) + SELECT user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id + FROM event_push_summary; + +-- Drop the old tables. +DROP TABLE event_push_actions_staging; +DROP TABLE event_push_actions; +DROP TABLE event_push_summary; + +-- Rename the tables. 
+ALTER TABLE event_push_actions_staging_new RENAME TO event_push_actions_staging; +ALTER TABLE event_push_actions_new RENAME TO event_push_actions; +ALTER TABLE event_push_summary_new RENAME TO event_push_summary; + +-- Recreate the indexes. +CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id); + +CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering); +CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering ); +CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); +CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id ); +CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering); + +CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary (user_id, room_id, thread_id) ; + +-- Recreate some indexes in the background, by re-running the background updates +-- from 72/02event_push_actions_index.sql and 72/06thread_notifications.sql. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7706, 'event_push_summary_unique_index2', '{}') + ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}'; +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7706, 'event_push_actions_stream_highlight_index', '{}') + ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}'; diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres new file mode 100644 index 0000000000..381184b5e2 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres @@ -0,0 +1,27 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_actions + ADD CONSTRAINT event_push_actions_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. 
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES + (7706, 'event_push_actions_thread_id', '{}', 'event_push_actions_staging_thread_id'); diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres new file mode 100644 index 0000000000..395f9c7260 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres @@ -0,0 +1,27 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_actions_staging + ADD CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7706, 'event_push_actions_staging_thread_id', '{}'); diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres new file mode 100644 index 0000000000..140ceff1fa --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres @@ -0,0 +1,29 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_summary + ADD CONSTRAINT event_push_summary_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. 
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES + (7706, 'event_push_summary_thread_id', '{}', 'event_push_actions_thread_id'), + -- Also clean-up the old indexes. + (7706, 'event_push_drop_null_thread_id_indexes', '{}', 'event_push_summary_thread_id'); From 179f0f851e456c8dda3c7092bcb72bd2ec5e65cc Mon Sep 17 00:00:00 2001 From: Grant McLean Date: Sat, 27 May 2023 05:28:04 +1200 Subject: [PATCH 09/21] Documentation improvements to contributing guide (#15667) (#15668) Fix #15667 - Reiterate the importance of getting Rust installed and set up before attempting to install the Python dependencies. - Mention the importance of confirming that `poetry install` completed successfully and include a typical error that the user might see if it did not. - Expand on "Now edit homeserver.yaml" to give examples of things likely to need changing and to link to the relevant sections of the Synapse server documentation. --- changelog.d/15668.doc | 1 + docs/development/contributing_guide.md | 33 ++++++++++++++++++++++---- 2 files changed, 29 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15668.doc diff --git a/changelog.d/15668.doc b/changelog.d/15668.doc new file mode 100644 index 0000000000..3526a4d50c --- /dev/null +++ b/changelog.d/15668.doc @@ -0,0 +1 @@ +Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 56cf4ba81e..f5ba55afb7 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -22,6 +22,9 @@ on Windows is not officially supported. The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://www.python.org/downloads/). Your Python also needs support for [virtual environments](https://docs.python.org/3/library/venv.html). This is usually built-in, but some Linux distributions like Debian and Ubuntu split it out into its own package. Running `sudo apt install python3-venv` should be enough. +A recent version of the Rust compiler is needed to build the native modules. The +easiest way of installing the latest version is to use [rustup](https://rustup.rs/). + Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`. Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`. @@ -30,9 +33,6 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/). -A recent version of the Rust compiler is needed to build the native modules. The -easiest way of installing the latest version is to use [rustup](https://rustup.rs/). - # 3. Get the source. @@ -53,6 +53,11 @@ can find many good git tutorials on the web. # 4. Install the dependencies + +Before installing the Python dependencies, make sure you have installed a recent version +of Rust (see the "What do I need?" section above). The easiest way of installing the +latest version is to use [rustup](https://rustup.rs/). 
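To make the ordering this patch insists on concrete, here is a minimal sketch, assuming a Unix-like shell (the rustup one-liner is the installer published at https://rustup.rs/):

```sh
# Install a recent Rust toolchain first; Synapse's native modules need it.
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

# Verify the toolchain is on your PATH before touching the Python dependencies.
rustc --version

# Only then install the Python dependencies, as described in the steps below.
poetry install --extras all
```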
+
 Synapse uses the [poetry](https://python-poetry.org/) project to manage its
 dependencies and development environment. Once you have installed Python 3 and
 added the source, you should install `poetry`.
@@ -76,7 +81,8 @@ cd path/where/you/have/cloned/the/repository
 poetry install --extras all
 ```
 
-This will install the runtime and developer dependencies for the project.
+This will install the runtime and developer dependencies for the project. Be sure to check
+that the `poetry install` step completed cleanly.
 
 ## Running Synapse via poetry
 
@@ -84,14 +90,31 @@ To start a local instance of Synapse in the locked poetry environment, create a
 
 ```sh
 cp docs/sample_config.yaml homeserver.yaml
+cp docs/sample_log_config.yaml log_config.yaml
 ```
 
-Now edit homeserver.yaml, and run Synapse with:
+Now edit `homeserver.yaml`. Things you might want to change include:
+
+- Setting a `server_name`
+- Adjusting paths to be correct for your system, such as pointing `log_config` at the log config you just copied
+- Using a [PostgreSQL database instead of SQLite](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database)
+- Adding a [`registration_shared_secret`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration_shared_secret) so you can use the [`register_new_matrix_user` command](https://matrix-org.github.io/synapse/latest/setup/installation.html#registering-a-user).
+
+And then run Synapse with the following command:
 
 ```sh
 poetry run python -m synapse.app.homeserver -c homeserver.yaml
 ```
 
+If you get an error like the following:
+
+```
+importlib.metadata.PackageNotFoundError: matrix-synapse
+```
+
+this probably indicates that the `poetry install` step did not complete cleanly; go back
+and resolve any issues, then re-run until successful.
+
 # 5. Get in touch.
 
 Join our developer community on Matrix: [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)!

From 50918c494057dc93bfa6e37f7d140d68711846d1 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Fri, 26 May 2023 12:05:24 -0600
Subject: [PATCH 10/21] Add `MSC3820opt2` as a known room version (#15678)

---
 changelog.d/15678.misc | 1 +
 synapse/api/room_versions.py | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/15678.misc

diff --git a/changelog.d/15678.misc b/changelog.d/15678.misc
new file mode 100644
index 0000000000..92eae49952
--- /dev/null
+++ b/changelog.d/15678.misc
@@ -0,0 +1 @@
+Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11.
\ No newline at end of file
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index 035a14171b..c5c71e242f 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -528,6 +528,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V10,
         RoomVersions.MSC2716v4,
         RoomVersions.MSC3989,
+        RoomVersions.MSC3820opt2,
     )
 }

From c835befd10ae0087c3c54a36989ba347313b68af Mon Sep 17 00:00:00 2001
From: Jason Little
Date: Fri, 26 May 2023 14:28:39 -0500
Subject: [PATCH 11/21] Add Unix socket support for Redis connections (#15644)

Adds a new configuration setting to connect to Redis via a Unix socket
instead of over TCP. Disabled by default.
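To illustrate the new setting, a minimal sketch of the `redis` block in `homeserver.yaml` using the Unix socket option added by this patch (the socket path is an example; per the documentation change below, setting `path` causes `host` and `port` to be ignored):

```yaml
redis:
  enabled: true
  # Connect over a local Unix socket instead of TCP; host/port are ignored
  # when path is set. The path below is illustrative.
  path: /run/redis/redis.sock
```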
---
 changelog.d/15644.feature | 1 +
 .../configuration/config_documentation.md | 4 ++
 stubs/txredisapi.pyi | 3 +
 synapse/config/redis.py | 1 +
 synapse/replication/tcp/handler.py | 10 ++-
 synapse/replication/tcp/redis.py | 62 ++++++++++++++++---
 synapse/server.py | 42 ++++++++-----
 7 files changed, 100 insertions(+), 23 deletions(-)
 create mode 100644 changelog.d/15644.feature

diff --git a/changelog.d/15644.feature b/changelog.d/15644.feature
new file mode 100644
index 0000000000..1b6126af53
--- /dev/null
+++ b/changelog.d/15644.feature
@@ -0,0 +1 @@
+Add Unix socket support for Redis connections. Contributed by Jason Little.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 93b132b6e4..5ede6d0a82 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -3979,6 +3979,8 @@ This setting has the following sub-options:
 * `enabled`: whether to use Redis support. Defaults to false.
 * `host` and `port`: Optional host and port to use to connect to redis. Defaults to
   localhost and 6379
+* `path`: The full path to a local Unix socket file. **If this is used, `host` and
+  `port` are ignored.** Defaults to `/tmp/redis.sock`
 * `password`: Optional password if configured on the Redis instance.
 * `dbid`: Optional redis dbid if needs to connect to specific redis logical db.
 * `use_tls`: Whether to use tls connection. Defaults to false.
@@ -3991,6 +3993,8 @@ This setting has the following sub-options:
 
 _Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_
 
+_Changed in Synapse 1.85.0: Added path option to use a local Unix socket_
+
 Example configuration:
 ```yaml
 redis:
diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi
index 695a2307c2..b7bd59d2ea 100644
--- a/stubs/txredisapi.pyi
+++ b/stubs/txredisapi.pyi
@@ -61,6 +61,9 @@ def lazyConnection(
 # most methods to it via ConnectionHandler.__getattr__.
 class ConnectionHandler(RedisProtocol):
     def disconnect(self) -> "Deferred[None]": ...
+    def __repr__(self) -> str: ...
+
+class UnixConnectionHandler(ConnectionHandler): ...
class RedisFactory(protocol.ReconnectingClientFactory): continueTrying: bool diff --git a/synapse/config/redis.py b/synapse/config/redis.py index 636cb450b8..3c4c499e22 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -33,6 +33,7 @@ class RedisConfig(Config): self.redis_host = redis_config.get("host", "localhost") self.redis_port = redis_config.get("port", 6379) + self.redis_path = redis_config.get("path", None) self.redis_dbid = redis_config.get("dbid", None) self.redis_password = redis_config.get("password") diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 233ad61d49..5d108fe11b 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -352,7 +352,15 @@ class ReplicationCommandHandler: reactor = hs.get_reactor() redis_config = hs.config.redis - if hs.config.redis.redis_use_tls: + if redis_config.redis_path is not None: + reactor.connectUNIX( + redis_config.redis_path, + self._factory, + timeout=30, + checkPID=False, + ) + + elif hs.config.redis.redis_use_tls: ssl_context_factory = ClientContextFactory(hs.config.redis) reactor.connectSSL( redis_config.redis_host, diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index c8f4bf8b27..7e96145b3b 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -17,7 +17,12 @@ from inspect import isawaitable from typing import TYPE_CHECKING, Any, Generic, List, Optional, Type, TypeVar, cast import attr -import txredisapi +from txredisapi import ( + ConnectionHandler, + RedisFactory, + SubscriberProtocol, + UnixConnectionHandler, +) from zope.interface import implementer from twisted.internet.address import IPv4Address, IPv6Address @@ -68,7 +73,7 @@ class ConstantProperty(Generic[T, V]): @implementer(IReplicationConnection) -class RedisSubscriber(txredisapi.SubscriberProtocol): +class RedisSubscriber(SubscriberProtocol): """Connection to redis subscribed to replication stream. This class fulfils two functions: @@ -95,7 +100,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol): synapse_handler: "ReplicationCommandHandler" synapse_stream_prefix: str synapse_channel_names: List[str] - synapse_outbound_redis_connection: txredisapi.ConnectionHandler + synapse_outbound_redis_connection: ConnectionHandler def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) @@ -229,7 +234,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol): ) -class SynapseRedisFactory(txredisapi.RedisFactory): +class SynapseRedisFactory(RedisFactory): """A subclass of RedisFactory that periodically sends pings to ensure that we detect dead connections. """ @@ -245,7 +250,7 @@ class SynapseRedisFactory(txredisapi.RedisFactory): dbid: Optional[int], poolsize: int, isLazy: bool = False, - handler: Type = txredisapi.ConnectionHandler, + handler: Type = ConnectionHandler, charset: str = "utf-8", password: Optional[str] = None, replyTimeout: int = 30, @@ -326,7 +331,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory): def __init__( self, hs: "HomeServer", - outbound_redis_connection: txredisapi.ConnectionHandler, + outbound_redis_connection: ConnectionHandler, channel_names: List[str], ): super().__init__( @@ -368,7 +373,7 @@ def lazyConnection( reconnect: bool = True, password: Optional[str] = None, replyTimeout: int = 30, -) -> txredisapi.ConnectionHandler: +) -> ConnectionHandler: """Creates a connection to Redis that is lazily set up and reconnects if the connections is lost. 
""" @@ -380,7 +385,7 @@ def lazyConnection( dbid=dbid, poolsize=1, isLazy=True, - handler=txredisapi.ConnectionHandler, + handler=ConnectionHandler, password=password, replyTimeout=replyTimeout, ) @@ -408,3 +413,44 @@ def lazyConnection( ) return factory.handler + + +def lazyUnixConnection( + hs: "HomeServer", + path: str = "/tmp/redis.sock", + dbid: Optional[int] = None, + reconnect: bool = True, + password: Optional[str] = None, + replyTimeout: int = 30, +) -> ConnectionHandler: + """Creates a connection to Redis that is lazily set up and reconnects if the + connection is lost. + + Returns: + A subclass of ConnectionHandler, which is a UnixConnectionHandler in this case. + """ + + uuid = path + + factory = SynapseRedisFactory( + hs, + uuid=uuid, + dbid=dbid, + poolsize=1, + isLazy=True, + handler=UnixConnectionHandler, + password=password, + replyTimeout=replyTimeout, + ) + factory.continueTrying = reconnect + + reactor = hs.get_reactor() + + reactor.connectUNIX( + path, + factory, + timeout=30, + checkPID=False, + ) + + return factory.handler diff --git a/synapse/server.py b/synapse/server.py index f6e245569c..cce5fb66ff 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -864,22 +864,36 @@ class HomeServer(metaclass=abc.ABCMeta): # We only want to import redis module if we're using it, as we have # `txredisapi` as an optional dependency. - from synapse.replication.tcp.redis import lazyConnection + from synapse.replication.tcp.redis import lazyConnection, lazyUnixConnection - logger.info( - "Connecting to redis (host=%r port=%r) for external cache", - self.config.redis.redis_host, - self.config.redis.redis_port, - ) + if self.config.redis.redis_path is None: + logger.info( + "Connecting to redis (host=%r port=%r) for external cache", + self.config.redis.redis_host, + self.config.redis.redis_port, + ) - return lazyConnection( - hs=self, - host=self.config.redis.redis_host, - port=self.config.redis.redis_port, - dbid=self.config.redis.redis_dbid, - password=self.config.redis.redis_password, - reconnect=True, - ) + return lazyConnection( + hs=self, + host=self.config.redis.redis_host, + port=self.config.redis.redis_port, + dbid=self.config.redis.redis_dbid, + password=self.config.redis.redis_password, + reconnect=True, + ) + else: + logger.info( + "Connecting to redis (path=%r) for external cache", + self.config.redis.redis_path, + ) + + return lazyUnixConnection( + hs=self, + path=self.config.redis.redis_path, + dbid=self.config.redis.redis_dbid, + password=self.config.redis.redis_password, + reconnect=True, + ) def should_send_federation(self) -> bool: "Should this server be sending federation traffic directly?" From 4f07c2a170aceb8f0ede67f654805d55301b422e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:07:25 -0400 Subject: [PATCH 12/21] Bump types-pyyaml from 6.0.12.9 to 6.0.12.10 (#15683) --- changelog.d/15683.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15683.misc diff --git a/changelog.d/15683.misc b/changelog.d/15683.misc new file mode 100644 index 0000000000..147f13b99c --- /dev/null +++ b/changelog.d/15683.misc @@ -0,0 +1 @@ +Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. 
diff --git a/poetry.lock b/poetry.lock index 3f8bf7c304..83ea43b59a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3097,14 +3097,14 @@ cryptography = ">=35.0.0" [[package]] name = "types-pyyaml" -version = "6.0.12.9" +version = "6.0.12.10" description = "Typing stubs for PyYAML" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-PyYAML-6.0.12.9.tar.gz", hash = "sha256:c51b1bd6d99ddf0aa2884a7a328810ebf70a4262c292195d3f4f9a0005f9eeb6"}, - {file = "types_PyYAML-6.0.12.9-py3-none-any.whl", hash = "sha256:5aed5aa66bd2d2e158f75dda22b059570ede988559f030cf294871d3b647e3e8"}, + {file = "types-PyYAML-6.0.12.10.tar.gz", hash = "sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97"}, + {file = "types_PyYAML-6.0.12.10-py3-none-any.whl", hash = "sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f"}, ] [[package]] From ea634a9f811fe768efec51edab5b9a9af6ef53e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:13:40 -0400 Subject: [PATCH 13/21] Bump prometheus-client from 0.16.0 to 0.17.0 (#15682) --- changelog.d/15682.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15682.misc diff --git a/changelog.d/15682.misc b/changelog.d/15682.misc new file mode 100644 index 0000000000..687af7d8d7 --- /dev/null +++ b/changelog.d/15682.misc @@ -0,0 +1 @@ +Bump prometheus-client from 0.16.0 to 0.17.0. diff --git a/poetry.lock b/poetry.lock index 83ea43b59a..ecf704ea93 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1781,14 +1781,14 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes [[package]] name = "prometheus-client" -version = "0.16.0" +version = "0.17.0" description = "Python client for the Prometheus monitoring system." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "prometheus_client-0.16.0-py3-none-any.whl", hash = "sha256:0836af6eb2c8f4fed712b2f279f6c0a8bbab29f9f4aa15276b91c7cb0d1616ab"}, - {file = "prometheus_client-0.16.0.tar.gz", hash = "sha256:a03e35b359f14dd1630898543e2120addfdeacd1a6069c1367ae90fd93ad3f48"}, + {file = "prometheus_client-0.17.0-py3-none-any.whl", hash = "sha256:a77b708cf083f4d1a3fb3ce5c95b4afa32b9c521ae363354a4a910204ea095ce"}, + {file = "prometheus_client-0.17.0.tar.gz", hash = "sha256:9c3b26f1535945e85b8934fb374678d263137b78ef85f305b1156c7c881cd11b"}, ] [package.extras] From eb48b10f4fa28ee9839a2b42418889b47c7c36bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:14:58 -0400 Subject: [PATCH 14/21] Bump pydantic from 1.10.7 to 1.10.8 (#15685) --- changelog.d/15685.misc | 1 + poetry.lock | 74 +++++++++++++++++++++--------------------- 2 files changed, 38 insertions(+), 37 deletions(-) create mode 100644 changelog.d/15685.misc diff --git a/changelog.d/15685.misc b/changelog.d/15685.misc new file mode 100644 index 0000000000..7d4cf65bf3 --- /dev/null +++ b/changelog.d/15685.misc @@ -0,0 +1 @@ +Bump pydantic from 1.10.7 to 1.10.8. 
diff --git a/poetry.lock b/poetry.lock index ecf704ea93..60f09219fe 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1887,48 +1887,48 @@ files = [ [[package]] name = "pydantic" -version = "1.10.7" +version = "1.10.8" description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e79e999e539872e903767c417c897e729e015872040e56b96e67968c3b918b2d"}, - {file = "pydantic-1.10.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01aea3a42c13f2602b7ecbbea484a98169fb568ebd9e247593ea05f01b884b2e"}, - {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:516f1ed9bc2406a0467dd777afc636c7091d71f214d5e413d64fef45174cfc7a"}, - {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae150a63564929c675d7f2303008d88426a0add46efd76c3fc797cd71cb1b46f"}, - {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ecbbc51391248116c0a055899e6c3e7ffbb11fb5e2a4cd6f2d0b93272118a209"}, - {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f4a2b50e2b03d5776e7f21af73e2070e1b5c0d0df255a827e7c632962f8315af"}, - {file = "pydantic-1.10.7-cp310-cp310-win_amd64.whl", hash = "sha256:a7cd2251439988b413cb0a985c4ed82b6c6aac382dbaff53ae03c4b23a70e80a"}, - {file = "pydantic-1.10.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:68792151e174a4aa9e9fc1b4e653e65a354a2fa0fed169f7b3d09902ad2cb6f1"}, - {file = "pydantic-1.10.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe2507b8ef209da71b6fb5f4e597b50c5a34b78d7e857c4f8f3115effaef5fe"}, - {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10a86d8c8db68086f1e30a530f7d5f83eb0685e632e411dbbcf2d5c0150e8dcd"}, - {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75ae19d2a3dbb146b6f324031c24f8a3f52ff5d6a9f22f0683694b3afcb16fb"}, - {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:464855a7ff7f2cc2cf537ecc421291b9132aa9c79aef44e917ad711b4a93163b"}, - {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:193924c563fae6ddcb71d3f06fa153866423ac1b793a47936656e806b64e24ca"}, - {file = "pydantic-1.10.7-cp311-cp311-win_amd64.whl", hash = "sha256:b4a849d10f211389502059c33332e91327bc154acc1845f375a99eca3afa802d"}, - {file = "pydantic-1.10.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cc1dde4e50a5fc1336ee0581c1612215bc64ed6d28d2c7c6f25d2fe3e7c3e918"}, - {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0cfe895a504c060e5d36b287ee696e2fdad02d89e0d895f83037245218a87fe"}, - {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:670bb4683ad1e48b0ecb06f0cfe2178dcf74ff27921cdf1606e527d2617a81ee"}, - {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:950ce33857841f9a337ce07ddf46bc84e1c4946d2a3bba18f8280297157a3fd1"}, - {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c15582f9055fbc1bfe50266a19771bbbef33dd28c45e78afbe1996fd70966c2a"}, - {file = "pydantic-1.10.7-cp37-cp37m-win_amd64.whl", hash = "sha256:82dffb306dd20bd5268fd6379bc4bfe75242a9c2b79fec58e1041fbbdb1f7914"}, - {file = 
"pydantic-1.10.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c7f51861d73e8b9ddcb9916ae7ac39fb52761d9ea0df41128e81e2ba42886cd"}, - {file = "pydantic-1.10.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6434b49c0b03a51021ade5c4daa7d70c98f7a79e95b551201fff682fc1661245"}, - {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64d34ab766fa056df49013bb6e79921a0265204c071984e75a09cbceacbbdd5d"}, - {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:701daea9ffe9d26f97b52f1d157e0d4121644f0fcf80b443248434958fd03dc3"}, - {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf135c46099ff3f919d2150a948ce94b9ce545598ef2c6c7bf55dca98a304b52"}, - {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0f85904f73161817b80781cc150f8b906d521fa11e3cdabae19a581c3606209"}, - {file = "pydantic-1.10.7-cp38-cp38-win_amd64.whl", hash = "sha256:9f6f0fd68d73257ad6685419478c5aece46432f4bdd8d32c7345f1986496171e"}, - {file = "pydantic-1.10.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c230c0d8a322276d6e7b88c3f7ce885f9ed16e0910354510e0bae84d54991143"}, - {file = "pydantic-1.10.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:976cae77ba6a49d80f461fd8bba183ff7ba79f44aa5cfa82f1346b5626542f8e"}, - {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d45fc99d64af9aaf7e308054a0067fdcd87ffe974f2442312372dfa66e1001d"}, - {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2a5ebb48958754d386195fe9e9c5106f11275867051bf017a8059410e9abf1f"}, - {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:abfb7d4a7cd5cc4e1d1887c43503a7c5dd608eadf8bc615413fc498d3e4645cd"}, - {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:80b1fab4deb08a8292d15e43a6edccdffa5377a36a4597bb545b93e79c5ff0a5"}, - {file = "pydantic-1.10.7-cp39-cp39-win_amd64.whl", hash = "sha256:d71e69699498b020ea198468e2480a2f1e7433e32a3a99760058c6520e2bea7e"}, - {file = "pydantic-1.10.7-py3-none-any.whl", hash = "sha256:0cd181f1d0b1d00e2b705f1bf1ac7799a2d938cce3376b8007df62b29be3c2c6"}, - {file = "pydantic-1.10.7.tar.gz", hash = "sha256:cfc83c0678b6ba51b0532bea66860617c4cd4251ecf76e9846fa5a9f3454e97e"}, + {file = "pydantic-1.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1243d28e9b05003a89d72e7915fdb26ffd1d39bdd39b00b7dbe4afae4b557f9d"}, + {file = "pydantic-1.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0ab53b609c11dfc0c060d94335993cc2b95b2150e25583bec37a49b2d6c6c3f"}, + {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9613fadad06b4f3bc5db2653ce2f22e0de84a7c6c293909b48f6ed37b83c61f"}, + {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df7800cb1984d8f6e249351139667a8c50a379009271ee6236138a22a0c0f319"}, + {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0c6fafa0965b539d7aab0a673a046466d23b86e4b0e8019d25fd53f4df62c277"}, + {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e82d4566fcd527eae8b244fa952d99f2ca3172b7e97add0b43e2d97ee77f81ab"}, + {file = "pydantic-1.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:ab523c31e22943713d80d8d342d23b6f6ac4b792a1e54064a8d0cf78fd64e800"}, + {file = 
"pydantic-1.10.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:666bdf6066bf6dbc107b30d034615d2627e2121506c555f73f90b54a463d1f33"}, + {file = "pydantic-1.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:35db5301b82e8661fa9c505c800d0990bc14e9f36f98932bb1d248c0ac5cada5"}, + {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90c1e29f447557e9e26afb1c4dbf8768a10cc676e3781b6a577841ade126b85"}, + {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93e766b4a8226e0708ef243e843105bf124e21331694367f95f4e3b4a92bbb3f"}, + {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88f195f582851e8db960b4a94c3e3ad25692c1c1539e2552f3df7a9e972ef60e"}, + {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:34d327c81e68a1ecb52fe9c8d50c8a9b3e90d3c8ad991bfc8f953fb477d42fb4"}, + {file = "pydantic-1.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:d532bf00f381bd6bc62cabc7d1372096b75a33bc197a312b03f5838b4fb84edd"}, + {file = "pydantic-1.10.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7d5b8641c24886d764a74ec541d2fc2c7fb19f6da2a4001e6d580ba4a38f7878"}, + {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1f6cb446470b7ddf86c2e57cd119a24959af2b01e552f60705910663af09a4"}, + {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c33b60054b2136aef8cf190cd4c52a3daa20b2263917c49adad20eaf381e823b"}, + {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1952526ba40b220b912cdc43c1c32bcf4a58e3f192fa313ee665916b26befb68"}, + {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bb14388ec45a7a0dc429e87def6396f9e73c8c77818c927b6a60706603d5f2ea"}, + {file = "pydantic-1.10.8-cp37-cp37m-win_amd64.whl", hash = "sha256:16f8c3e33af1e9bb16c7a91fc7d5fa9fe27298e9f299cff6cb744d89d573d62c"}, + {file = "pydantic-1.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ced8375969673929809d7f36ad322934c35de4af3b5e5b09ec967c21f9f7887"}, + {file = "pydantic-1.10.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93e6bcfccbd831894a6a434b0aeb1947f9e70b7468f274154d03d71fabb1d7c6"}, + {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:191ba419b605f897ede9892f6c56fb182f40a15d309ef0142212200a10af4c18"}, + {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:052d8654cb65174d6f9490cc9b9a200083a82cf5c3c5d3985db765757eb3b375"}, + {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ceb6a23bf1ba4b837d0cfe378329ad3f351b5897c8d4914ce95b85fba96da5a1"}, + {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f2e754d5566f050954727c77f094e01793bcb5725b663bf628fa6743a5a9108"}, + {file = "pydantic-1.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:6a82d6cda82258efca32b40040228ecf43a548671cb174a1e81477195ed3ed56"}, + {file = "pydantic-1.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e59417ba8a17265e632af99cc5f35ec309de5980c440c255ab1ca3ae96a3e0e"}, + {file = "pydantic-1.10.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84d80219c3f8d4cad44575e18404099c76851bc924ce5ab1c4c8bb5e2a2227d0"}, + {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2e4148e635994d57d834be1182a44bdb07dd867fa3c2d1b37002000646cc5459"}, + {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12f7b0bf8553e310e530e9f3a2f5734c68699f42218bf3568ef49cd9b0e44df4"}, + {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42aa0c4b5c3025483240a25b09f3c09a189481ddda2ea3a831a9d25f444e03c1"}, + {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17aef11cc1b997f9d574b91909fed40761e13fac438d72b81f902226a69dac01"}, + {file = "pydantic-1.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:66a703d1983c675a6e0fed8953b0971c44dba48a929a2000a493c3772eb61a5a"}, + {file = "pydantic-1.10.8-py3-none-any.whl", hash = "sha256:7456eb22ed9aaa24ff3e7b4757da20d9e5ce2a81018c1b3ebd81a0b88a18f3b2"}, + {file = "pydantic-1.10.8.tar.gz", hash = "sha256:1410275520dfa70effadf4c21811d755e7ef9bb1f1d077a21958153a92c8d9ca"}, ] [package.dependencies] From 04798b710dc2cc8cf5a8cfb8a454f03cbfa8840c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:15:49 -0400 Subject: [PATCH 15/21] Bump log from 0.4.17 to 0.4.18 (#15681) --- Cargo.lock | 7 ++----- changelog.d/15681.misc | 1 + 2 files changed, 3 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15681.misc diff --git a/Cargo.lock b/Cargo.lock index e169a665b6..08331385c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,12 +132,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" [[package]] name = "memchr" diff --git a/changelog.d/15681.misc b/changelog.d/15681.misc new file mode 100644 index 0000000000..2de551dd63 --- /dev/null +++ b/changelog.d/15681.misc @@ -0,0 +1 @@ +Bump log from 0.4.17 to 0.4.18. From 2b6c9150dca9fa1884c0f2e27d5ee268be243c2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 11:03:58 +0100 Subject: [PATCH 16/21] Bump types-requests from 2.30.0.0 to 2.31.0.0 (#15684) * Bump types-requests from 2.30.0.0 to 2.31.0.0 Bumps [types-requests](https://github.com/python/typeshed) from 2.30.0.0 to 2.31.0.0. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-requests dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15684.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15684.misc diff --git a/changelog.d/15684.misc b/changelog.d/15684.misc new file mode 100644 index 0000000000..4c2edf87fd --- /dev/null +++ b/changelog.d/15684.misc @@ -0,0 +1 @@ +Bump types-requests from 2.30.0.0 to 2.31.0.0. 
diff --git a/poetry.lock b/poetry.lock index 60f09219fe..4057ef04e3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3109,14 +3109,14 @@ files = [ [[package]] name = "types-requests" -version = "2.30.0.0" +version = "2.31.0.0" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.30.0.0.tar.gz", hash = "sha256:dec781054324a70ba64430ae9e62e7e9c8e4618c185a5cb3f87a6738251b5a31"}, - {file = "types_requests-2.30.0.0-py3-none-any.whl", hash = "sha256:c6cf08e120ca9f0dc4fa4e32c3f953c3fba222bcc1db6b97695bce8da1ba9864"}, + {file = "types-requests-2.31.0.0.tar.gz", hash = "sha256:c1c29d20ab8d84dff468d7febfe8e0cb0b4664543221b386605e14672b44ea25"}, + {file = "types_requests-2.31.0.0-py3-none-any.whl", hash = "sha256:7c5cea7940f8e92ec560bbc468f65bf684aa3dcf0554a6f8c4710f5f708dc598"}, ] [package.dependencies] From 626bd75f4847f36747c162348e309b65cc1646b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 11:13:04 +0100 Subject: [PATCH 17/21] Bump types-bleach from 6.0.0.1 to 6.0.0.3 (#15686) * Bump types-bleach from 6.0.0.1 to 6.0.0.3 Bumps [types-bleach](https://github.com/python/typeshed) from 6.0.0.1 to 6.0.0.3. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-bleach dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions Co-authored-by: Patrick Cloke Co-authored-by: David Robertson --- changelog.d/15686.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15686.misc diff --git a/changelog.d/15686.misc b/changelog.d/15686.misc new file mode 100644 index 0000000000..feacbf35d6 --- /dev/null +++ b/changelog.d/15686.misc @@ -0,0 +1 @@ +Bump types-bleach from 6.0.0.1 to 6.0.0.3. 
diff --git a/poetry.lock b/poetry.lock index 4057ef04e3..0879e64cf1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2998,14 +2998,14 @@ files = [ [[package]] name = "types-bleach" -version = "6.0.0.1" +version = "6.0.0.3" description = "Typing stubs for bleach" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-bleach-6.0.0.1.tar.gz", hash = "sha256:43d9129deb9e82918747437edf78f09ff440f2973f4702625b61994f3e698518"}, - {file = "types_bleach-6.0.0.1-py3-none-any.whl", hash = "sha256:440df967254007be80bb0f4d851f026c29c709cc48359bf4935d2b2f3a6f9f90"}, + {file = "types-bleach-6.0.0.3.tar.gz", hash = "sha256:8ce7896d4f658c562768674ffcf07492c7730e128018f03edd163ff912bfadee"}, + {file = "types_bleach-6.0.0.3-py3-none-any.whl", hash = "sha256:d43eaf30a643ca824e16e2dcdb0c87ef9226237e2fa3ac4732a50cb3f32e145f"}, ] [[package]] From 42786d8a477b6d44075b0e56949820331d9962d8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 13:54:50 +0100 Subject: [PATCH 18/21] Create dependabot changelogs at release time (#15481) * Ditch dependabot changelog workflow * Summarise dependabot commits in release script * Changelog * Update scripts-dev/release.py --- .github/workflows/dependabot_changelog.yml | 49 -------------------- changelog.d/15481.misc | 1 + docs/development/dependencies.md | 12 ++--- scripts-dev/release.py | 52 ++++++++++++++++++++-- 4 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 .github/workflows/dependabot_changelog.yml create mode 100644 changelog.d/15481.misc diff --git a/.github/workflows/dependabot_changelog.yml b/.github/workflows/dependabot_changelog.yml deleted file mode 100644 index df47e3dcba..0000000000 --- a/.github/workflows/dependabot_changelog.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Write changelog for dependabot PR -on: - pull_request: - types: - - opened - - reopened # For debugging! - -permissions: - # Needed to be able to push the commit. See - # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request - # for a similar example - contents: write - -jobs: - add-changelog: - runs-on: 'ubuntu-latest' - if: ${{ github.actor == 'dependabot[bot]' }} - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.ref }} - - name: Write, commit and push changelog - env: - PR_TITLE: ${{ github.event.pull_request.title }} - PR_NUMBER: ${{ github.event.pull_request.number }} - run: | - echo "${PR_TITLE}." > "changelog.d/${PR_NUMBER}".misc - git add changelog.d - git config user.email "github-actions[bot]@users.noreply.github.com" - git config user.name "GitHub Actions" - git commit -m "Changelog" - git push - shell: bash - # The `git push` above does not trigger CI on the dependabot PR. - # - # By default, workflows can't trigger other workflows when they're just using the - # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing - # recursive workflow loops by accident, because that'll get very expensive very - # quickly.) Instead, you have to manually call out to another workflow, or else - # make your changes (i.e. the `git push` above) using a personal access token. - # See - # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow - # - # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR. - # See git commit history for previous attempts. 
If anyone desperately wants to try
-  # again in the future, make a matrix-bot account and use its access token to git push.
-
-  # THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
-  # are sufficiently locked down to dependabot only as above.
diff --git a/changelog.d/15481.misc b/changelog.d/15481.misc
new file mode 100644
index 0000000000..a6e088c164
--- /dev/null
+++ b/changelog.d/15481.misc
@@ -0,0 +1 @@
+Create dependabot changelogs at release time.
diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md
index c4449c51f7..b5926d96ff 100644
--- a/docs/development/dependencies.md
+++ b/docs/development/dependencies.md
@@ -260,15 +260,17 @@ doesn't require poetry. (It's what we use in CI too). However, you could try
 
 ## ...handle a Dependabot pull request?
 
-Synapse uses Dependabot to keep the `poetry.lock` file up-to-date. When it
-creates a pull request a GitHub Action will run to automatically create a changelog
-file. Ensure that:
+Synapse uses Dependabot to keep the `poetry.lock` and `Cargo.lock` files
+up-to-date with the latest releases of our dependencies. The changelog check is
+omitted for Dependabot PRs; the release script will include them in the
+changelog.
+
+When reviewing a Dependabot PR, ensure that:
 
 * the lockfile changes look reasonable;
 * the upstream changelog file (linked in the description) doesn't include any
   breaking changes;
-* continuous integration passes (due to permissions, the GitHub Actions run on
-  the changelog commit will fail, look at the initial commit of the pull request);
+* continuous integration passes.
 
 In particular, any updates to the type hints (usually packages which start
 with `types-`) should be safe to merge if linting passes.
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index ec92a59bb8..257d1e9ebd 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -27,7 +27,7 @@ import time
 import urllib.request
 from os import path
 from tempfile import TemporaryDirectory
-from typing import Any, List, Optional
+from typing import Any, List, Match, Optional, Union
 
 import attr
 import click
@@ -233,7 +233,7 @@ def _prepare() -> None:
     subprocess.check_output(["poetry", "version", new_version])
 
     # Generate changelogs.
-    generate_and_write_changelog(current_version, new_version)
+    generate_and_write_changelog(synapse_repo, current_version, new_version)
 
     # Generate debian changelogs
     if parsed_new_version.pre is not None:
@@ -814,7 +814,7 @@ def get_changes_for_version(wanted_version: version.Version) -> str:
 
 def generate_and_write_changelog(
-    current_version: version.Version, new_version: str
+    repo: Repo, current_version: version.Version, new_version: str
 ) -> None:
     # We do this by getting a draft so that we can edit it before writing to the
     # changelog.
@@ -827,6 +827,10 @@ def generate_and_write_changelog(
     new_changes = new_changes.replace(
         "No significant changes.", f"No significant changes since {current_version}."
     )
+    new_changes += build_dependabot_changelog(
+        repo,
+        current_version,
+    )
 
     # Prepend changes to changelog
     with open("CHANGES.md", "r+") as f:
@@ -841,5 +845,47 @@ def generate_and_write_changelog(
             os.remove(filename)
 
 
+def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> str:
+    """Summarise dependabot commits between `current_version` and `release_branch`.
+ + Returns an empty string if there have been no such commits; otherwise outputs a + third-level markdown header followed by an unordered list.""" + last_release_commit = repo.tag("v" + str(current_version)).commit + rev_spec = f"{last_release_commit.hexsha}.." + commits = list(git.objects.Commit.iter_items(repo, rev_spec)) + messages = [] + for commit in reversed(commits): + if commit.author.name == "dependabot[bot]": + message: Union[str, bytes] = commit.message + if isinstance(message, bytes): + message = message.decode("utf-8") + messages.append(message.split("\n", maxsplit=1)[0]) + + if not messages: + print(f"No dependabot commits in range {rev_spec}", file=sys.stderr) + return "" + + messages.sort() + + def replacer(match: Match[str]) -> str: + desc = match.group(1) + number = match.group(2) + return f"* {desc}. ([\\#{number}](https://github.com/matrix-org/synapse/issues/{number}))" + + for i, message in enumerate(messages): + messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message) + messages.insert(0, "### Updates to locked dependencies\n") + return "\n".join(messages) + + +@cli.command() +@click.argument("since") +def test_dependabot_changelog(since: str) -> None: + """Test building the dependabot changelog. + + Summarises all dependabot commits between the SINCE tag and the current git HEAD.""" + print(build_dependabot_changelog(git.Repo("."), version.Version(since))) + + if __name__ == "__main__": cli() From a103b874dddc6246b06b168992fbdb7aaeb0183f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 14:03:22 +0100 Subject: [PATCH 19/21] 1.85.0rc1 --- CHANGES.md | 75 +++++++++++++++++++++++++++++++++++++++ changelog.d/10428.removal | 1 - changelog.d/15464.bugfix | 1 - changelog.d/15481.misc | 1 - changelog.d/15537.misc | 1 - changelog.d/15578.misc | 1 - changelog.d/15585.feature | 1 - changelog.d/15597.misc | 1 - changelog.d/15599.bugfix | 1 - changelog.d/15601.bugfix | 1 - changelog.d/15602.misc | 1 - changelog.d/15604.misc | 1 - changelog.d/15606.misc | 1 - changelog.d/15607.bugfix | 1 - changelog.d/15610.misc | 1 - changelog.d/15611.feature | 1 - changelog.d/15613.doc | 1 - changelog.d/15614.bugfix | 1 - changelog.d/15615.misc | 1 - changelog.d/15620.misc | 1 - changelog.d/15621.misc | 1 - changelog.d/15624.bugfix | 1 - changelog.d/15625.misc | 1 - changelog.d/15626.misc | 1 - changelog.d/15630.misc | 1 - changelog.d/15633.misc | 1 - changelog.d/15634.bugfix | 1 - changelog.d/15636.misc | 1 - changelog.d/15639.misc | 1 - changelog.d/15640.misc | 1 - changelog.d/15641.misc | 1 - changelog.d/15642.misc | 1 - changelog.d/15643.misc | 1 - changelog.d/15644.feature | 1 - changelog.d/15646.misc | 1 - changelog.d/15647.bugfix | 1 - changelog.d/15648.doc | 1 - changelog.d/15651.misc | 1 - changelog.d/15658.misc | 1 - changelog.d/15659.misc | 1 - changelog.d/15663.misc | 1 - changelog.d/15665.misc | 1 - changelog.d/15666.misc | 1 - changelog.d/15668.doc | 1 - changelog.d/15678.misc | 1 - changelog.d/15681.misc | 1 - changelog.d/15682.misc | 1 - changelog.d/15683.misc | 1 - changelog.d/15684.misc | 1 - changelog.d/15685.misc | 1 - changelog.d/15686.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 53 files changed, 82 insertions(+), 51 deletions(-) delete mode 100644 changelog.d/10428.removal delete mode 100644 changelog.d/15464.bugfix delete mode 100644 changelog.d/15481.misc delete mode 100644 changelog.d/15537.misc delete mode 100644 changelog.d/15578.misc delete mode 100644 changelog.d/15585.feature delete mode 100644 changelog.d/15597.misc delete 
mode 100644 changelog.d/15599.bugfix delete mode 100644 changelog.d/15601.bugfix delete mode 100644 changelog.d/15602.misc delete mode 100644 changelog.d/15604.misc delete mode 100644 changelog.d/15606.misc delete mode 100644 changelog.d/15607.bugfix delete mode 100644 changelog.d/15610.misc delete mode 100644 changelog.d/15611.feature delete mode 100644 changelog.d/15613.doc delete mode 100644 changelog.d/15614.bugfix delete mode 100644 changelog.d/15615.misc delete mode 100644 changelog.d/15620.misc delete mode 100644 changelog.d/15621.misc delete mode 100644 changelog.d/15624.bugfix delete mode 100644 changelog.d/15625.misc delete mode 100644 changelog.d/15626.misc delete mode 100644 changelog.d/15630.misc delete mode 100644 changelog.d/15633.misc delete mode 100644 changelog.d/15634.bugfix delete mode 100644 changelog.d/15636.misc delete mode 100644 changelog.d/15639.misc delete mode 100644 changelog.d/15640.misc delete mode 100644 changelog.d/15641.misc delete mode 100644 changelog.d/15642.misc delete mode 100644 changelog.d/15643.misc delete mode 100644 changelog.d/15644.feature delete mode 100644 changelog.d/15646.misc delete mode 100644 changelog.d/15647.bugfix delete mode 100644 changelog.d/15648.doc delete mode 100644 changelog.d/15651.misc delete mode 100644 changelog.d/15658.misc delete mode 100644 changelog.d/15659.misc delete mode 100644 changelog.d/15663.misc delete mode 100644 changelog.d/15665.misc delete mode 100644 changelog.d/15666.misc delete mode 100644 changelog.d/15668.doc delete mode 100644 changelog.d/15678.misc delete mode 100644 changelog.d/15681.misc delete mode 100644 changelog.d/15682.misc delete mode 100644 changelog.d/15683.misc delete mode 100644 changelog.d/15684.misc delete mode 100644 changelog.d/15685.misc delete mode 100644 changelog.d/15686.misc diff --git a/CHANGES.md b/CHANGES.md index 85c9af8ce4..ba0995aa6f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,78 @@ +Synapse 1.85.0rc1 (2023-05-30) +============================== + +Features +-------- + +- Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again. ([\#15585](https://github.com/matrix-org/synapse/issues/15585)) +- Add a new [admin API](https://matrix-org.github.io/synapse/v1.75/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.75/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611)) +- Add Unix socket support for Redis connections. Contributed by Jason Little. ([\#15644](https://github.com/matrix-org/synapse/issues/15644)) + + +Bugfixes +-------- + +- Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). ([\#15464](https://github.com/matrix-org/synapse/issues/15464)) +- Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. ([\#15601](https://github.com/matrix-org/synapse/issues/15601)) +- Fix a long-standing bug where filters with multiple backslashes were rejected. ([\#15607](https://github.com/matrix-org/synapse/issues/15607)) +- Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. 
([\#15614](https://github.com/matrix-org/synapse/issues/15614))
+- Fix a long-standing bug where deactivated users were still able to log in using the custom `org.matrix.login.jwt` login type (if enabled). ([\#15624](https://github.com/matrix-org/synapse/issues/15624))
+- Fix a long-standing bug where deactivated users were able to log in in uncommon situations. ([\#15634](https://github.com/matrix-org/synapse/issues/15634))
+
+
+Improved Documentation
+----------------------
+
+- Warn users that at least 3.75GB of space is needed for the nix Synapse development environment. ([\#15613](https://github.com/matrix-org/synapse/issues/15613))
+- Remove outdated comment from the generated and sample homeserver log configs. ([\#15648](https://github.com/matrix-org/synapse/issues/15648))
+- Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm. ([\#15668](https://github.com/matrix-org/synapse/issues/15668))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the old version of the R30 (30-day retained users) phone-home metric. ([\#10428](https://github.com/matrix-org/synapse/issues/10428))
+
+
+Internal Changes
+----------------
+
+- Create dependabot changelogs at release time. ([\#15481](https://github.com/matrix-org/synapse/issues/15481))
+- Add not null constraint to column `full_user_id` of tables `profiles` and `user_filters`. ([\#15537](https://github.com/matrix-org/synapse/issues/15537))
+- Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. ([\#15578](https://github.com/matrix-org/synapse/issues/15578))
+- Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. ([\#15597](https://github.com/matrix-org/synapse/issues/15597))
+- Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible. ([\#15602](https://github.com/matrix-org/synapse/issues/15602))
+- Fix subscriptable type usage in Python <3.9. ([\#15604](https://github.com/matrix-org/synapse/issues/15604))
+- Update internal terminology. ([\#15606](https://github.com/matrix-org/synapse/issues/15606), [\#15620](https://github.com/matrix-org/synapse/issues/15620))
+- Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. ([\#15610](https://github.com/matrix-org/synapse/issues/15610), [\#15647](https://github.com/matrix-org/synapse/issues/15647))
+- Trace how many new events from the backfill response we need to process. ([\#15633](https://github.com/matrix-org/synapse/issues/15633))
+- Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. ([\#15615](https://github.com/matrix-org/synapse/issues/15615))
+- Update Mutual Rooms (MSC2666) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621))
+- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#15625](https://github.com/matrix-org/synapse/issues/15625))
+- Fix the olddeps CI. ([\#15626](https://github.com/matrix-org/synapse/issues/15626))
+- Remove duplicate timestamp from test logs (`_trial_temp/test.log`). ([\#15636](https://github.com/matrix-org/synapse/issues/15636))
+- Fix two memory leaks in `trial` test runs.
([\#15630](https://github.com/matrix-org/synapse/issues/15630)) +- Limit the size of the `HomeServerConfig` cache in trial test runs. ([\#15646](https://github.com/matrix-org/synapse/issues/15646)) +- Improve type hints. ([\#15658](https://github.com/matrix-org/synapse/issues/15658), [\#15659](https://github.com/matrix-org/synapse/issues/15659)) +- Add requesting user id parameter to key claim methods in `TransportLayerClient`. ([\#15663](https://github.com/matrix-org/synapse/issues/15663)) +- Speed up rebuilding of the user directory for local users. ([\#15665](https://github.com/matrix-org/synapse/issues/15665)) +- Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. ([\#15666](https://github.com/matrix-org/synapse/issues/15666), [\#15678](https://github.com/matrix-org/synapse/issues/15678)) + +### Updates to locked dependencies + +* Bump furo from 2023.3.27 to 2023.5.20. ([\#15642](https://github.com/matrix-org/synapse/issues/15642)) +* Bump log from 0.4.17 to 0.4.18. ([\#15681](https://github.com/matrix-org/synapse/issues/15681)) +* Bump prometheus-client from 0.16.0 to 0.17.0. ([\#15682](https://github.com/matrix-org/synapse/issues/15682)) +* Bump pydantic from 1.10.7 to 1.10.8. ([\#15685](https://github.com/matrix-org/synapse/issues/15685)) +* Bump pygithub from 1.58.1 to 1.58.2. ([\#15643](https://github.com/matrix-org/synapse/issues/15643)) +* Bump requests from 2.28.2 to 2.31.0. ([\#15651](https://github.com/matrix-org/synapse/issues/15651)) +* Bump sphinx from 6.1.3 to 6.2.1. ([\#15641](https://github.com/matrix-org/synapse/issues/15641)) +* Bump types-bleach from 6.0.0.1 to 6.0.0.3. ([\#15686](https://github.com/matrix-org/synapse/issues/15686)) +* Bump types-pillow from 9.5.0.2 to 9.5.0.4. ([\#15640](https://github.com/matrix-org/synapse/issues/15640)) +* Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. ([\#15683](https://github.com/matrix-org/synapse/issues/15683)) +* Bump types-requests from 2.30.0.0 to 2.31.0.0. ([\#15684](https://github.com/matrix-org/synapse/issues/15684)) +* Bump types-setuptools from 67.7.0.2 to 67.8.0.0. ([\#15639](https://github.com/matrix-org/synapse/issues/15639)) + Synapse 1.84.1 (2023-05-26) =========================== diff --git a/changelog.d/10428.removal b/changelog.d/10428.removal deleted file mode 100644 index c056e89585..0000000000 --- a/changelog.d/10428.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the old version of the R30 (30-day retained users) phone-home metric. diff --git a/changelog.d/15464.bugfix b/changelog.d/15464.bugfix deleted file mode 100644 index 3c655989b3..0000000000 --- a/changelog.d/15464.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/15481.misc b/changelog.d/15481.misc deleted file mode 100644 index a6e088c164..0000000000 --- a/changelog.d/15481.misc +++ /dev/null @@ -1 +0,0 @@ -Create dependabot changelogs at release time. diff --git a/changelog.d/15537.misc b/changelog.d/15537.misc deleted file mode 100644 index 979e0ba977..0000000000 --- a/changelog.d/15537.misc +++ /dev/null @@ -1 +0,0 @@ -Add not null constraint to column full_user_id of tables profiles and user_filters. 
diff --git a/changelog.d/15578.misc b/changelog.d/15578.misc
deleted file mode 100644
index a54422239b..0000000000
--- a/changelog.d/15578.misc
+++ /dev/null
@@ -1 +0,0 @@
-Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request.
diff --git a/changelog.d/15585.feature b/changelog.d/15585.feature
deleted file mode 100644
index 1adcfb69ee..0000000000
--- a/changelog.d/15585.feature
+++ /dev/null
@@ -1 +0,0 @@
-Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again.
diff --git a/changelog.d/15597.misc b/changelog.d/15597.misc
deleted file mode 100644
index 2dea23784f..0000000000
--- a/changelog.d/15597.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null.
diff --git a/changelog.d/15599.bugfix b/changelog.d/15599.bugfix
deleted file mode 100644
index b58af8ad55..0000000000
--- a/changelog.d/15599.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Print full error and stack-trace of any exception that occurs during startup/initialization.
diff --git a/changelog.d/15601.bugfix b/changelog.d/15601.bugfix
deleted file mode 100644
index 426db6cea3..0000000000
--- a/changelog.d/15601.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL.
diff --git a/changelog.d/15602.misc b/changelog.d/15602.misc
deleted file mode 100644
index cdd0c039bd..0000000000
--- a/changelog.d/15602.misc
+++ /dev/null
@@ -1 +0,0 @@
-Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible.
diff --git a/changelog.d/15604.misc b/changelog.d/15604.misc
deleted file mode 100644
index 92d1d600bc..0000000000
--- a/changelog.d/15604.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix subscriptable type usage in Python <3.9.
diff --git a/changelog.d/15606.misc b/changelog.d/15606.misc
deleted file mode 100644
index 568c0d3fc5..0000000000
--- a/changelog.d/15606.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update internal terminology.
diff --git a/changelog.d/15607.bugfix b/changelog.d/15607.bugfix
deleted file mode 100644
index a2767adbe2..0000000000
--- a/changelog.d/15607.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where filters with multiple backslashes were rejected.
diff --git a/changelog.d/15610.misc b/changelog.d/15610.misc
deleted file mode 100644
index 2eff30f6e3..0000000000
--- a/changelog.d/15610.misc
+++ /dev/null
@@ -1 +0,0 @@
-Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing.
diff --git a/changelog.d/15611.feature b/changelog.d/15611.feature
deleted file mode 100644
index 7cfb46fd0a..0000000000
--- a/changelog.d/15611.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a new admin API to create a new device for a user.
diff --git a/changelog.d/15613.doc b/changelog.d/15613.doc
deleted file mode 100644
index 94733facf0..0000000000
--- a/changelog.d/15613.doc
+++ /dev/null
@@ -1 +0,0 @@
-Warn users that at least 3.75GB of space is needed for the nix Synapse development environment.
diff --git a/changelog.d/15614.bugfix b/changelog.d/15614.bugfix
deleted file mode 100644
index b523ae6eb1..0000000000
--- a/changelog.d/15614.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted.
diff --git a/changelog.d/15615.misc b/changelog.d/15615.misc
deleted file mode 100644
index a39fd0a098..0000000000
--- a/changelog.d/15615.misc
+++ /dev/null
@@ -1 +0,0 @@
-Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s.
diff --git a/changelog.d/15620.misc b/changelog.d/15620.misc
deleted file mode 100644
index 568c0d3fc5..0000000000
--- a/changelog.d/15620.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update internal terminology.
diff --git a/changelog.d/15621.misc b/changelog.d/15621.misc
deleted file mode 100644
index 5d060f4dbc..0000000000
--- a/changelog.d/15621.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update Mutual Rooms (MSC2666) implementation to match new proposal text.
\ No newline at end of file
diff --git a/changelog.d/15624.bugfix b/changelog.d/15624.bugfix
deleted file mode 100644
index fde515ba62..0000000000
--- a/changelog.d/15624.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where deactivated users were still able to login using the custom `org.matrix.login.jwt` login type (if enabled).
diff --git a/changelog.d/15625.misc b/changelog.d/15625.misc
deleted file mode 100644
index 7ea8cc9433..0000000000
--- a/changelog.d/15625.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706).
diff --git a/changelog.d/15626.misc b/changelog.d/15626.misc
deleted file mode 100644
index 0016cdbf10..0000000000
--- a/changelog.d/15626.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix the olddeps CI.
diff --git a/changelog.d/15630.misc b/changelog.d/15630.misc
deleted file mode 100644
index a30304bfd6..0000000000
--- a/changelog.d/15630.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix two memory leaks in `trial` test runs.
diff --git a/changelog.d/15633.misc b/changelog.d/15633.misc
deleted file mode 100644
index 4126a20602..0000000000
--- a/changelog.d/15633.misc
+++ /dev/null
@@ -1 +0,0 @@
-Trace how many new events from the backfill response we need to process.
diff --git a/changelog.d/15634.bugfix b/changelog.d/15634.bugfix
deleted file mode 100644
index ef39e8a689..0000000000
--- a/changelog.d/15634.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where deactivated users were able to login in uncommon situations.
diff --git a/changelog.d/15636.misc b/changelog.d/15636.misc
deleted file mode 100644
index 82329c5e43..0000000000
--- a/changelog.d/15636.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove duplicate timestamp from test logs (`_trial_temp/test.log`).
diff --git a/changelog.d/15639.misc b/changelog.d/15639.misc
deleted file mode 100644
index 92230e206f..0000000000
--- a/changelog.d/15639.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-setuptools from 67.7.0.2 to 67.8.0.0.
diff --git a/changelog.d/15640.misc b/changelog.d/15640.misc
deleted file mode 100644
index 4c2a3dbc52..0000000000
--- a/changelog.d/15640.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-pillow from 9.5.0.2 to 9.5.0.4.
diff --git a/changelog.d/15641.misc b/changelog.d/15641.misc
deleted file mode 100644
index a85d85c58e..0000000000
--- a/changelog.d/15641.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump sphinx from 6.1.3 to 6.2.1.
diff --git a/changelog.d/15642.misc b/changelog.d/15642.misc
deleted file mode 100644
index 5d6125140d..0000000000
--- a/changelog.d/15642.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump furo from 2023.3.27 to 2023.5.20.
diff --git a/changelog.d/15643.misc b/changelog.d/15643.misc
deleted file mode 100644
index 5bd2e74071..0000000000
--- a/changelog.d/15643.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump pygithub from 1.58.1 to 1.58.2.
diff --git a/changelog.d/15644.feature b/changelog.d/15644.feature
deleted file mode 100644
index 1b6126af53..0000000000
--- a/changelog.d/15644.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add Unix socket support for Redis connections. Contributed by Jason Little.
diff --git a/changelog.d/15646.misc b/changelog.d/15646.misc
deleted file mode 100644
index 872afe30b8..0000000000
--- a/changelog.d/15646.misc
+++ /dev/null
@@ -1 +0,0 @@
-Limit the size of the `HomeServerConfig` cache in trial test runs.
diff --git a/changelog.d/15647.bugfix b/changelog.d/15647.bugfix
deleted file mode 100644
index 2eff30f6e3..0000000000
--- a/changelog.d/15647.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing.
diff --git a/changelog.d/15648.doc b/changelog.d/15648.doc
deleted file mode 100644
index 70f65ebbff..0000000000
--- a/changelog.d/15648.doc
+++ /dev/null
@@ -1 +0,0 @@
-Remove outdated comment from the generated and sample homeserver log configs.
\ No newline at end of file
diff --git a/changelog.d/15651.misc b/changelog.d/15651.misc
deleted file mode 100644
index 4d7c0248b2..0000000000
--- a/changelog.d/15651.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump requests from 2.28.2 to 2.31.0.
diff --git a/changelog.d/15658.misc b/changelog.d/15658.misc
deleted file mode 100644
index 93ceaeafc9..0000000000
--- a/changelog.d/15658.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve type hints.
diff --git a/changelog.d/15659.misc b/changelog.d/15659.misc
deleted file mode 100644
index 93ceaeafc9..0000000000
--- a/changelog.d/15659.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve type hints.
diff --git a/changelog.d/15663.misc b/changelog.d/15663.misc
deleted file mode 100644
index cc5f801543..0000000000
--- a/changelog.d/15663.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add requesting user id parameter to key claim methods in `TransportLayerClient`.
diff --git a/changelog.d/15665.misc b/changelog.d/15665.misc
deleted file mode 100644
index 7ad424d8df..0000000000
--- a/changelog.d/15665.misc
+++ /dev/null
@@ -1 +0,0 @@
-Speed up rebuilding of the user directory for local users.
diff --git a/changelog.d/15666.misc b/changelog.d/15666.misc
deleted file mode 100644
index 92eae49952..0000000000
--- a/changelog.d/15666.misc
+++ /dev/null
@@ -1 +0,0 @@
-Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11.
\ No newline at end of file
diff --git a/changelog.d/15668.doc b/changelog.d/15668.doc
deleted file mode 100644
index 3526a4d50c..0000000000
--- a/changelog.d/15668.doc
+++ /dev/null
@@ -1 +0,0 @@
-Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm.
diff --git a/changelog.d/15678.misc b/changelog.d/15678.misc
deleted file mode 100644
index 92eae49952..0000000000
--- a/changelog.d/15678.misc
+++ /dev/null
@@ -1 +0,0 @@
-Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11.
\ No newline at end of file
diff --git a/changelog.d/15681.misc b/changelog.d/15681.misc
deleted file mode 100644
index 2de551dd63..0000000000
--- a/changelog.d/15681.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump log from 0.4.17 to 0.4.18.
diff --git a/changelog.d/15682.misc b/changelog.d/15682.misc
deleted file mode 100644
index 687af7d8d7..0000000000
--- a/changelog.d/15682.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump prometheus-client from 0.16.0 to 0.17.0.
diff --git a/changelog.d/15683.misc b/changelog.d/15683.misc
deleted file mode 100644
index 147f13b99c..0000000000
--- a/changelog.d/15683.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-pyyaml from 6.0.12.9 to 6.0.12.10.
diff --git a/changelog.d/15684.misc b/changelog.d/15684.misc
deleted file mode 100644
index 4c2edf87fd..0000000000
--- a/changelog.d/15684.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-requests from 2.30.0.0 to 2.31.0.0.
diff --git a/changelog.d/15685.misc b/changelog.d/15685.misc
deleted file mode 100644
index 7d4cf65bf3..0000000000
--- a/changelog.d/15685.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump pydantic from 1.10.7 to 1.10.8.
diff --git a/changelog.d/15686.misc b/changelog.d/15686.misc
deleted file mode 100644
index feacbf35d6..0000000000
--- a/changelog.d/15686.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-bleach from 6.0.0.1 to 6.0.0.3.
diff --git a/debian/changelog b/debian/changelog
index fbdc9c177e..2d88cd9d29 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.85.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.85.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 30 May 2023 13:56:54 +0100
+
 matrix-synapse-py3 (1.84.1) stable; urgency=medium

   * New Synapse release 1.84.1.
diff --git a/pyproject.toml b/pyproject.toml
index 6e9bce65b6..7227bc7523 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.84.1"
+version = "1.85.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"

From cebff6f4d584683bc122686e38342dbd8699818e Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 30 May 2023 14:05:44 +0100
Subject: [PATCH 20/21] Tweak release script dependabot wording

---
 scripts-dev/release.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 257d1e9ebd..89ffba8d92 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -875,6 +875,8 @@ def build_dependabot_changelog(repo: Repo, current_version: version.Version) ->
     for i, message in enumerate(messages):
         messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message)
     messages.insert(0, "### Updates to locked dependencies\n")
+    # Add an extra blank line to the bottom of the section
+    messages.append("")
     return "\n".join(messages)

From 3389653e1522c9aaea227b2afa36acd5db3ad9fe Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 30 May 2023 14:18:42 +0100
Subject: [PATCH 21/21] Update changelog

---
 CHANGES.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index ba0995aa6f..636c591568 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -47,8 +47,8 @@ Internal Changes
 - Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. ([\#15610](https://github.com/matrix-org/synapse/issues/15610), [\#15647](https://github.com/matrix-org/synapse/issues/15647))
 - Trace how many new events from the backfill response we need to process. ([\#15633](https://github.com/matrix-org/synapse/issues/15633))
 - Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. ([\#15615](https://github.com/matrix-org/synapse/issues/15615))
-- Update Mutual Rooms (MSC2666) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621))
-- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#15625](https://github.com/matrix-org/synapse/issues/15625))
+- Update Mutual Rooms ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621))
+- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#15625](https://github.com/matrix-org/synapse/issues/15625))
 - Fix the olddeps CI. ([\#15626](https://github.com/matrix-org/synapse/issues/15626))
 - Remove duplicate timestamp from test logs (`_trial_temp/test.log`). ([\#15636](https://github.com/matrix-org/synapse/issues/15636))
 - Fix two memory leaks in `trial` test runs. ([\#15630](https://github.com/matrix-org/synapse/issues/15630))
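
For context on the release-script hunk in PATCH 20/21 above: `build_dependabot_changelog` rewrites dependabot commit subjects such as "Bump log from 0.4.17 to 0.4.18 (#15681)" into the linked bullets that appear under "### Updates to locked dependencies" in the CHANGES.md hunks, and the patch makes it append a blank line so the generated section is cleanly separated from the release heading that follows it. Below is a minimal, self-contained Python sketch of that transformation, not the real tooling: the actual function takes a `git.Repo` and a `version.Version` and reads the commit messages itself, and its `replacer` helper is defined outside the quoted hunk, so the exact link format here is inferred from the CHANGES.md output shown earlier.

    import re
    from typing import List


    def build_dependabot_section(messages: List[str]) -> str:
        """Sketch: rewrite dependabot commit subjects into changelog bullets."""

        def replacer(match: re.Match) -> str:
            # "Bump foo from 1.0 to 2.0 (#1234)" becomes a bullet whose PR
            # number links to the project's issue redirect, mirroring the
            # "* Bump ... ([\#1234](...))" style seen in CHANGES.md.
            summary, pr = match.group(1), match.group(2)
            return (
                f"* {summary}. "
                f"([\\#{pr}](https://github.com/matrix-org/synapse/issues/{pr}))"
            )

        bullets = [re.sub(r"(.*) \(#(\d+)\)$", replacer, m) for m in messages]
        bullets.insert(0, "### Updates to locked dependencies\n")
        # The PATCH 20/21 tweak: terminate the section with an extra blank line.
        bullets.append("")
        return "\n".join(bullets)


    # Commit subject taken from the changelog above:
    print(build_dependabot_section(["Bump log from 0.4.17 to 0.4.18 (#15681)"]))
    # ### Updates to locked dependencies
    #
    # * Bump log from 0.4.17 to 0.4.18. ([\#15681](https://github.com/matrix-org/synapse/issues/15681))
    # (plus the new trailing blank line)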